diff options
Diffstat (limited to 'pkg')
922 files changed, 170910 insertions, 0 deletions
diff --git a/pkg/abi/BUILD b/pkg/abi/BUILD new file mode 100644 index 000000000..4d507161f --- /dev/null +++ b/pkg/abi/BUILD @@ -0,0 +1,25 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "abi_state", + srcs = [ + "abi.go", + ], + out = "abi_state.go", + package = "abi", +) + +go_library( + name = "abi", + srcs = [ + "abi.go", + "abi_state.go", + "flag.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/abi", + visibility = ["//:sandbox"], + deps = ["//pkg/state"], +) diff --git a/pkg/abi/abi.go b/pkg/abi/abi.go new file mode 100644 index 000000000..a53c2747b --- /dev/null +++ b/pkg/abi/abi.go @@ -0,0 +1,41 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package abi describes the interface between a kernel and userspace. +package abi + +import ( + "fmt" +) + +// OS describes the target operating system for an ABI. +// +// Note that OS is architecture-independent. The details of the OS ABI will +// vary between architectures. +type OS int + +const ( + // Linux is the Linux ABI. + Linux OS = iota +) + +// String implements fmt.Stringer. 
+func (o OS) String() string { + switch o { + case Linux: + return "linux" + default: + return fmt.Sprintf("OS(%d)", o) + } +} diff --git a/pkg/abi/flag.go b/pkg/abi/flag.go new file mode 100644 index 000000000..0391ccf37 --- /dev/null +++ b/pkg/abi/flag.go @@ -0,0 +1,76 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package abi + +import ( + "fmt" + "math" + "strconv" + "strings" +) + +// A FlagSet is a slice of bit-flags and their name. +type FlagSet []struct { + Flag uint64 + Name string +} + +// Parse returns a pretty version of val, using the flag names for known flags. +// Unknown flags remain numeric. +func (s FlagSet) Parse(val uint64) string { + var flags []string + + for _, f := range s { + if val&f.Flag == f.Flag { + flags = append(flags, f.Name) + val &^= f.Flag + } + } + + if val != 0 { + flags = append(flags, "0x"+strconv.FormatUint(val, 16)) + } + + return strings.Join(flags, "|") +} + +// ValueSet is a slice of syscall values and their name. Parse will replace +// values that exactly match an entry with its name. +type ValueSet []struct { + Value uint64 + Name string +} + +// Parse returns the name of the value associated with `val`. Unknown values +// are converted to hex. +func (e ValueSet) Parse(val uint64) string { + for _, f := range e { + if val == f.Value { + return f.Name + } + } + return fmt.Sprintf("%#x", val) +} + +// ParseName returns the flag value associated with 'name'. 
Returns false +// if no value is found. +func (e ValueSet) ParseName(name string) (uint64, bool) { + for _, f := range e { + if name == f.Name { + return f.Value, true + } + } + return math.MaxUint64, false +} diff --git a/pkg/abi/linux/BUILD b/pkg/abi/linux/BUILD new file mode 100644 index 000000000..a428e61a3 --- /dev/null +++ b/pkg/abi/linux/BUILD @@ -0,0 +1,69 @@ +# Package linux contains the constants and types needed to inferface with a +# Linux kernel. It should be used instead of syscall or golang.org/x/sys/unix +# when the host OS may not be Linux. + +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "linux_state", + srcs = [ + "binder.go", + "bpf.go", + "time.go", + "tty.go", + ], + out = "linux_state.go", + package = "linux", +) + +go_library( + name = "linux", + srcs = [ + "aio.go", + "ashmem.go", + "binder.go", + "bpf.go", + "capability.go", + "dev.go", + "elf.go", + "errors.go", + "exec.go", + "file.go", + "fs.go", + "futex.go", + "inotify.go", + "ioctl.go", + "ip.go", + "ipc.go", + "limits.go", + "linux.go", + "linux_state.go", + "mm.go", + "netdevice.go", + "netlink.go", + "netlink_route.go", + "poll.go", + "prctl.go", + "rusage.go", + "sched.go", + "seccomp.go", + "sem.go", + "signal.go", + "socket.go", + "time.go", + "tty.go", + "uio.go", + "utsname.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/abi/linux", + visibility = ["//visibility:public"], + deps = [ + "//pkg/abi", + "//pkg/binary", + "//pkg/bits", + "//pkg/state", + ], +) diff --git a/pkg/abi/linux/aio.go b/pkg/abi/linux/aio.go new file mode 100644 index 000000000..9c39ca2ef --- /dev/null +++ b/pkg/abi/linux/aio.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +const ( + // AIORingSize is sizeof(struct aio_ring). + AIORingSize = 32 +) diff --git a/pkg/abi/linux/ashmem.go b/pkg/abi/linux/ashmem.go new file mode 100644 index 000000000..7fbfd2e68 --- /dev/null +++ b/pkg/abi/linux/ashmem.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// Constants used by ashmem in pin-related ioctls. +const ( + AshmemNotPurged = 0 + AshmemWasPurged = 1 + AshmemIsUnpinned = 0 + AshmemIsPinned = 1 +) + +// AshmemPin structure is used for pin-related ioctls. +type AshmemPin struct { + Offset uint32 + Len uint32 +} diff --git a/pkg/abi/linux/binder.go b/pkg/abi/linux/binder.go new file mode 100644 index 000000000..b228898f9 --- /dev/null +++ b/pkg/abi/linux/binder.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// BinderVersion structure is used for BINDER_VERSION ioctl. +type BinderVersion struct { + ProtocolVersion int32 +} diff --git a/pkg/abi/linux/bpf.go b/pkg/abi/linux/bpf.go new file mode 100644 index 000000000..f597ef4f5 --- /dev/null +++ b/pkg/abi/linux/bpf.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// BPFInstruction is a raw BPF virtual machine instruction. +type BPFInstruction struct { + // OpCode is the operation to execute. + OpCode uint16 + + // JumpIfTrue is the number of instructions to skip if OpCode is a + // conditional instruction and the condition is true. + JumpIfTrue uint8 + + // JumpIfFalse is the number of instructions to skip if OpCode is a + // conditional instruction and the condition is false. + JumpIfFalse uint8 + + // K is a constant parameter. The meaning depends on the value of OpCode. 
+ K uint32 +} diff --git a/pkg/abi/linux/capability.go b/pkg/abi/linux/capability.go new file mode 100644 index 000000000..1a1bd0ce3 --- /dev/null +++ b/pkg/abi/linux/capability.go @@ -0,0 +1,104 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// A Capability represents the ability to perform a privileged operation. +type Capability int + +// Capabilities defined by Linux. Taken from the kernel's +// include/uapi/linux/capability.h. See capabilities(7) or that file for more +// detailed capability descriptions. 
+const ( + CAP_CHOWN = Capability(0) + CAP_DAC_OVERRIDE = Capability(1) + CAP_DAC_READ_SEARCH = Capability(2) + CAP_FOWNER = Capability(3) + CAP_FSETID = Capability(4) + CAP_KILL = Capability(5) + CAP_SETGID = Capability(6) + CAP_SETUID = Capability(7) + CAP_SETPCAP = Capability(8) + CAP_LINUX_IMMUTABLE = Capability(9) + CAP_NET_BIND_SERVICE = Capability(10) + CAP_NET_BROAD_CAST = Capability(11) + CAP_NET_ADMIN = Capability(12) + CAP_NET_RAW = Capability(13) + CAP_IPC_LOCK = Capability(14) + CAP_IPC_OWNER = Capability(15) + CAP_SYS_MODULE = Capability(16) + CAP_SYS_RAWIO = Capability(17) + CAP_SYS_CHROOT = Capability(18) + CAP_SYS_PTRACE = Capability(19) + CAP_SYS_PACCT = Capability(20) + CAP_SYS_ADMIN = Capability(21) + CAP_SYS_BOOT = Capability(22) + CAP_SYS_NICE = Capability(23) + CAP_SYS_RESOURCE = Capability(24) + CAP_SYS_TIME = Capability(25) + CAP_SYS_TTY_CONFIG = Capability(26) + CAP_MKNOD = Capability(27) + CAP_LEASE = Capability(28) + CAP_AUDIT_WRITE = Capability(29) + CAP_AUDIT_CONTROL = Capability(30) + CAP_SETFCAP = Capability(31) + CAP_MAC_OVERRIDE = Capability(32) + CAP_MAC_ADMIN = Capability(33) + CAP_SYSLOG = Capability(34) + CAP_WAKE_ALARM = Capability(35) + CAP_BLOCK_SUSPEND = Capability(36) + + // MaxCapability is the highest-numbered capability. + MaxCapability = Capability(36) // CAP_BLOCK_SUSPEND as of 3.11 +) + +// Ok returns true if cp is a supported capability. +func (cp Capability) Ok() bool { + return cp >= 0 && cp <= MaxCapability +} + +// Version numbers used by the capget/capset syscalls, defined in Linux's +// include/uapi/linux/capability.h. +const ( + // LINUX_CAPABILITY_VERSION_1 causes the data pointer to be + // interpreted as a pointer to a single cap_user_data_t. Since capability + // sets are 64 bits and the "capability sets" in cap_user_data_t are 32 + // bits only, this causes the upper 32 bits to be implicitly 0. 
+ LINUX_CAPABILITY_VERSION_1 = 0x19980330 + + // LINUX_CAPABILITY_VERSION_2 and LINUX_CAPABILITY_VERSION_3 cause the + // data pointer to be interpreted as a pointer to an array of 2 + // cap_user_data_t, using the second to store the 32 MSB of each capability + // set. Versions 2 and 3 are identical, but Linux printk's a warning on use + // of version 2 due to a userspace API defect. + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 + + // HighestCapabilityVersion is the highest supported + // LINUX_CAPABILITY_VERSION_* version. + HighestCapabilityVersion = LINUX_CAPABILITY_VERSION_3 +) + +// CapUserHeader is equivalent to Linux's cap_user_header_t. +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +// CapUserData is equivalent to Linux's cap_user_data_t. +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} diff --git a/pkg/abi/linux/dev.go b/pkg/abi/linux/dev.go new file mode 100644 index 000000000..d3b63063f --- /dev/null +++ b/pkg/abi/linux/dev.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// MakeDeviceID encodes a major and minor device number into a single device ID. 
+// +// Format (see linux/kdev_t.h:new_encode_dev): +// +// Bits 7:0 - minor bits 7:0 +// Bits 19:8 - major bits 11:0 +// Bits 31:20 - minor bits 19:8 +func MakeDeviceID(major uint16, minor uint32) uint32 { + return (minor & 0xff) | ((uint32(major) & 0xfff) << 8) | ((minor >> 8) << 20) +} + +// Character device IDs. +// +// See Documentations/devices.txt and uapi/linux/major.h. +const ( + // TTYAUX_MAJOR is the major device number for alternate TTY devices. + TTYAUX_MAJOR = 5 + + // UNIX98_PTY_MASTER_MAJOR is the initial major device number for + // Unix98 PTY masters. + UNIX98_PTY_MASTER_MAJOR = 128 + + // UNIX98_PTY_SLAVE_MAJOR is the initial major device number for + // Unix98 PTY slaves. + UNIX98_PTY_SLAVE_MAJOR = 136 +) + +// Minor device numbers for TTYAUX_MAJOR. +const ( + // PTMX_MINOR is the minor device number for /dev/ptmx. + PTMX_MINOR = 2 +) diff --git a/pkg/abi/linux/elf.go b/pkg/abi/linux/elf.go new file mode 100644 index 000000000..76c13b677 --- /dev/null +++ b/pkg/abi/linux/elf.go @@ -0,0 +1,91 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// Linux auxiliary vector entry types. +const ( + // AT_NULL is the end of the auxiliary vector. + AT_NULL = 0 + + // AT_IGNORE should be ignored. + AT_IGNORE = 1 + + // AT_EXECFD is the file descriptor of the program. + AT_EXECFD = 2 + + // AT_PHDR points to the program headers. + AT_PHDR = 3 + + // AT_PHENT is the size of a program header entry. 
+ AT_PHENT = 4 + + // AT_PHNUM is the number of program headers. + AT_PHNUM = 5 + + // AT_PAGESZ is the system page size. + AT_PAGESZ = 6 + + // AT_BASE is the base address of the interpreter. + AT_BASE = 7 + + // AT_FLAGS are flags. + AT_FLAGS = 8 + + // AT_ENTRY is the program entry point. + AT_ENTRY = 9 + + // AT_NOTELF indicates that the program is not an ELF binary. + AT_NOTELF = 10 + + // AT_UID is the real UID. + AT_UID = 11 + + // AT_EUID is the effective UID. + AT_EUID = 12 + + // AT_GID is the real GID. + AT_GID = 13 + + // AT_EGID is the effective GID. + AT_EGID = 14 + + // AT_PLATFORM is a string identifying the CPU. + AT_PLATFORM = 15 + + // AT_HWCAP are arch-dependent CPU capabilities. + AT_HWCAP = 16 + + // AT_CLKTCK is the frequency used by times(2). + AT_CLKTCK = 17 + + // AT_SECURE indicate secure mode. + AT_SECURE = 23 + + // AT_BASE_PLATFORM is a string identifying the "real" platform. It may + // differ from AT_PLATFORM. + AT_BASE_PLATFORM = 24 + + // AT_RANDOM points to 16-bytes of random data. + AT_RANDOM = 25 + + // AT_HWCAP2 is an extension of AT_HWCAP. + AT_HWCAP2 = 26 + + // AT_EXECFN is the path used to execute the program. + AT_EXECFN = 31 + + // AT_SYSINFO_EHDR is the address of the VDSO. + AT_SYSINFO_EHDR = 33 +) diff --git a/pkg/abi/linux/errors.go b/pkg/abi/linux/errors.go new file mode 100644 index 000000000..b5ddb2b2f --- /dev/null +++ b/pkg/abi/linux/errors.go @@ -0,0 +1,172 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// Errno represents a Linux errno value. +type Errno struct { + number int + name string +} + +// Number returns the errno number. +func (e *Errno) Number() int { + return e.number +} + +// Error implements error.Error. +func (e *Errno) Error() string { + return e.name +} + +// Errno values from include/uapi/asm-generic/errno-base.h. +var ( + EPERM = &Errno{1, "operation not permitted"} + ENOENT = &Errno{2, "no such file or directory"} + ESRCH = &Errno{3, "no such process"} + EINTR = &Errno{4, "interrupted system call"} + EIO = &Errno{5, "I/O error"} + ENXIO = &Errno{6, "no such device or address"} + E2BIG = &Errno{7, "argument list too long"} + ENOEXEC = &Errno{8, "exec format error"} + EBADF = &Errno{9, "bad file number"} + ECHILD = &Errno{10, "no child processes"} + EAGAIN = &Errno{11, "try again"} + ENOMEM = &Errno{12, "out of memory"} + EACCES = &Errno{13, "permission denied"} + EFAULT = &Errno{14, "bad address"} + ENOTBLK = &Errno{15, "block device required"} + EBUSY = &Errno{16, "device or resource busy"} + EEXIST = &Errno{17, "file exists"} + EXDEV = &Errno{18, "cross-device link"} + ENODEV = &Errno{19, "no such device"} + ENOTDIR = &Errno{20, "not a directory"} + EISDIR = &Errno{21, "is a directory"} + EINVAL = &Errno{22, "invalid argument"} + ENFILE = &Errno{23, "file table overflow"} + EMFILE = &Errno{24, "too many open files"} + ENOTTY = &Errno{25, "not a typewriter"} + ETXTBSY = &Errno{26, "text file busy"} + EFBIG = &Errno{27, "file too large"} + ENOSPC = &Errno{28, "no space left on device"} + ESPIPE = &Errno{29, "illegal seek"} + EROFS = &Errno{30, "read-only file system"} + EMLINK = &Errno{31, "too many links"} + EPIPE = &Errno{32, "broken pipe"} + EDOM = &Errno{33, "math argument out of domain of func"} + ERANGE = &Errno{34, "math result not representable"} +) + +// Errno values from include/uapi/asm-generic/errno.h. 
+var ( + EDEADLK = &Errno{35, "resource deadlock would occur"} + ENAMETOOLONG = &Errno{36, "file name too long"} + ENOLCK = &Errno{37, "no record locks available"} + ENOSYS = &Errno{38, "invalid system call number"} + ENOTEMPTY = &Errno{39, "directory not empty"} + ELOOP = &Errno{40, "too many symbolic links encountered"} + EWOULDBLOCK = &Errno{EAGAIN.number, "operation would block"} + ENOMSG = &Errno{42, "no message of desired type"} + EIDRM = &Errno{43, "identifier removed"} + ECHRNG = &Errno{44, "channel number out of range"} + EL2NSYNC = &Errno{45, "level 2 not synchronized"} + EL3HLT = &Errno{46, "level 3 halted"} + EL3RST = &Errno{47, "level 3 reset"} + ELNRNG = &Errno{48, "link number out of range"} + EUNATCH = &Errno{49, "protocol driver not attached"} + ENOCSI = &Errno{50, "no CSI structure available"} + EL2HLT = &Errno{51, "level 2 halted"} + EBADE = &Errno{52, "invalid exchange"} + EBADR = &Errno{53, "invalid request descriptor"} + EXFULL = &Errno{54, "exchange full"} + ENOANO = &Errno{55, "no anode"} + EBADRQC = &Errno{56, "invalid request code"} + EBADSLT = &Errno{57, "invalid slot"} + EDEADLOCK = EDEADLK + EBFONT = &Errno{59, "bad font file format"} + ENOSTR = &Errno{60, "device not a stream"} + ENODATA = &Errno{61, "no data available"} + ETIME = &Errno{62, "timer expired"} + ENOSR = &Errno{63, "out of streams resources"} + ENONET = &Errno{64, "machine is not on the network"} + ENOPKG = &Errno{65, "package not installed"} + EREMOTE = &Errno{66, "object is remote"} + ENOLINK = &Errno{67, "link has been severed"} + EADV = &Errno{68, "advertise error"} + ESRMNT = &Errno{69, "srmount error"} + ECOMM = &Errno{70, "communication error on send"} + EPROTO = &Errno{71, "protocol error"} + EMULTIHOP = &Errno{72, "multihop attempted"} + EDOTDOT = &Errno{73, "RFS specific error"} + EBADMSG = &Errno{74, "not a data message"} + EOVERFLOW = &Errno{75, "value too large for defined data type"} + ENOTUNIQ = &Errno{76, "name not unique on network"} + EBADFD = &Errno{77, 
"file descriptor in bad state"} + EREMCHG = &Errno{78, "remote address changed"} + ELIBACC = &Errno{79, "can not access a needed shared library"} + ELIBBAD = &Errno{80, "accessing a corrupted shared library"} + ELIBSCN = &Errno{81, ".lib section in a.out corrupted"} + ELIBMAX = &Errno{82, "attempting to link in too many shared libraries"} + ELIBEXEC = &Errno{83, "cannot exec a shared library directly"} + EILSEQ = &Errno{84, "illegal byte sequence"} + ERESTART = &Errno{85, "interrupted system call should be restarted"} + ESTRPIPE = &Errno{86, "streams pipe error"} + EUSERS = &Errno{87, "too many users"} + ENOTSOCK = &Errno{88, "socket operation on non-socket"} + EDESTADDRREQ = &Errno{89, "destination address required"} + EMSGSIZE = &Errno{90, "message too long"} + EPROTOTYPE = &Errno{91, "protocol wrong type for socket"} + ENOPROTOOPT = &Errno{92, "protocol not available"} + EPROTONOSUPPORT = &Errno{93, "protocol not supported"} + ESOCKTNOSUPPORT = &Errno{94, "socket type not supported"} + EOPNOTSUPP = &Errno{95, "operation not supported on transport endpoint"} + EPFNOSUPPORT = &Errno{96, "protocol family not supported"} + EAFNOSUPPORT = &Errno{97, "address family not supported by protocol"} + EADDRINUSE = &Errno{98, "address already in use"} + EADDRNOTAVAIL = &Errno{99, "cannot assign requested address"} + ENETDOWN = &Errno{100, "network is down"} + ENETUNREACH = &Errno{101, "network is unreachable"} + ENETRESET = &Errno{102, "network dropped connection because of reset"} + ECONNABORTED = &Errno{103, "software caused connection abort"} + ECONNRESET = &Errno{104, "connection reset by peer"} + ENOBUFS = &Errno{105, "no buffer space available"} + EISCONN = &Errno{106, "transport endpoint is already connected"} + ENOTCONN = &Errno{107, "transport endpoint is not connected"} + ESHUTDOWN = &Errno{108, "cannot send after transport endpoint shutdown"} + ETOOMANYREFS = &Errno{109, "too many references: cannot splice"} + ETIMEDOUT = &Errno{110, "connection timed out"} + 
ECONNREFUSED = &Errno{111, "connection refused"} + EHOSTDOWN = &Errno{112, "host is down"} + EHOSTUNREACH = &Errno{113, "no route to host"} + EALREADY = &Errno{114, "operation already in progress"} + EINPROGRESS = &Errno{115, "operation now in progress"} + ESTALE = &Errno{116, "stale file handle"} + EUCLEAN = &Errno{117, "structure needs cleaning"} + ENOTNAM = &Errno{118, "not a XENIX named type file"} + ENAVAIL = &Errno{119, "no XENIX semaphores available"} + EISNAM = &Errno{120, "is a named type file"} + EREMOTEIO = &Errno{121, "remote I/O error"} + EDQUOT = &Errno{122, "quota exceeded"} + ENOMEDIUM = &Errno{123, "no medium found"} + EMEDIUMTYPE = &Errno{124, "wrong medium type"} + ECANCELED = &Errno{125, "operation Canceled"} + ENOKEY = &Errno{126, "required key not available"} + EKEYEXPIRED = &Errno{127, "key has expired"} + EKEYREVOKED = &Errno{128, "key has been revoked"} + EKEYREJECTED = &Errno{129, "key was rejected by service"} + EOWNERDEAD = &Errno{130, "owner died"} + ENOTRECOVERABLE = &Errno{131, "state not recoverable"} + ERFKILL = &Errno{132, "operation not possible due to RF-kill"} + EHWPOISON = &Errno{133, "memory page has hardware error"} +) diff --git a/pkg/abi/linux/exec.go b/pkg/abi/linux/exec.go new file mode 100644 index 000000000..4d81eca54 --- /dev/null +++ b/pkg/abi/linux/exec.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package linux + +// TASK_COMM_LEN is the task command name length. +const TASK_COMM_LEN = 16 diff --git a/pkg/abi/linux/file.go b/pkg/abi/linux/file.go new file mode 100644 index 000000000..44672647b --- /dev/null +++ b/pkg/abi/linux/file.go @@ -0,0 +1,232 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "fmt" + "strings" + + "gvisor.googlesource.com/gvisor/pkg/abi" +) + +// Constants for open(2). +const ( + O_NONBLOCK = 00004000 + O_CLOEXEC = 02000000 + O_PATH = 010000000 +) + +// Constants for fstatat(2). +const ( + AT_SYMLINK_NOFOLLOW = 0x100 +) + +// Constants for mount(2). +const ( + MS_RDONLY = 0x1 + MS_NOSUID = 0x2 + MS_NODEV = 0x4 + MS_NOEXEC = 0x8 + MS_SYNCHRONOUS = 0x10 + MS_REMOUNT = 0x20 + MS_MANDLOCK = 0x40 + MS_DIRSYNC = 0x80 + MS_NOATIME = 0x400 + MS_NODIRATIME = 0x800 + MS_BIND = 0x1000 + MS_MOVE = 0x2000 + MS_REC = 0x4000 + + MS_POSIXACL = 0x10000 + MS_UNBINDABLE = 0x20000 + MS_PRIVATE = 0x40000 + MS_SLAVE = 0x80000 + MS_SHARED = 0x100000 + MS_RELATIME = 0x200000 + MS_KERNMOUNT = 0x400000 + MS_I_VERSION = 0x800000 + MS_STRICTATIME = 0x1000000 + + MS_MGC_VAL = 0xC0ED0000 + MS_MGC_MSK = 0xffff0000 +) + +// Constants for umount2(2). +const ( + MNT_FORCE = 0x1 + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + UMOUNT_NOFOLLOW = 0x8 +) + +// Constants for unlinkat(2). +const ( + AT_REMOVEDIR = 0x200 +) + +// Constants for linkat(2) and fchownat(2). 
+const ( + AT_SYMLINK_FOLLOW = 0x400 + AT_EMPTY_PATH = 0x1000 +) + +// Constants for all file-related ...at(2) syscalls. +const ( + AT_FDCWD = -100 +) + +// Special values for the ns field in utimensat(2). +const ( + UTIME_NOW = ((1 << 30) - 1) + UTIME_OMIT = ((1 << 30) - 2) +) + +// MaxSymlinkTraversals is the maximum number of links that will be followed by +// the kernel to resolve a symlink. +const MaxSymlinkTraversals = 40 + +// Constants for flock(2). +const ( + LOCK_SH = 1 // shared lock + LOCK_EX = 2 // exclusive lock + LOCK_NB = 4 // or'd with one of the above to prevent blocking + LOCK_UN = 8 // remove lock +) + +// Values for mode_t. +const ( + FileTypeMask = 0170000 + ModeSocket = 0140000 + ModeSymlink = 0120000 + ModeRegular = 0100000 + ModeBlockDevice = 060000 + ModeDirectory = 040000 + ModeCharacterDevice = 020000 + ModeNamedPipe = 010000 + + ModeSetUID = 04000 + ModeSetGID = 02000 + ModeSticky = 01000 + + ModeUserAll = 0700 + ModeUserRead = 0400 + ModeUserWrite = 0200 + ModeUserExec = 0100 + ModeGroupAll = 0070 + ModeGroupRead = 0040 + ModeGroupWrite = 0020 + ModeGroupExec = 0010 + ModeOtherAll = 0007 + ModeOtherRead = 0004 + ModeOtherWrite = 0002 + ModeOtherExec = 0001 + PermissionsMask = 0777 +) + +// Stat represents struct stat. +type Stat struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + UID uint32 + GID uint32 + X_pad0 int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 + ATime Timespec + MTime Timespec + CTime Timespec + X_unused [3]int64 +} + +// FileMode represents a mode_t. +type FileMode uint + +// Permissions returns just the permission bits. +func (m FileMode) Permissions() FileMode { + return m & PermissionsMask +} + +// FileType returns just the file type bits. +func (m FileMode) FileType() FileMode { + return m & FileTypeMask +} + +// ExtraBits returns everything but the file type and permission bits. 
+func (m FileMode) ExtraBits() FileMode { + return m &^ (PermissionsMask | FileTypeMask) +} + +// String returns a string representation of m. +func (m FileMode) String() string { + var s []string + if ft := m.FileType(); ft != 0 { + s = append(s, fileType.Parse(uint64(ft))) + } + if eb := m.ExtraBits(); eb != 0 { + s = append(s, modeExtraBits.Parse(uint64(eb))) + } + s = append(s, fmt.Sprintf("0o%o", m.Permissions())) + return strings.Join(s, "|") +} + +var modeExtraBits = abi.FlagSet{ + { + Flag: ModeSetUID, + Name: "S_ISUID", + }, + { + Flag: ModeSetGID, + Name: "S_ISGID", + }, + { + Flag: ModeSticky, + Name: "S_ISVTX", + }, +} + +var fileType = abi.ValueSet{ + { + Value: ModeSocket, + Name: "S_IFSOCK", + }, + { + Value: ModeSymlink, + Name: "S_IFLINK", + }, + { + Value: ModeRegular, + Name: "S_IFREG", + }, + { + Value: ModeBlockDevice, + Name: "S_IFBLK", + }, + { + Value: ModeDirectory, + Name: "S_IFDIR", + }, + { + Value: ModeCharacterDevice, + Name: "S_IFCHR", + }, + { + Value: ModeNamedPipe, + Name: "S_IFIFO", + }, +} diff --git a/pkg/abi/linux/fs.go b/pkg/abi/linux/fs.go new file mode 100644 index 000000000..00b239351 --- /dev/null +++ b/pkg/abi/linux/fs.go @@ -0,0 +1,69 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// Filesystem types used in statfs(2). +// +// See linux/magic.h. 
+const ( + ANON_INODE_FS_MAGIC = 0x09041934 + DEVPTS_SUPER_MAGIC = 0x00001cd1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 + PIPEFS_MAGIC = 0x50495045 + RAMFS_MAGIC = 0x09041934 + SOCKFS_MAGIC = 0x534F434B + TMPFS_MAGIC = 0x01021994 + V9FS_MAGIC = 0x01021997 +) + +// Statfs is struct statfs, from uapi/asm-generic/statfs.h. +type Statfs struct { + // Type is one of the filesystem magic values, defined above. + Type uint64 + + // BlockSize is the data block size. + BlockSize int64 + + // Blocks is the number of data blocks in use. + Blocks uint64 + + // BlocksFree is the number of free blocks. + BlocksFree uint64 + + // BlocksAvailable is the number of blocks free for use by + // unprivileged users. + BlocksAvailable uint64 + + // Files is the number of used file nodes on the filesystem. + Files uint64 + + // FileFress is the number of free file nodes on the filesystem. + FilesFree uint64 + + // FSID is the filesystem ID. + FSID [2]int32 + + // NameLength is the maximum file name length. + NameLength uint64 + + // FragmentSize is equivalent to BlockSize. + FragmentSize int64 + + // Flags is the set of filesystem mount flags. + Flags uint64 + + // Spare is unused. + Spare [4]uint64 +} diff --git a/pkg/abi/linux/futex.go b/pkg/abi/linux/futex.go new file mode 100644 index 000000000..f63f5200c --- /dev/null +++ b/pkg/abi/linux/futex.go @@ -0,0 +1,56 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
// Futex operations, from <linux/futex.h>. These are the values accepted as
// the futex_op argument of futex(2).
const (
	FUTEX_WAIT            = 0
	FUTEX_WAKE            = 1
	FUTEX_FD              = 2
	FUTEX_REQUEUE         = 3
	FUTEX_CMP_REQUEUE     = 4
	FUTEX_WAKE_OP         = 5
	FUTEX_LOCK_PI         = 6
	FUTEX_UNLOCK_PI       = 7
	FUTEX_TRYLOCK_PI      = 8
	FUTEX_WAIT_BITSET     = 9
	FUTEX_WAKE_BITSET     = 10
	FUTEX_WAIT_REQUEUE_PI = 11
	FUTEX_CMP_REQUEUE_PI  = 12

	// FUTEX_PRIVATE_FLAG and FUTEX_CLOCK_REALTIME may be ORed into the
	// operations above.
	FUTEX_PRIVATE_FLAG   = 128
	FUTEX_CLOCK_REALTIME = 256
)

// Operation and comparison encodings used by FUTEX_WAKE_OP, from
// <linux/futex.h>.
const (
	FUTEX_OP_SET         = 0
	FUTEX_OP_ADD         = 1
	FUTEX_OP_OR          = 2
	FUTEX_OP_ANDN        = 3
	FUTEX_OP_XOR         = 4
	FUTEX_OP_OPARG_SHIFT = 8
	FUTEX_OP_CMP_EQ      = 0
	FUTEX_OP_CMP_NE      = 1
	FUTEX_OP_CMP_LT      = 2
	FUTEX_OP_CMP_LE      = 3
	FUTEX_OP_CMP_GT      = 4
	FUTEX_OP_CMP_GE      = 5
)

// FUTEX_TID_MASK is the TID portion of a PI futex word.
const FUTEX_TID_MASK = 0x3fffffff

// Inotify events observable by userspace. These directly correspond to
// filesystem operations and there may only be a single of them per inotify
// event read from an inotify fd.
const (
	// IN_ACCESS indicates a file was accessed.
	IN_ACCESS = 0x00000001
	// IN_MODIFY indicates a file was modified.
	IN_MODIFY = 0x00000002
	// IN_ATTRIB indicates a watch target's metadata changed.
	IN_ATTRIB = 0x00000004
	// IN_CLOSE_WRITE indicates a writable file was closed.
	IN_CLOSE_WRITE = 0x00000008
	// IN_CLOSE_NOWRITE indicates a non-writable file was closed.
	IN_CLOSE_NOWRITE = 0x00000010
	// IN_OPEN indicates a file was opened.
	IN_OPEN = 0x00000020
	// IN_MOVED_FROM indicates a file was moved from X.
	IN_MOVED_FROM = 0x00000040
	// IN_MOVED_TO indicates a file was moved to Y.
	IN_MOVED_TO = 0x00000080
	// IN_CREATE indicates a file was created in a watched directory.
	IN_CREATE = 0x00000100
	// IN_DELETE indicates a file was deleted in a watched directory.
	IN_DELETE = 0x00000200
	// IN_DELETE_SELF indicates a watch target itself was deleted.
	IN_DELETE_SELF = 0x00000400
	// IN_MOVE_SELF indicates a watch target itself was moved.
	IN_MOVE_SELF = 0x00000800
	// IN_ALL_EVENTS is a mask for all observable userspace events.
	IN_ALL_EVENTS = 0x00000fff
)

// Inotify control events. These may be present in their own events, or ORed
// with other observable events.
const (
	// IN_UNMOUNT indicates the backing filesystem was unmounted.
	IN_UNMOUNT = 0x00002000
	// IN_Q_OVERFLOW indicates the event queue overflowed.
	IN_Q_OVERFLOW = 0x00004000
	// IN_IGNORED indicates a watch was removed, either implicitly or through
	// inotify_rm_watch(2).
	IN_IGNORED = 0x00008000
	// IN_ISDIR indicates the subject of an event was a directory.
	IN_ISDIR = 0x40000000
)

// Feature flags for inotify_add_watch(2).
const (
	// IN_ONLYDIR indicates that a path should be watched only if it's a
	// directory.
	IN_ONLYDIR = 0x01000000
	// IN_DONT_FOLLOW indicates that the watch path shouldn't be resolved if
	// it's a symlink.
	IN_DONT_FOLLOW = 0x02000000
	// IN_EXCL_UNLINK indicates events to this watch from unlinked objects
	// should be filtered out.
	IN_EXCL_UNLINK = 0x04000000
	// IN_MASK_ADD indicates the provided mask should be ORed into any
	// existing watch on the provided path.
	IN_MASK_ADD = 0x20000000
	// IN_ONESHOT indicates the watch should be removed after one event.
	IN_ONESHOT = 0x80000000
)

// Feature flags for inotify_init1(2).
const (
	// IN_CLOEXEC is an alias for O_CLOEXEC. It indicates that the inotify
	// fd should be closed on exec(2) and friends.
	IN_CLOEXEC = 0x00080000
	// IN_NONBLOCK is an alias for O_NONBLOCK. It indicates I/O syscall on
	// the inotify fd should not block.
	IN_NONBLOCK = 0x00000800
)

// ALL_INOTIFY_BITS contains all the bits for all possible inotify events.
// It's defined in the Linux source at "include/linux/inotify.h".
const ALL_INOTIFY_BITS = IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE |
	IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | IN_MOVED_TO | IN_CREATE |
	IN_DELETE | IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | IN_Q_OVERFLOW |
	IN_IGNORED | IN_ONLYDIR | IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD |
	IN_ISDIR | IN_ONESHOT

// ioctl(2) requests provided by asm-generic/ioctls.h, ordered by request
// number (low byte).
// Terminal and fd ioctl(2) request numbers, from asm-generic/ioctls.h.
const (
	TCGETS     = 0x00005401
	TCSETS     = 0x00005402
	TCSETSW    = 0x00005403
	TIOCINQ    = 0x0000541b
	TIOCOUTQ   = 0x00005411
	FIONREAD   = TIOCINQ // FIONREAD is an alias for TIOCINQ.
	FIONBIO    = 0x00005421
	TIOCGPTN   = 0x80045430
	TIOCSPTLCK = 0x40045431
	FIONCLEX   = 0x00005450
	FIOCLEX    = 0x00005451
)

// Binder ioctl(2) request numbers, from uapi/linux/android/binder.h.
const (
	BinderWriteReadIoctl       = 0xc0306201
	BinderSetIdleTimeoutIoctl  = 0x40086203
	BinderSetMaxThreadsIoctl   = 0x40046205
	BinderSetIdlePriorityIoctl = 0x40046206
	BinderSetContextMgrIoctl   = 0x40046207
	BinderThreadExitIoctl      = 0x40046208
	BinderVersionIoctl         = 0xc0046209
)

// Ashmem ioctl(2) request numbers, from
// drivers/staging/android/uapi/ashmem.h.
const (
	AshmemSetNameIoctl        = 0x41007701
	AshmemGetNameIoctl        = 0x81007702
	AshmemSetSizeIoctl        = 0x40087703
	AshmemGetSizeIoctl        = 0x00007704
	AshmemSetProtMaskIoctl    = 0x40087705
	AshmemGetProtMaskIoctl    = 0x00007706
	AshmemPinIoctl            = 0x40087707
	AshmemUnpinIoctl          = 0x40087708
	AshmemGetPinStatusIoctl   = 0x00007709
	AshmemPurgeAllCachesIoctl = 0x0000770a
)
// IP protocol numbers, as used in the protocol argument of socket(2).
const (
	IPPROTO_IP      = 0
	IPPROTO_ICMP    = 1
	IPPROTO_IGMP    = 2
	IPPROTO_IPIP    = 4
	IPPROTO_TCP     = 6
	IPPROTO_EGP     = 8
	IPPROTO_PUP     = 12
	IPPROTO_UDP     = 17
	IPPROTO_IDP     = 22
	IPPROTO_TP      = 29
	IPPROTO_DCCP    = 33
	IPPROTO_IPV6    = 41
	IPPROTO_RSVP    = 46
	IPPROTO_GRE     = 47
	IPPROTO_ESP     = 50
	IPPROTO_AH      = 51
	IPPROTO_MTP     = 92
	IPPROTO_BEETPH  = 94
	IPPROTO_ENCAP   = 98
	IPPROTO_PIM     = 103
	IPPROTO_COMP    = 108
	IPPROTO_SCTP    = 132
	IPPROTO_UDPLITE = 136
	IPPROTO_MPLS    = 137
	IPPROTO_RAW     = 255
)

// Control commands used with semctl. Source: //include/uapi/linux/ipc.h.
const (
	IPC_RMID = 0
	IPC_SET  = 1
	IPC_STAT = 2
	IPC_INFO = 3
)

// Resource get request flags. Source: //include/uapi/linux/ipc.h.
const (
	IPC_CREAT  = 00001000
	IPC_EXCL   = 00002000
	IPC_NOWAIT = 00004000
)

// IPC_PRIVATE is the key for a private IPC resource.
const IPC_PRIVATE = 0

// IPCPerm is equivalent to struct ipc_perm.
+type IPCPerm struct { + Key uint32 + UID uint32 + GID uint32 + CUID uint32 + CGID uint32 + Mode uint16 + pad1 uint16 + Seq uint16 + pad2 uint16 + reserved1 uint32 + reserved2 uint32 +} diff --git a/pkg/abi/linux/limits.go b/pkg/abi/linux/limits.go new file mode 100644 index 000000000..e1f0932ec --- /dev/null +++ b/pkg/abi/linux/limits.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// Resources for getrlimit(2)/setrlimit(2)/prlimit(2). +const ( + RLIMIT_CPU = 0 + RLIMIT_FSIZE = 1 + RLIMIT_DATA = 2 + RLIMIT_STACK = 3 + RLIMIT_CORE = 4 + RLIMIT_RSS = 5 + RLIMIT_NPROC = 6 + RLIMIT_NOFILE = 7 + RLIMIT_MEMLOCK = 8 + RLIMIT_AS = 9 + RLIMIT_LOCKS = 10 + RLIMIT_SIGPENDING = 11 + RLIMIT_MSGQUEUE = 12 + RLIMIT_NICE = 13 + RLIMIT_RTPRIO = 14 + RLIMIT_RTTIME = 15 +) + +// RLimit corresponds to Linux's struct rlimit. +type RLimit struct { + // Cur specifies the soft limit. + Cur uint64 + // Max specifies the hard limit. + Max uint64 +} + +const ( + // RLimInfinity is RLIM_INFINITY on Linux. + RLimInfinity = ^uint64(0) + + // DefaultStackSoftLimit is called _STK_LIM in Linux. + DefaultStackSoftLimit = 8 * 1024 * 1024 + + // DefaultNprocLimit is defined in kernel/fork.c:set_max_threads, and + // called MAX_THREADS / 2 in Linux. + DefaultNprocLimit = FUTEX_TID_MASK / 2 + + // DefaultNofileSoftLimit is called INR_OPEN_CUR in Linux. 
+ DefaultNofileSoftLimit = 1024 + + // DefaultNofileHardLimit is called INR_OPEN_MAX in Linux. + DefaultNofileHardLimit = 4096 + + // DefaultMemlockLimit is called MLOCK_LIMIT in Linux. + DefaultMemlockLimit = 64 * 1094 + + // DefaultMsgqueueLimit is called MQ_BYTES_MAX in Linux. + DefaultMsgqueueLimit = 819200 +) + +// InitRLimits is a map of initial rlimits set by Linux in +// include/asm-generic/resource.h. +var InitRLimits = map[int]RLimit{ + RLIMIT_CPU: {RLimInfinity, RLimInfinity}, + RLIMIT_FSIZE: {RLimInfinity, RLimInfinity}, + RLIMIT_DATA: {RLimInfinity, RLimInfinity}, + RLIMIT_STACK: {DefaultStackSoftLimit, RLimInfinity}, + RLIMIT_CORE: {0, RLimInfinity}, + RLIMIT_RSS: {RLimInfinity, RLimInfinity}, + RLIMIT_NPROC: {DefaultNprocLimit, DefaultNprocLimit}, + RLIMIT_NOFILE: {DefaultNofileSoftLimit, DefaultNofileHardLimit}, + RLIMIT_MEMLOCK: {DefaultMemlockLimit, DefaultMemlockLimit}, + RLIMIT_AS: {RLimInfinity, RLimInfinity}, + RLIMIT_LOCKS: {RLimInfinity, RLimInfinity}, + RLIMIT_SIGPENDING: {0, 0}, + RLIMIT_MSGQUEUE: {DefaultMsgqueueLimit, DefaultMsgqueueLimit}, + RLIMIT_NICE: {0, 0}, + RLIMIT_RTPRIO: {0, 0}, + RLIMIT_RTTIME: {RLimInfinity, RLimInfinity}, +} diff --git a/pkg/abi/linux/linux.go b/pkg/abi/linux/linux.go new file mode 100644 index 000000000..a946849c5 --- /dev/null +++ b/pkg/abi/linux/linux.go @@ -0,0 +1,38 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
// Package linux contains the constants and types needed to interface with a
// Linux kernel.

// NumSoftIRQ is the number of software IRQs, exposed via /proc/stat.
//
// Defined in linux/interrupt.h.
const NumSoftIRQ = 10

// Sysinfo is the structure provided by sysinfo on linux versions > 2.3.48.
type Sysinfo struct {
	Uptime    int64
	Loads     [3]uint64
	TotalRAM  uint64
	FreeRAM   uint64
	SharedRAM uint64
	BufferRAM uint64
	TotalSwap uint64
	FreeSwap  uint64
	Procs     uint16
	TotalHigh uint64
	FreeHigh  uint64
	Unit      uint32
	// The _f field in the glibc version of Sysinfo has size 0 on AMD64.
}

// Protections for mmap(2).
const (
	PROT_NONE      = 0
	PROT_READ      = 1 << 0
	PROT_WRITE     = 1 << 1
	PROT_EXEC      = 1 << 2
	PROT_SEM       = 1 << 3
	PROT_GROWSDOWN = 1 << 24
	PROT_GROWSUP   = 1 << 25
)

// Flags for mmap(2).
const (
	MAP_SHARED     = 1 << 0
	MAP_PRIVATE    = 1 << 1
	MAP_FIXED      = 1 << 4
	MAP_ANONYMOUS  = 1 << 5
	MAP_GROWSDOWN  = 1 << 8
	MAP_DENYWRITE  = 1 << 11
	MAP_EXECUTABLE = 1 << 12
	MAP_LOCKED     = 1 << 13
	MAP_NORESERVE  = 1 << 14
	MAP_POPULATE   = 1 << 15
	MAP_NONBLOCK   = 1 << 16
	MAP_STACK      = 1 << 17
	MAP_HUGETLB    = 1 << 18
)

// Flags for mremap(2).
+const ( + MREMAP_MAYMOVE = 1 << 0 + MREMAP_FIXED = 1 << 1 +) + +// Advice for madvise(2). +const ( + MADV_NORMAL = 0 + MADV_RANDOM = 1 + MADV_SEQUENTIAL = 2 + MADV_WILLNEED = 3 + MADV_DONTNEED = 4 + MADV_REMOVE = 9 + MADV_DONTFORK = 10 + MADV_DOFORK = 11 + MADV_MERGEABLE = 12 + MADV_UNMERGEABLE = 13 + MADV_HUGEPAGE = 14 + MADV_NOHUGEPAGE = 15 + MADV_DONTDUMP = 16 + MADV_DODUMP = 17 + MADV_HWPOISON = 100 + MADV_SOFT_OFFLINE = 101 + MADV_NOMAJFAULT = 200 + MADV_DONTCHGME = 201 +) + +// Flags for msync(2). +const ( + MS_ASYNC = 1 << 0 + MS_INVALIDATE = 1 << 1 + MS_SYNC = 1 << 2 +) + +// Policies for get_mempolicy(2)/set_mempolicy(2). +const ( + MPOL_DEFAULT = 0 + MPOL_PREFERRED = 1 + MPOL_BIND = 2 + MPOL_INTERLEAVE = 3 + MPOL_LOCAL = 4 + MPOL_MAX = 5 +) + +// Flags for get_mempolicy(2). +const ( + MPOL_F_NODE = 1 << 0 + MPOL_F_ADDR = 1 << 1 + MPOL_F_MEMS_ALLOWED = 1 << 2 +) + +// Flags for set_mempolicy(2). +const ( + MPOL_F_RELATIVE_NODES = 1 << 14 + MPOL_F_STATIC_NODES = 1 << 15 + + MPOL_MODE_FLAGS = (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES) +) diff --git a/pkg/abi/linux/netdevice.go b/pkg/abi/linux/netdevice.go new file mode 100644 index 000000000..88654a1b3 --- /dev/null +++ b/pkg/abi/linux/netdevice.go @@ -0,0 +1,86 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import "gvisor.googlesource.com/gvisor/pkg/binary" + +const ( + // IFNAMSIZ is the size of the name field for IFReq. 
+ IFNAMSIZ = 16 +) + +// IFReq is an interface request. +type IFReq struct { + // IFName is an encoded name, normally null-terminated. This should be + // accessed via the Name and SetName functions. + IFName [IFNAMSIZ]byte + + // Data is the union of the following structures: + // + // struct sockaddr ifr_addr; + // struct sockaddr ifr_dstaddr; + // struct sockaddr ifr_broadaddr; + // struct sockaddr ifr_netmask; + // struct sockaddr ifr_hwaddr; + // short ifr_flags; + // int ifr_ifindex; + // int ifr_metric; + // int ifr_mtu; + // struct ifmap ifr_map; + // char ifr_slave[IFNAMSIZ]; + // char ifr_newname[IFNAMSIZ]; + // char *ifr_data; + Data [24]byte +} + +// Name returns the name. +func (ifr *IFReq) Name() string { + for c := 0; c < len(ifr.IFName); c++ { + if ifr.IFName[c] == 0 { + return string(ifr.IFName[:c]) + } + } + return string(ifr.IFName[:]) +} + +// SetName sets the name. +func (ifr *IFReq) SetName(name string) { + n := copy(ifr.IFName[:], []byte(name)) + for i := n; i < len(ifr.IFName); i++ { + ifr.IFName[i] = 0 + } +} + +// SizeOfIFReq is the binary size of an IFReq struct (40 bytes). +var SizeOfIFReq = binary.Size(IFReq{}) + +// IFMap contains interface hardware parameters. +type IFMap struct { + MemStart uint64 + MemEnd uint64 + BaseAddr int16 + IRQ byte + DMA byte + Port byte + _ [3]byte // Pad to sizeof(struct ifmap). +} + +// IFConf is used to return a list of interfaces and their addresses. See +// netdevice(7) and struct ifconf for more detail on its use. +type IFConf struct { + Len int32 + _ [4]byte // Pad to sizeof(struct ifconf). + Ptr uint64 +} diff --git a/pkg/abi/linux/netlink.go b/pkg/abi/linux/netlink.go new file mode 100644 index 000000000..e823ffa7e --- /dev/null +++ b/pkg/abi/linux/netlink.go @@ -0,0 +1,110 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// Netlink protocols, from uapi/linux/netlink.h.
const (
	NETLINK_ROUTE          = 0
	NETLINK_UNUSED         = 1
	NETLINK_USERSOCK       = 2
	NETLINK_FIREWALL       = 3
	NETLINK_SOCK_DIAG      = 4
	NETLINK_NFLOG          = 5
	NETLINK_XFRM           = 6
	NETLINK_SELINUX        = 7
	NETLINK_ISCSI          = 8
	NETLINK_AUDIT          = 9
	NETLINK_FIB_LOOKUP     = 10
	NETLINK_CONNECTOR      = 11
	NETLINK_NETFILTER      = 12
	NETLINK_IP6_FW         = 13
	NETLINK_DNRTMSG        = 14
	NETLINK_KOBJECT_UEVENT = 15
	NETLINK_GENERIC        = 16
	NETLINK_SCSITRANSPORT  = 18
	NETLINK_ECRYPTFS       = 19
	NETLINK_RDMA           = 20
	NETLINK_CRYPTO         = 21
)

// SockAddrNetlink is struct sockaddr_nl, from uapi/linux/netlink.h.
type SockAddrNetlink struct {
	Family  uint16
	Padding uint16
	PortID  uint32
	Groups  uint32
}

// SockAddrNetlinkSize is the size of SockAddrNetlink.
const SockAddrNetlinkSize = 12

// NetlinkMessageHeader is struct nlmsghdr, from uapi/linux/netlink.h.
type NetlinkMessageHeader struct {
	Length uint32
	Type   uint16
	Flags  uint16
	Seq    uint32
	PortID uint32
}

// NetlinkMessageHeaderSize is the size of NetlinkMessageHeader.
const NetlinkMessageHeaderSize = 16

// Netlink message header flags, from uapi/linux/netlink.h. The NLM_F_ROOT
// group of values is overloaded: the GET-request modifiers share values with
// the NEW-request modifiers below them.
const (
	NLM_F_REQUEST   = 0x1
	NLM_F_MULTI     = 0x2
	NLM_F_ACK       = 0x4
	NLM_F_ECHO      = 0x8
	NLM_F_DUMP_INTR = 0x10
	NLM_F_ROOT      = 0x100
	NLM_F_MATCH     = 0x200
	NLM_F_ATOMIC    = 0x400
	NLM_F_DUMP      = NLM_F_ROOT | NLM_F_MATCH
	NLM_F_REPLACE   = 0x100
	NLM_F_EXCL      = 0x200
	NLM_F_CREATE    = 0x400
	NLM_F_APPEND    = 0x800
)

// Standard netlink message types, from uapi/linux/netlink.h.
// Standard netlink message types, from uapi/linux/netlink.h.
const (
	NLMSG_NOOP    = 0x1
	NLMSG_ERROR   = 0x2
	NLMSG_DONE    = 0x3
	NLMSG_OVERRUN = 0x4

	// NLMSG_MIN_TYPE is the first value for protocol-level types.
	NLMSG_MIN_TYPE = 0x10
)

// NLMSG_ALIGNTO is the alignment of netlink messages, from
// uapi/linux/netlink.h.
const NLMSG_ALIGNTO = 4

// NetlinkAttrHeader is the header of a netlink attribute, followed by
// payload.
//
// This is struct nlattr, from uapi/linux/netlink.h.
type NetlinkAttrHeader struct {
	Length uint16
	Type   uint16
}

// NetlinkAttrHeaderSize is the size of NetlinkAttrHeader.
const NetlinkAttrHeaderSize = 4

// NLA_ALIGNTO is the alignment of netlink attributes, from
// uapi/linux/netlink.h.
const NLA_ALIGNTO = 4

// Netlink message types for NETLINK_ROUTE sockets, from
// uapi/linux/rtnetlink.h.
// Netlink message types for NETLINK_ROUTE sockets, from
// uapi/linux/rtnetlink.h. Note that the numbering is sparse; Linux groups
// related NEW/DEL/GET/SET values in blocks of four.
const (
	RTM_NEWLINK = 16
	RTM_DELLINK = 17
	RTM_GETLINK = 18
	RTM_SETLINK = 19

	RTM_NEWADDR = 20
	RTM_DELADDR = 21
	RTM_GETADDR = 22

	RTM_NEWROUTE = 24
	RTM_DELROUTE = 25
	RTM_GETROUTE = 26

	RTM_NEWNEIGH = 28
	RTM_DELNEIGH = 29
	RTM_GETNEIGH = 30

	RTM_NEWRULE = 32
	RTM_DELRULE = 33
	RTM_GETRULE = 34

	RTM_NEWQDISC = 36
	RTM_DELQDISC = 37
	RTM_GETQDISC = 38

	RTM_NEWTCLASS = 40
	RTM_DELTCLASS = 41
	RTM_GETTCLASS = 42

	RTM_NEWTFILTER = 44
	RTM_DELTFILTER = 45
	RTM_GETTFILTER = 46

	RTM_NEWACTION = 48
	RTM_DELACTION = 49
	RTM_GETACTION = 50

	RTM_NEWPREFIX = 52

	RTM_GETMULTICAST = 58

	RTM_GETANYCAST = 62

	RTM_NEWNEIGHTBL = 64
	RTM_GETNEIGHTBL = 66
	RTM_SETNEIGHTBL = 67

	RTM_NEWNDUSEROPT = 68

	RTM_NEWADDRLABEL = 72
	RTM_DELADDRLABEL = 73
	RTM_GETADDRLABEL = 74

	RTM_GETDCB = 78
	RTM_SETDCB = 79

	RTM_NEWNETCONF = 80
	RTM_GETNETCONF = 82

	RTM_NEWMDB = 84
	RTM_DELMDB = 85
	RTM_GETMDB = 86

	RTM_NEWNSID = 88
	RTM_DELNSID = 89
	RTM_GETNSID = 90
)

// InterfaceInfoMessage is struct ifinfomsg, from uapi/linux/rtnetlink.h.
type InterfaceInfoMessage struct {
	Family  uint8
	Padding uint8
	Type    uint16
	Index   int32
	Flags   uint32
	Change  uint32
}

// Interface flags, from uapi/linux/if.h.
const (
	IFF_UP          = 1 << 0
	IFF_BROADCAST   = 1 << 1
	IFF_DEBUG       = 1 << 2
	IFF_LOOPBACK    = 1 << 3
	IFF_POINTOPOINT = 1 << 4
	IFF_NOTRAILERS  = 1 << 5
	IFF_RUNNING     = 1 << 6
	IFF_NOARP       = 1 << 7
	IFF_PROMISC     = 1 << 8
	IFF_ALLMULTI    = 1 << 9
	IFF_MASTER      = 1 << 10
	IFF_SLAVE       = 1 << 11
	IFF_MULTICAST   = 1 << 12
	IFF_PORTSEL     = 1 << 13
	IFF_AUTOMEDIA   = 1 << 14
	IFF_DYNAMIC     = 1 << 15
	IFF_LOWER_UP    = 1 << 16
	IFF_DORMANT     = 1 << 17
	IFF_ECHO        = 1 << 18
)

// Interface link attributes, from uapi/linux/if_link.h.
// Interface link attributes, from uapi/linux/if_link.h.
const (
	IFLA_UNSPEC          = 0
	IFLA_ADDRESS         = 1
	IFLA_BROADCAST       = 2
	IFLA_IFNAME          = 3
	IFLA_MTU             = 4
	IFLA_LINK            = 5
	IFLA_QDISC           = 6
	IFLA_STATS           = 7
	IFLA_COST            = 8
	IFLA_PRIORITY        = 9
	IFLA_MASTER          = 10
	IFLA_WIRELESS        = 11
	IFLA_PROTINFO        = 12
	IFLA_TXQLEN          = 13
	IFLA_MAP             = 14
	IFLA_WEIGHT          = 15
	IFLA_OPERSTATE       = 16
	IFLA_LINKMODE        = 17
	IFLA_LINKINFO        = 18
	IFLA_NET_NS_PID      = 19
	IFLA_IFALIAS         = 20
	IFLA_NUM_VF          = 21
	IFLA_VFINFO_LIST     = 22
	IFLA_STATS64         = 23
	IFLA_VF_PORTS        = 24
	IFLA_PORT_SELF       = 25
	IFLA_AF_SPEC         = 26
	IFLA_GROUP           = 27
	IFLA_NET_NS_FD       = 28
	IFLA_EXT_MASK        = 29
	IFLA_PROMISCUITY     = 30
	IFLA_NUM_TX_QUEUES   = 31
	IFLA_NUM_RX_QUEUES   = 32
	IFLA_CARRIER         = 33
	IFLA_PHYS_PORT_ID    = 34
	IFLA_CARRIER_CHANGES = 35
	IFLA_PHYS_SWITCH_ID  = 36
	IFLA_LINK_NETNSID    = 37
	IFLA_PHYS_PORT_NAME  = 38
	IFLA_PROTO_DOWN      = 39
	IFLA_GSO_MAX_SEGS    = 40
	IFLA_GSO_MAX_SIZE    = 41
)

// InterfaceAddrMessage is struct ifaddrmsg, from uapi/linux/if_addr.h.
type InterfaceAddrMessage struct {
	Family    uint8
	PrefixLen uint8
	Flags     uint8
	Scope     uint8
	Index     uint32
}

// Interface address attributes, from uapi/linux/if_addr.h.
const (
	IFA_UNSPEC    = 0
	IFA_ADDRESS   = 1
	IFA_LOCAL     = 2
	IFA_LABEL     = 3
	IFA_BROADCAST = 4
	IFA_ANYCAST   = 5
	IFA_CACHEINFO = 6
	IFA_MULTICAST = 7
	IFA_FLAGS     = 8
)
// PollFD is struct pollfd, used by poll(2)/ppoll(2), from
// uapi/asm-generic/poll.h.
type PollFD struct {
	FD      int32
	Events  int16
	REvents int16
}

// Poll event flags, used by poll(2)/ppoll(2) and/or
// epoll_ctl(2)/epoll_wait(2), from uapi/asm-generic/poll.h.
const (
	POLLIN         = 0x0001
	POLLPRI        = 0x0002
	POLLOUT        = 0x0004
	POLLERR        = 0x0008
	POLLHUP        = 0x0010
	POLLNVAL       = 0x0020
	POLLRDNORM     = 0x0040
	POLLRDBAND     = 0x0080
	POLLWRNORM     = 0x0100
	POLLWRBAND     = 0x0200
	POLLMSG        = 0x0400
	POLLREMOVE     = 0x1000
	POLLRDHUP      = 0x2000
	POLLFREE       = 0x4000
	POLL_BUSY_LOOP = 0x8000
)

// PR_* flags, from <linux/prctl.h>, for prctl(2).
const (
	// PR_SET_PDEATHSIG will set the process' death signal.
	PR_SET_PDEATHSIG = 1

	// PR_GET_PDEATHSIG will get the process' death signal.
	PR_GET_PDEATHSIG = 2

	// PR_GET_KEEPCAPS will get the value of the keep capabilities flag.
	PR_GET_KEEPCAPS = 7

	// PR_SET_KEEPCAPS will set the value of the keep capabilities flag.
	PR_SET_KEEPCAPS = 8

	// PR_SET_NAME will set the process' name.
	PR_SET_NAME = 15

	// PR_GET_NAME will get the process' name.
	PR_GET_NAME = 16

	// PR_SET_MM will modify certain kernel memory map descriptor fields of
	// the calling process. See prctl(2) for more information.
	PR_SET_MM = 35

	// PR_SET_MM_EXE_FILE will supersede the /proc/pid/exe symbolic link with
	// a new one pointing to a new executable file identified by the file
	// descriptor provided in arg3 argument. See prctl(2) for more
	// information.
	PR_SET_MM_EXE_FILE = 13

	// PR_SET_NO_NEW_PRIVS will set the calling thread's no_new_privs bit.
	PR_SET_NO_NEW_PRIVS = 38

	// PR_GET_NO_NEW_PRIVS will get the calling thread's no_new_privs bit.
	PR_GET_NO_NEW_PRIVS = 39

	// PR_SET_SECCOMP will set a process' seccomp mode.
	PR_SET_SECCOMP = 22

	// PR_GET_SECCOMP will get a process' seccomp mode.
	PR_GET_SECCOMP = 21

	// PR_CAPBSET_READ will get the capability bounding set.
	PR_CAPBSET_READ = 23

	// PR_CAPBSET_DROP will set the capability bounding set.
	PR_CAPBSET_DROP = 24
)

// From <asm/prctl.h>. These flags are used in the arch_prctl(2) syscall.
const (
	ARCH_SET_GS = 0x1001
	ARCH_SET_FS = 0x1002
	ARCH_GET_FS = 0x1003
	ARCH_GET_GS = 0x1004
)
+ +package linux + +// Flags that may be used with wait4(2) and getrusage(2). +const ( + // wait4(2) uses this to aggregate RUSAGE_SELF and RUSAGE_CHILDREN. + RUSAGE_BOTH = -0x2 + + // getrusage(2) flags. + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 +) + +// Rusage represents the Linux struct rusage. +type Rusage struct { + UTime Timeval + STime Timeval + MaxRSS int64 + IXRSS int64 + IDRSS int64 + ISRSS int64 + MinFlt int64 + MajFlt int64 + NSwap int64 + InBlock int64 + OuBlock int64 + MsgSnd int64 + MsgRcv int64 + NSignals int64 + NVCSw int64 + NIvCSw int64 +} diff --git a/pkg/abi/linux/sched.go b/pkg/abi/linux/sched.go new file mode 100644 index 000000000..05fda1604 --- /dev/null +++ b/pkg/abi/linux/sched.go @@ -0,0 +1,30 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// Scheduling policies, exposed by sched_getscheduler(2)/sched_setscheduler(2). +const ( + SCHED_NORMAL = 0 + SCHED_FIFO = 1 + SCHED_RR = 2 + SCHED_BATCH = 3 + SCHED_IDLE = 5 + SCHED_DEADLINE = 6 + SCHED_MICROQ = 16 + + // SCHED_RESET_ON_FORK is a flag that indicates that the process is + // reverted back to SCHED_NORMAL on fork. + SCHED_RESET_ON_FORK = 0x40000000 +) diff --git a/pkg/abi/linux/seccomp.go b/pkg/abi/linux/seccomp.go new file mode 100644 index 000000000..a8de9d3d0 --- /dev/null +++ b/pkg/abi/linux/seccomp.go @@ -0,0 +1,38 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// Seccomp constants taken from <linux/seccomp.h>. +const ( + SECCOMP_MODE_NONE = 0 + SECCOMP_MODE_FILTER = 2 + + SECCOMP_RET_KILL = 0x00000000 + SECCOMP_RET_TRAP = 0x00030000 + SECCOMP_RET_ERRNO = 0x00050000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_ALLOW = 0x7fff0000 + + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_DATA = 0x0000ffff + + SECCOMP_SET_MODE_FILTER = 1 + SECCOMP_FILTER_FLAG_TSYNC = 1 +) + +const ( + // AUDIT_ARCH_X86_64 is taken from <linux/audit.h>. + AUDIT_ARCH_X86_64 = 0xc000003e +) diff --git a/pkg/abi/linux/sem.go b/pkg/abi/linux/sem.go new file mode 100644 index 000000000..f8d8debf1 --- /dev/null +++ b/pkg/abi/linux/sem.go @@ -0,0 +1,53 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// semctl Command Definitions. 
Source: //include/uapi/linux/sem.h +const ( + GETPID = 11 + GETVAL = 12 + GETALL = 13 + GETNCNT = 14 + GETZCNT = 15 + SETVAL = 16 + SETALL = 17 +) + +// ipcs ctl cmds. Source: //include/uapi/linux/sem.h +const ( + SEM_STAT = 18 + SEM_INFO = 19 +) + +const SEM_UNDO = 0x1000 + +// SemidDS is equivalent to struct semid_ds. +type SemidDS struct { + SemPerm IPCPerm + SemOTime TimeT + reserved1 uint32 + SemCTime TimeT + reserved2 uint32 + SemNSems uint32 + reserved3 uint32 + reserved4 uint32 +} + +// Sembuf is equivalent to struct sembuf. +type Sembuf struct { + SemNum uint16 + SemOp int16 + SemFlg int16 +} diff --git a/pkg/abi/linux/signal.go b/pkg/abi/linux/signal.go new file mode 100644 index 000000000..cd09008b5 --- /dev/null +++ b/pkg/abi/linux/signal.go @@ -0,0 +1,177 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "gvisor.googlesource.com/gvisor/pkg/bits" +) + +const ( + // SignalMaximum is the highest valid signal number. + SignalMaximum = 64 + + // FirstStdSignal is the lowest standard signal number. + FirstStdSignal = 1 + + // LastStdSignal is the highest standard signal number. + LastStdSignal = 31 + + // FirstRTSignal is the lowest real-time signal number. + // + // 32 (SIGCANCEL) and 33 (SIGSETXID) are used internally by glibc. + FirstRTSignal = 32 + + // LastRTSignal is the highest real-time signal number. + LastRTSignal = 64 + + // NumStdSignals is the number of standard signals. 
+ NumStdSignals = LastStdSignal - FirstStdSignal + 1 + + // NumRTSignals is the number of realtime signals. + NumRTSignals = LastRTSignal - FirstRTSignal + 1 +) + +// Signal is a signal number. +type Signal int + +// IsValid returns true if s is a valid standard or realtime signal. (0 is not +// considered valid; interfaces special-casing signal number 0 should check for +// 0 first before asserting validity.) +func (s Signal) IsValid() bool { + return s > 0 && s <= SignalMaximum +} + +// IsStandard returns true if s is a standard signal. +// +// Preconditions: s.IsValid(). +func (s Signal) IsStandard() bool { + return s <= LastStdSignal +} + +// IsRealtime returns true if s is a realtime signal. +// +// Preconditions: s.IsValid(). +func (s Signal) IsRealtime() bool { + return s >= FirstRTSignal +} + +// Index returns the index for signal s into arrays of both standard and +// realtime signals (e.g. signal masks). +// +// Preconditions: s.IsValid(). +func (s Signal) Index() int { + return int(s - 1) +} + +// Signals. +const ( + SIGABRT = Signal(6) + SIGALRM = Signal(14) + SIGBUS = Signal(7) + SIGCHLD = Signal(17) + SIGCLD = Signal(17) + SIGCONT = Signal(18) + SIGFPE = Signal(8) + SIGHUP = Signal(1) + SIGILL = Signal(4) + SIGINT = Signal(2) + SIGIO = Signal(29) + SIGIOT = Signal(6) + SIGKILL = Signal(9) + SIGPIPE = Signal(13) + SIGPOLL = Signal(29) + SIGPROF = Signal(27) + SIGPWR = Signal(30) + SIGQUIT = Signal(3) + SIGSEGV = Signal(11) + SIGSTKFLT = Signal(16) + SIGSTOP = Signal(19) + SIGSYS = Signal(31) + SIGTERM = Signal(15) + SIGTRAP = Signal(5) + SIGTSTP = Signal(20) + SIGTTIN = Signal(21) + SIGTTOU = Signal(22) + SIGUNUSED = Signal(31) + SIGURG = Signal(23) + SIGUSR1 = Signal(10) + SIGUSR2 = Signal(12) + SIGVTALRM = Signal(26) + SIGWINCH = Signal(28) + SIGXCPU = Signal(24) + SIGXFSZ = Signal(25) +) + +// SignalSet is a signal mask with a bit corresponding to each signal. +type SignalSet uint64 + +// SignalSetSize is the size in bytes of a SignalSet. 
+const SignalSetSize = 8
+
+// MakeSignalSet returns SignalSet with the bit corresponding to each of the
+// given signals set.
+func MakeSignalSet(sigs ...Signal) SignalSet {
+	indices := make([]int, len(sigs))
+	for i, sig := range sigs {
+		indices[i] = sig.Index()
+	}
+	return SignalSet(bits.Mask64(indices...))
+}
+
+// SignalSetOf returns a SignalSet with a single signal set.
+func SignalSetOf(sig Signal) SignalSet {
+	return SignalSet(bits.MaskOf64(sig.Index()))
+}
+
+// ForEachSignal invokes f for each signal set in the given mask.
+func ForEachSignal(mask SignalSet, f func(sig Signal)) {
+	bits.ForEachSetBit64(uint64(mask), func(i int) {
+		f(Signal(i + 1))
+	})
+}
+
+// 'how' values for rt_sigprocmask(2).
+const (
+	// SIG_BLOCK blocks the signals in the set.
+	SIG_BLOCK = 0
+
+	// SIG_UNBLOCK unblocks the signals in the set.
+	SIG_UNBLOCK = 1
+
+	// SIG_SETMASK sets the signal mask to set.
+	SIG_SETMASK = 2
+)
+
+// Signal actions for rt_sigaction(2), from uapi/asm-generic/signal-defs.h.
+const (
+	// SIG_DFL performs the default action.
+	SIG_DFL = 0
+
+	// SIG_IGN ignores the signal.
+	SIG_IGN = 1
+)
+
+// Signal action flags for rt_sigaction(2), from uapi/asm-generic/signal.h
+const (
+	SA_NOCLDSTOP   = 0x00000001
+	SA_NOCLDWAIT   = 0x00000002
+	SA_SIGINFO     = 0x00000004
+	SA_ONSTACK     = 0x08000000
+	SA_RESTART     = 0x10000000
+	SA_NODEFER     = 0x40000000
+	SA_RESTARTHAND = 0x80000000
+	SA_NOMASK      = SA_NODEFER
+	SA_ONESHOT     = SA_RESTARTHAND
+)
diff --git a/pkg/abi/linux/socket.go b/pkg/abi/linux/socket.go
new file mode 100644
index 000000000..9a78cc131
--- /dev/null
+++ b/pkg/abi/linux/socket.go
@@ -0,0 +1,290 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import "gvisor.googlesource.com/gvisor/pkg/binary" + +// Address families, from linux/socket.h. +const ( + AF_UNSPEC = 0 + AF_UNIX = 1 + AF_INET = 2 + AF_AX25 = 3 + AF_IPX = 4 + AF_APPLETALK = 5 + AF_NETROM = 6 + AF_BRIDGE = 7 + AF_ATMPVC = 8 + AF_X25 = 9 + AF_INET6 = 10 + AF_ROSE = 11 + AF_DECnet = 12 + AF_NETBEUI = 13 + AF_SECURITY = 14 + AF_KEY = 15 + AF_NETLINK = 16 + AF_PACKET = 17 + AF_ASH = 18 + AF_ECONET = 19 + AF_ATMSVC = 20 + AF_RDS = 21 + AF_SNA = 22 + AF_IRDA = 23 + AF_PPPOX = 24 + AF_WANPIPE = 25 + AF_LLC = 26 + AF_IB = 27 + AF_MPLS = 28 + AF_CAN = 29 + AF_TIPC = 30 + AF_BLUETOOTH = 31 + AF_IUCV = 32 + AF_RXRPC = 33 + AF_ISDN = 34 + AF_PHONET = 35 + AF_IEEE802154 = 36 + AF_CAIF = 37 + AF_ALG = 38 + AF_NFC = 39 + AF_VSOCK = 40 +) + +// sendmsg(2)/recvmsg(2) flags, from linux/socket.h. +const ( + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_DONTROUTE = 0x4 + MSG_TRYHARD = 0x4 + MSG_CTRUNC = 0x8 + MSG_PROBE = 0x10 + MSG_TRUNC = 0x20 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_WAITALL = 0x100 + MSG_FIN = 0x200 + MSG_EOF = MSG_FIN + MSG_SYN = 0x400 + MSG_CONFIRM = 0x800 + MSG_RST = 0x1000 + MSG_ERRQUEUE = 0x2000 + MSG_NOSIGNAL = 0x4000 + MSG_MORE = 0x8000 + MSG_WAITFORONE = 0x10000 + MSG_SENDPAGE_NOTLAST = 0x20000 + MSG_REINJECT = 0x8000000 + MSG_ZEROCOPY = 0x4000000 + MSG_FASTOPEN = 0x20000000 + MSG_CMSG_CLOEXEC = 0x40000000 +) + +// SOL_SOCKET is from socket.h +const SOL_SOCKET = 1 + +// Socket types, from linux/net.h. 
+const (
+	SOCK_STREAM    = 1
+	SOCK_DGRAM     = 2
+	SOCK_RAW       = 3
+	SOCK_RDM       = 4
+	SOCK_SEQPACKET = 5
+	SOCK_DCCP      = 6
+	SOCK_PACKET    = 10
+)
+
+// SOCK_TYPE_MASK covers all of the above socket types. The remaining bits are
+// flags. From linux/net.h.
+const SOCK_TYPE_MASK = 0xf
+
+// socket(2)/socketpair(2)/accept4(2) flags, from linux/net.h.
+const (
+	SOCK_CLOEXEC  = O_CLOEXEC
+	SOCK_NONBLOCK = O_NONBLOCK
+)
+
+// shutdown(2) how commands, from <linux/net.h>.
+const (
+	SHUT_RD   = 0
+	SHUT_WR   = 1
+	SHUT_RDWR = 2
+)
+
+// Socket options from socket.h.
+const (
+	SO_ERROR       = 4
+	SO_KEEPALIVE   = 9
+	SO_LINGER      = 13
+	SO_MARK        = 36
+	SO_PASSCRED    = 16
+	SO_PEERCRED    = 17
+	SO_PEERNAME    = 28
+	SO_PROTOCOL    = 38
+	SO_RCVBUF      = 8
+	SO_RCVTIMEO    = 20
+	SO_REUSEADDR   = 2
+	SO_SNDBUF      = 7
+	SO_SNDTIMEO    = 21
+	SO_TIMESTAMP   = 29
+	SO_TIMESTAMPNS = 35
+	SO_TYPE        = 3
+)
+
+// SockAddrInet is struct sockaddr_in, from uapi/linux/in.h.
+type SockAddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte
+	Zero   [8]uint8 // pad to sizeof(struct sockaddr).
+}
+
+// SockAddrInet6 is struct sockaddr_in6, from uapi/linux/in6.h.
+type SockAddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte
+	Scope_id uint32
+}
+
+// UnixPathMax is the maximum length of the path in an AF_UNIX socket.
+//
+// From uapi/linux/un.h.
+const UnixPathMax = 108
+
+// SockAddrUnix is struct sockaddr_un, from uapi/linux/un.h.
+type SockAddrUnix struct {
+	Family uint16
+	Path   [UnixPathMax]int8
+}
+
+// TCPInfo is a collection of TCP statistics.
+//
+// From uapi/linux/tcp.h.
+type TCPInfo struct {
+	State       uint8
+	CaState     uint8
+	Retransmits uint8
+	Probes      uint8
+	Backoff     uint8
+	Options     uint8
+	// WindowScale is the combination of snd_wscale (first 4 bits) and rcv_wscale (second 4 bits)
+	WindowScale uint8
+	// DeliveryRateAppLimited is a boolean and only the first bit is meaningful.
+ DeliveryRateAppLimited uint8 + + RTO uint32 + ATO uint32 + SndMss uint32 + RcvMss uint32 + + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + + // Times. + LastDataSent uint32 + LastAckSent uint32 + LastDataRecv uint32 + LastAckRecv uint32 + + // Metrics. + PMTU uint32 + RcvSsthresh uint32 + RTT uint32 + RTTVar uint32 + SndSsthresh uint32 + SndCwnd uint32 + Advmss uint32 + Reordering uint32 + + RcvRTT uint32 + RcvSpace uint32 + + TotalRetrans uint32 + + PacingRate uint64 + MaxPacingRate uint64 + // BytesAcked is RFC4898 tcpEStatsAppHCThruOctetsAcked. + BytesAcked uint64 + // BytesReceived is RFC4898 tcpEStatsAppHCThruOctetsReceived. + BytesReceived uint64 + // SegsOut is RFC4898 tcpEStatsPerfSegsOut. + SegsOut uint32 + // SegsIn is RFC4898 tcpEStatsPerfSegsIn. + SegsIn uint32 + + NotSentBytes uint32 + MinRTT uint32 + // DataSegsIn is RFC4898 tcpEStatsDataSegsIn. + DataSegsIn uint32 + // DataSegsOut is RFC4898 tcpEStatsDataSegsOut. + DataSegsOut uint32 + + DeliveryRate uint64 + + // BusyTime is the time in microseconds busy sending data. + BusyTime uint64 + // RwndLimited is the time in microseconds limited by receive window. + RwndLimited uint64 + // SndBufLimited is the time in microseconds limited by send buffer. + SndBufLimited uint64 +} + +// SizeOfTCPInfo is the binary size of a TCPInfo struct (104 bytes). +var SizeOfTCPInfo = binary.Size(TCPInfo{}) + +// Control message types, from linux/socket.h. +const ( + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 +) + +// A ControlMessageHeader is the header for a socket control message. +// +// ControlMessageHeader represents struct cmsghdr from linux/socket.h. +type ControlMessageHeader struct { + Length uint64 + Level int32 + Type int32 +} + +// SizeOfControlMessageHeader is the binary size of a ControlMessageHeader +// struct. +var SizeOfControlMessageHeader = int(binary.Size(ControlMessageHeader{})) + +// A ControlMessageCredentials is an SCM_CREDENTIALS socket control message. 
+// +// ControlMessageCredentials represents struct ucred from linux/socket.h. +type ControlMessageCredentials struct { + PID int32 + UID uint32 + GID uint32 +} + +// SizeOfControlMessageCredentials is the binary size of a +// ControlMessageCredentials struct. +var SizeOfControlMessageCredentials = int(binary.Size(ControlMessageCredentials{})) + +// A ControlMessageRights is an SCM_RIGHTS socket control message. +type ControlMessageRights []int32 + +// SizeOfControlMessageRight is the size of a single element in +// ControlMessageRights. +const SizeOfControlMessageRight = 4 + +// SCM_MAX_FD is the maximum number of FDs accepted in a single sendmsg call. +// From net/scm.h. +const SCM_MAX_FD = 253 diff --git a/pkg/abi/linux/time.go b/pkg/abi/linux/time.go new file mode 100644 index 000000000..9109a2848 --- /dev/null +++ b/pkg/abi/linux/time.go @@ -0,0 +1,224 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "math" + "time" +) + +const ( + // ClockTick is the length of time represented by a single clock tick, as + // used by times(2) and /proc/[pid]/stat. + ClockTick = time.Second / CLOCKS_PER_SEC + + // CLOCKS_PER_SEC is the number of ClockTicks per second. + // + // Linux defines this to be 100 on most architectures, irrespective of + // CONFIG_HZ. 
Userspace obtains the value through sysconf(_SC_CLK_TCK), + // which uses the AT_CLKTCK entry in the auxiliary vector if one is + // provided, and assumes 100 otherwise (glibc: + // sysdeps/posix/sysconf.c:__sysconf() => + // sysdeps/unix/sysv/linux/getclktck.c, elf/dl-support.c:_dl_aux_init()). + // + // Not to be confused with POSIX CLOCKS_PER_SEC, as used by clock(3); "XSI + // requires that [POSIX] CLOCKS_PER_SEC equals 1000000 independent of the + // actual resolution" - clock(3). + CLOCKS_PER_SEC = 100 +) + +// CPU clock types for use with clock_gettime(2) et al. +// +// The 29 most significant bits of a 32 bit clock ID are either a PID or a FD. +// +// Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3. +// +// Bit 2 indicates whether a cpu clock refers to a thread or a process. +const ( + CPUCLOCK_PROF = 0 + CPUCLOCK_VIRT = 1 + CPUCLOCK_SCHED = 2 + CPUCLOCK_MAX = 3 + CLOCKFD = CPUCLOCK_MAX + + CPUCLOCK_CLOCK_MASK = 3 + CPUCLOCK_PERTHREAD_MASK = 4 +) + +// Clock identifiers for use with clock_gettime(2), clock_getres(2), +// clock_nanosleep(2). +const ( + CLOCK_REALTIME = 0 + CLOCK_MONOTONIC = 1 + CLOCK_PROCESS_CPUTIME_ID = 2 + CLOCK_THREAD_CPUTIME_ID = 3 + CLOCK_MONOTONIC_RAW = 4 + CLOCK_REALTIME_COARSE = 5 + CLOCK_MONOTONIC_COARSE = 6 + CLOCK_BOOTTIME = 7 + CLOCK_REALTIME_ALARM = 8 + CLOCK_BOOTTIME_ALARM = 9 +) + +// Flags for clock_nanosleep(2). +const ( + TIMER_ABSTIME = 1 +) + +// Flags for timerfd syscalls (timerfd_create(2), timerfd_settime(2)). +const ( + // TFD_CLOEXEC is a timerfd_create flag. + TFD_CLOEXEC = O_CLOEXEC + + // TFD_NONBLOCK is a timerfd_create flag. + TFD_NONBLOCK = O_NONBLOCK + + // TFD_TIMER_ABSTIME is a timerfd_settime flag. + TFD_TIMER_ABSTIME = 1 +) + +// The safe number of seconds you can represent by int64. +const maxSecInDuration = math.MaxInt64 / int64(time.Second) + +// TimeT represents time_t in <time.h>. It represents time in seconds. +type TimeT int64 + +// NsecToTimeT translates nanoseconds to TimeT (seconds). 
+func NsecToTimeT(nsec int64) TimeT { + return TimeT(nsec / 1e9) +} + +// Timespec represents struct timespec in <time.h>. +type Timespec struct { + Sec int64 + Nsec int64 +} + +// Unix returns the second and nanosecond. +func (ts Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +// ToTime returns the Go time.Time representation. +func (ts Timespec) ToTime() time.Time { + return time.Unix(ts.Sec, ts.Nsec) +} + +// ToNsec returns the nanosecond representation. +func (ts Timespec) ToNsec() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +// ToNsecCapped returns the safe nanosecond representation. +func (ts Timespec) ToNsecCapped() int64 { + if ts.Sec > maxSecInDuration { + return math.MaxInt64 + } + return ts.ToNsec() +} + +// ToDuration returns the safe nanosecond representation as time.Duration. +func (ts Timespec) ToDuration() time.Duration { + return time.Duration(ts.ToNsecCapped()) +} + +// Valid returns whether the timespec contains valid values. +func (ts Timespec) Valid() bool { + return !(ts.Sec < 0 || ts.Nsec < 0 || ts.Nsec >= int64(time.Second)) +} + +// NsecToTimespec translates nanoseconds to Timespec. +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +// DurationToTimespec translates time.Duration to Timespec. +func DurationToTimespec(dur time.Duration) Timespec { + return NsecToTimespec(dur.Nanoseconds()) +} + +// SizeOfTimeval is the size of a Timeval struct in bytes. +const SizeOfTimeval = 16 + +// Timeval represents struct timeval in <time.h>. +type Timeval struct { + Sec int64 + Usec int64 +} + +// ToNsecCapped returns the safe nanosecond representation. +func (tv Timeval) ToNsecCapped() int64 { + if tv.Sec > maxSecInDuration { + return math.MaxInt64 + } + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 +} + +// ToDuration returns the safe nanosecond representation as a time.Duration. 
+func (tv Timeval) ToDuration() time.Duration { + return time.Duration(tv.ToNsecCapped()) +} + +// ToTime returns the Go time.Time representation. +func (tv Timeval) ToTime() time.Time { + return time.Unix(tv.Sec, tv.Usec*1e3) +} + +// NsecToTimeval translates nanosecond to Timeval. +func NsecToTimeval(nsec int64) (tv Timeval) { + nsec += 999 // round up to microsecond + tv.Sec = nsec / 1e9 + tv.Usec = nsec % 1e9 / 1e3 + return +} + +// DurationToTimeval translates time.Duration to Timeval. +func DurationToTimeval(dur time.Duration) Timeval { + return NsecToTimeval(dur.Nanoseconds()) +} + +// Itimerspec represents struct itimerspec in <time.h>. +type Itimerspec struct { + Interval Timespec + Value Timespec +} + +// ItimerVal mimics the following struct in <sys/time.h> +// struct itimerval { +// struct timeval it_interval; /* next value */ +// struct timeval it_value; /* current value */ +// }; +type ItimerVal struct { + Interval Timeval + Value Timeval +} + +// ClockT represents type clock_t. +type ClockT int64 + +// ClockTFromDuration converts time.Duration to clock_t. +func ClockTFromDuration(d time.Duration) ClockT { + return ClockT(d / ClockTick) +} + +// Tms represents struct tms, used by times(2). +type Tms struct { + UTime ClockT + STime ClockT + CUTime ClockT + CSTime ClockT +} diff --git a/pkg/abi/linux/tty.go b/pkg/abi/linux/tty.go new file mode 100644 index 000000000..f9e641af9 --- /dev/null +++ b/pkg/abi/linux/tty.go @@ -0,0 +1,292 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +const ( + // NumControlCharacters is the number of control characters in Termios. + NumControlCharacters = 19 +) + +// Termios is struct termios, defined in uapi/asm-generic/termbits.h. +type Termios struct { + InputFlags uint32 + OutputFlags uint32 + ControlFlags uint32 + LocalFlags uint32 + LineDiscipline uint8 + ControlCharacters [NumControlCharacters]uint8 +} + +// KernelTermios is struct ktermios/struct termios2, defined in +// uapi/asm-generic/termbits.h. +type KernelTermios struct { + InputFlags uint32 + OutputFlags uint32 + ControlFlags uint32 + LocalFlags uint32 + LineDiscipline uint8 + ControlCharacters [NumControlCharacters]uint8 + InputSpeed uint32 + OutputSpeed uint32 +} + +// IEnabled returns whether flag is enabled in termios input flags. +func (t *KernelTermios) IEnabled(flag uint32) bool { + return t.InputFlags&flag == flag +} + +// OEnabled returns whether flag is enabled in termios output flags. +func (t *KernelTermios) OEnabled(flag uint32) bool { + return t.OutputFlags&flag == flag +} + +// CEnabled returns whether flag is enabled in termios control flags. +func (t *KernelTermios) CEnabled(flag uint32) bool { + return t.ControlFlags&flag == flag +} + +// LEnabled returns whether flag is enabled in termios local flags. +func (t *KernelTermios) LEnabled(flag uint32) bool { + return t.LocalFlags&flag == flag +} + +// ToTermios copies fields that are shared with Termios into a new Termios +// struct. +func (t *KernelTermios) ToTermios() Termios { + return Termios{ + InputFlags: t.InputFlags, + OutputFlags: t.OutputFlags, + ControlFlags: t.ControlFlags, + LocalFlags: t.LocalFlags, + LineDiscipline: t.LineDiscipline, + ControlCharacters: t.ControlCharacters, + } +} + +// FromTermios copies fields that are shared with Termios into this +// KernelTermios struct. 
+func (t *KernelTermios) FromTermios(term Termios) { + t.InputFlags = term.InputFlags + t.OutputFlags = term.OutputFlags + t.ControlFlags = term.ControlFlags + t.LocalFlags = term.LocalFlags + t.LineDiscipline = term.LineDiscipline + t.ControlCharacters = term.ControlCharacters +} + +// Input flags. +const ( + IGNBRK = 0000001 + BRKINT = 0000002 + IGNPAR = 0000004 + PARMRK = 0000010 + INPCK = 0000020 + ISTRIP = 0000040 + INLCR = 0000100 + IGNCR = 0000200 + ICRNL = 0000400 + IUCLC = 0001000 + IXON = 0002000 + IXANY = 0004000 + IXOFF = 0010000 + IMAXBEL = 0020000 + IUTF8 = 0040000 +) + +// Output flags. +const ( + OPOST = 0000001 + OLCUC = 0000002 + ONLCR = 0000004 + OCRNL = 0000010 + ONOCR = 0000020 + ONLRET = 0000040 + OFILL = 0000100 + OFDEL = 0000200 + NLDLY = 0000400 + NL0 = 0000000 + NL1 = 0000400 + CRDLY = 0003000 + CR0 = 0000000 + CR1 = 0001000 + CR2 = 0002000 + CR3 = 0003000 + TABDLY = 0014000 + TAB0 = 0000000 + TAB1 = 0004000 + TAB2 = 0010000 + TAB3 = 0014000 + XTABS = 0014000 + BSDLY = 0020000 + BS0 = 0000000 + BS1 = 0020000 + VTDLY = 0040000 + VT0 = 0000000 + VT1 = 0040000 + FFDLY = 0100000 + FF0 = 0000000 + FF1 = 0100000 +) + +// Control flags. 
+const ( + CBAUD = 0010017 + B0 = 0000000 + B50 = 0000001 + B75 = 0000002 + B110 = 0000003 + B134 = 0000004 + B150 = 0000005 + B200 = 0000006 + B300 = 0000007 + B600 = 0000010 + B1200 = 0000011 + B1800 = 0000012 + B2400 = 0000013 + B4800 = 0000014 + B9600 = 0000015 + B19200 = 0000016 + B38400 = 0000017 + EXTA = B19200 + EXTB = B38400 + CSIZE = 0000060 + CS5 = 0000000 + CS6 = 0000020 + CS7 = 0000040 + CS8 = 0000060 + CSTOPB = 0000100 + CREAD = 0000200 + PARENB = 0000400 + PARODD = 0001000 + HUPCL = 0002000 + CLOCAL = 0004000 + CBAUDEX = 0010000 + BOTHER = 0010000 + B57600 = 0010001 + B115200 = 0010002 + B230400 = 0010003 + B460800 = 0010004 + B500000 = 0010005 + B576000 = 0010006 + B921600 = 0010007 + B1000000 = 0010010 + B1152000 = 0010011 + B1500000 = 0010012 + B2000000 = 0010013 + B2500000 = 0010014 + B3000000 = 0010015 + B3500000 = 0010016 + B4000000 = 0010017 + CIBAUD = 002003600000 + CMSPAR = 010000000000 + CRTSCTS = 020000000000 + + // IBSHIFT is the shift from CBAUD to CIBAUD. + IBSHIFT = 16 +) + +// Local flags. +const ( + ISIG = 0000001 + ICANON = 0000002 + XCASE = 0000004 + ECHO = 0000010 + ECHOE = 0000020 + ECHOK = 0000040 + ECHONL = 0000100 + NOFLSH = 0000200 + TOSTOP = 0000400 + ECHOCTL = 0001000 + ECHOPRT = 0002000 + ECHOKE = 0004000 + FLUSHO = 0010000 + PENDIN = 0040000 + IEXTEN = 0100000 + EXTPROC = 0200000 +) + +// Control Character indices. +const ( + VINTR = 0 + VQUIT = 1 + VERASE = 2 + VKILL = 3 + VEOF = 4 + VTIME = 5 + VMIN = 6 + VSWTC = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VEOL = 11 + VREPRINT = 12 + VDISCARD = 13 + VWERASE = 14 + VLNEXT = 15 + VEOL2 = 16 +) + +// ControlCharacter returns the termios-style control character for the passed +// character. +// +// e.g., for Ctrl-C, i.e., ^C, call ControlCharacter('C'). +// +// Standard control characters are ASCII bytes 0 through 31. +func ControlCharacter(c byte) uint8 { + // A is 1, B is 2, etc. 
+ return uint8(c - 'A' + 1) +} + +// DefaultControlCharacters is the default set of Termios control characters. +var DefaultControlCharacters = [NumControlCharacters]uint8{ + ControlCharacter('C'), // VINTR = ^C + ControlCharacter('\\'), // VQUIT = ^\ + '\x7f', // VERASE = DEL + ControlCharacter('U'), // VKILL = ^U + ControlCharacter('D'), // VEOF = ^D + 0, // VTIME + 1, // VMIN + 0, // VSWTC + ControlCharacter('Q'), // VSTART = ^Q + ControlCharacter('S'), // VSTOP = ^S + ControlCharacter('Z'), // VSUSP = ^Z + 0, // VEOL + ControlCharacter('R'), // VREPRINT = ^R + ControlCharacter('O'), // VDISCARD = ^O + ControlCharacter('W'), // VWERASE = ^W + ControlCharacter('V'), // VLNEXT = ^V + 0, // VEOL2 +} + +// MasterTermios is the terminal configuration of the master end of a Unix98 +// pseudoterminal. +var MasterTermios = KernelTermios{ + ControlFlags: B38400 | CS8 | CREAD, + ControlCharacters: DefaultControlCharacters, + InputSpeed: 38400, + OutputSpeed: 38400, +} + +// DefaultSlaveTermios is the default terminal configuration of the slave end +// of a Unix98 pseudoterminal. +var DefaultSlaveTermios = KernelTermios{ + InputFlags: ICRNL | IXON, + OutputFlags: OPOST | ONLCR, + ControlFlags: B38400 | CS8 | CREAD, + LocalFlags: ISIG | ICANON | ECHO | ECHOE | ECHOK | ECHOCTL | ECHOKE | IEXTEN, + ControlCharacters: DefaultControlCharacters, + InputSpeed: 38400, + OutputSpeed: 38400, +} diff --git a/pkg/abi/linux/uio.go b/pkg/abi/linux/uio.go new file mode 100644 index 000000000..93c972774 --- /dev/null +++ b/pkg/abi/linux/uio.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +// UIO_MAXIOV is the maximum number of struct iovecs in a struct iovec array. +const UIO_MAXIOV = 1024 diff --git a/pkg/abi/linux/utsname.go b/pkg/abi/linux/utsname.go new file mode 100644 index 000000000..7d33d20de --- /dev/null +++ b/pkg/abi/linux/utsname.go @@ -0,0 +1,49 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "bytes" + "fmt" +) + +const ( + // UTSLen is the maximum length of strings contained in fields of + // UtsName. + UTSLen = 64 +) + +// UtsName represents struct utsname, the struct returned by uname(2). +type UtsName struct { + Sysname [UTSLen + 1]byte + Nodename [UTSLen + 1]byte + Release [UTSLen + 1]byte + Version [UTSLen + 1]byte + Machine [UTSLen + 1]byte + Domainname [UTSLen + 1]byte +} + +// utsNameString converts a UtsName entry to a string without NULs. +func utsNameString(s [UTSLen + 1]byte) string { + // The NUL bytes will remain even in a cast to string. We must + // explicitly strip them. 
+ return string(bytes.TrimRight(s[:], "\x00")) +} + +func (u UtsName) String() string { + return fmt.Sprintf("{Sysname: %s, Nodename: %s, Release: %s, Version: %s, Machine: %s, Domainname: %s}", + utsNameString(u.Sysname), utsNameString(u.Nodename), utsNameString(u.Release), + utsNameString(u.Version), utsNameString(u.Machine), utsNameString(u.Domainname)) +} diff --git a/pkg/amutex/BUILD b/pkg/amutex/BUILD new file mode 100644 index 000000000..442096319 --- /dev/null +++ b/pkg/amutex/BUILD @@ -0,0 +1,17 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "amutex", + srcs = ["amutex.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/amutex", + visibility = ["//:sandbox"], +) + +go_test( + name = "amutex_test", + size = "small", + srcs = ["amutex_test.go"], + embed = [":amutex"], +) diff --git a/pkg/amutex/amutex.go b/pkg/amutex/amutex.go new file mode 100644 index 000000000..1cb73359a --- /dev/null +++ b/pkg/amutex/amutex.go @@ -0,0 +1,114 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package amutex provides the implementation of an abortable mutex. It allows +// the Lock() function to be canceled while it waits to acquire the mutex. +package amutex + +import ( + "sync/atomic" +) + +// Sleeper must be implemented by users of the abortable mutex to allow for +// cancelation of waits. 
+type Sleeper interface {
+	// SleepStart is called by the AbortableMutex.Lock() function when the
+	// mutex is contended and the goroutine is about to sleep.
+	//
+	// A channel can be returned that causes the sleep to be canceled if
+	// it's readable. If no cancelation is desired, nil can be returned.
+	SleepStart() <-chan struct{}
+
+	// SleepFinish is called by AbortableMutex.Lock() once a contended mutex
+	// is acquired or the wait is aborted.
+	SleepFinish(success bool)
+}
+
+// NoopSleeper is a stateless no-op implementation of Sleeper for anonymous
+// embedding in other types that do not support cancelation.
+type NoopSleeper struct{}
+
+// SleepStart implements Sleeper.SleepStart.
+func (NoopSleeper) SleepStart() <-chan struct{} {
+	return nil
+}
+
+// SleepFinish implements Sleeper.SleepFinish.
+func (NoopSleeper) SleepFinish(success bool) {}
+
+// AbortableMutex is an abortable mutex. It allows Lock() to be aborted while it
+// waits to acquire the mutex.
+type AbortableMutex struct {
+	v  int32
+	ch chan struct{}
+}
+
+// Init initializes the abortable mutex.
+func (m *AbortableMutex) Init() {
+	m.v = 1
+	m.ch = make(chan struct{}, 1)
+}
+
+// Lock attempts to acquire the mutex, returning true on success. If something
+// is written to the "c" while Lock waits, the wait is aborted and false is
+// returned instead.
+func (m *AbortableMutex) Lock(s Sleeper) bool {
+	// Uncontended case.
+	if atomic.AddInt32(&m.v, -1) == 0 {
+		return true
+	}
+
+	var c <-chan struct{}
+	if s != nil {
+		c = s.SleepStart()
+	}
+
+	for {
+		// Try to acquire the mutex again, at the same time making sure
+		// that m.v is negative, which indicates to the owner of the
+		// lock that it is contended, which will force it to try to wake
+		// someone up when it releases the mutex.
+ if v := atomic.LoadInt32(&m.v); v >= 0 && atomic.SwapInt32(&m.v, -1) == 1 { + if s != nil { + s.SleepFinish(true) + } + return true + } + + // Wait for the owner to wake us up before trying again, or for + // the wait to be aborted by the provided channel. + select { + case <-m.ch: + case <-c: + // s must be non-nil, otherwise c would be nil and we'd + // never reach this path. + s.SleepFinish(false) + return false + } + } +} + +// Unlock releases the mutex. +func (m *AbortableMutex) Unlock() { + if atomic.SwapInt32(&m.v, 1) == 0 { + // There were no pending waiters. + return + } + + // Wake some waiter up. + select { + case m.ch <- struct{}{}: + default: + } +} diff --git a/pkg/amutex/amutex_test.go b/pkg/amutex/amutex_test.go new file mode 100644 index 000000000..876e47b19 --- /dev/null +++ b/pkg/amutex/amutex_test.go @@ -0,0 +1,93 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package amutex + +import ( + "sync" + "testing" + "time" +) + +type sleeper struct { + ch chan struct{} +} + +func (s *sleeper) SleepStart() <-chan struct{} { + return s.ch +} + +func (*sleeper) SleepFinish(bool) { +} + +func TestMutualExclusion(t *testing.T) { + var m AbortableMutex + m.Init() + + // Test mutual exclusion by running "gr" goroutines concurrently, and + // have each one increment a counter "iters" times within the critical + // section established by the mutex. 
+ // + // If at the end of the counter is not gr * iters, then we know that + // goroutines ran concurrently within the critical section. + // + // If one of the goroutines doesn't complete, it's likely a bug that + // causes to to wait forever. + const gr = 1000 + const iters = 100000 + v := 0 + var wg sync.WaitGroup + for i := 0; i < gr; i++ { + wg.Add(1) + go func() { + for j := 0; j < iters; j++ { + m.Lock(nil) + v++ + m.Unlock() + } + wg.Done() + }() + } + + wg.Wait() + + if v != gr*iters { + t.Fatalf("Bad count: got %v, want %v", v, gr*iters) + } +} + +func TestAbortWait(t *testing.T) { + var s sleeper + var m AbortableMutex + m.Init() + + // Lock the mutex. + m.Lock(&s) + + // Lock again, but this time cancel after 500ms. + s.ch = make(chan struct{}, 1) + go func() { + time.Sleep(500 * time.Millisecond) + s.ch <- struct{}{} + }() + if v := m.Lock(&s); v { + t.Fatalf("Lock succeeded when it should have failed") + } + + // Lock again, but cancel right away. + s.ch <- struct{}{} + if v := m.Lock(&s); v { + t.Fatalf("Lock succeeded when it should have failed") + } +} diff --git a/pkg/atomicbitops/BUILD b/pkg/atomicbitops/BUILD new file mode 100644 index 000000000..f20a9f855 --- /dev/null +++ b/pkg/atomicbitops/BUILD @@ -0,0 +1,21 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "atomicbitops", + srcs = [ + "atomic_bitops.go", + "atomic_bitops_amd64.s", + "atomic_bitops_common.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/atomicbitops", + visibility = ["//:sandbox"], +) + +go_test( + name = "atomicbitops_test", + size = "small", + srcs = ["atomic_bitops_test.go"], + embed = [":atomicbitops"], +) diff --git a/pkg/atomicbitops/atomic_bitops.go b/pkg/atomicbitops/atomic_bitops.go new file mode 100644 index 000000000..6635ea0d2 --- /dev/null +++ b/pkg/atomicbitops/atomic_bitops.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +// Package atomicbitops provides basic bitwise operations in an atomic way. +// The implementation on amd64 leverages the LOCK prefix directly instead of +// relying on the generic cas primitives. +// +// WARNING: the bitwise ops provided in this package doesn't imply any memory +// ordering. Using them to construct locks must employ proper memory barriers. +package atomicbitops + +// AndUint32 atomically applies bitwise and operation to *addr with val. +func AndUint32(addr *uint32, val uint32) + +// OrUint32 atomically applies bitwise or operation to *addr with val. +func OrUint32(addr *uint32, val uint32) + +// XorUint32 atomically applies bitwise xor operation to *addr with val. +func XorUint32(addr *uint32, val uint32) + +// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns +// the value previously stored at addr. +func CompareAndSwapUint32(addr *uint32, old, new uint32) uint32 + +// AndUint64 atomically applies bitwise and operation to *addr with val. +func AndUint64(addr *uint64, val uint64) + +// OrUint64 atomically applies bitwise or operation to *addr with val. +func OrUint64(addr *uint64, val uint64) + +// XorUint64 atomically applies bitwise xor operation to *addr with val. +func XorUint64(addr *uint64, val uint64) + +// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns +// the value previously stored at addr. 
+func CompareAndSwapUint64(addr *uint64, old, new uint64) uint64 + +// IncUnlessZeroInt32 increments the value stored at the given address and +// returns true; unless the value stored in the pointer is zero, in which case +// it is left unmodified and false is returned. +func IncUnlessZeroInt32(addr *int32) bool + +// DecUnlessOneInt32 decrements the value stored at the given address and +// returns true; unless the value stored in the pointer is 1, in which case it +// is left unmodified and false is returned. +func DecUnlessOneInt32(addr *int32) bool diff --git a/pkg/atomicbitops/atomic_bitops_amd64.s b/pkg/atomicbitops/atomic_bitops_amd64.s new file mode 100644 index 000000000..542452bec --- /dev/null +++ b/pkg/atomicbitops/atomic_bitops_amd64.s @@ -0,0 +1,115 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build amd64 + +#include "textflag.h" + +TEXT ·AndUint32(SB),$0-12 + MOVQ addr+0(FP), BP + MOVL val+8(FP), AX + LOCK + ANDL AX, 0(BP) + RET + +TEXT ·OrUint32(SB),$0-12 + MOVQ addr+0(FP), BP + MOVL val+8(FP), AX + LOCK + ORL AX, 0(BP) + RET + +TEXT ·XorUint32(SB),$0-12 + MOVQ addr+0(FP), BP + MOVL val+8(FP), AX + LOCK + XORL AX, 0(BP) + RET + +TEXT ·CompareAndSwapUint32(SB),$0-20 + MOVQ addr+0(FP), DI + MOVL old+8(FP), AX + MOVL new+12(FP), DX + LOCK + CMPXCHGL DX, 0(DI) + MOVL AX, ret+16(FP) + RET + +TEXT ·AndUint64(SB),$0-16 + MOVQ addr+0(FP), BP + MOVQ val+8(FP), AX + LOCK + ANDQ AX, 0(BP) + RET + +TEXT ·OrUint64(SB),$0-16 + MOVQ addr+0(FP), BP + MOVQ val+8(FP), AX + LOCK + ORQ AX, 0(BP) + RET + +TEXT ·XorUint64(SB),$0-16 + MOVQ addr+0(FP), BP + MOVQ val+8(FP), AX + LOCK + XORQ AX, 0(BP) + RET + +TEXT ·CompareAndSwapUint64(SB),$0-32 + MOVQ addr+0(FP), DI + MOVQ old+8(FP), AX + MOVQ new+16(FP), DX + LOCK + CMPXCHGQ DX, 0(DI) + MOVQ AX, ret+24(FP) + RET + +TEXT ·IncUnlessZeroInt32(SB),NOSPLIT,$0-9 + MOVQ addr+0(FP), DI + MOVL 0(DI), AX + +retry: + TESTL AX, AX + JZ fail + LEAL 1(AX), DX + LOCK + CMPXCHGL DX, 0(DI) + JNZ retry + + SETEQ ret+8(FP) + RET + +fail: + MOVB AX, ret+8(FP) + RET + +TEXT ·DecUnlessOneInt32(SB),NOSPLIT,$0-9 + MOVQ addr+0(FP), DI + MOVL 0(DI), AX + +retry: + LEAL -1(AX), DX + TESTL DX, DX + JZ fail + LOCK + CMPXCHGL DX, 0(DI) + JNZ retry + + SETEQ ret+8(FP) + RET + +fail: + MOVB DX, ret+8(FP) + RET diff --git a/pkg/atomicbitops/atomic_bitops_common.go b/pkg/atomicbitops/atomic_bitops_common.go new file mode 100644 index 000000000..542ff4e83 --- /dev/null +++ b/pkg/atomicbitops/atomic_bitops_common.go @@ -0,0 +1,147 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !amd64 + +package atomicbitops + +import ( + "sync/atomic" +) + +// AndUint32 atomically applies bitwise and operation to *addr with val. +func AndUint32(addr *uint32, val uint32) { + for { + o := atomic.LoadUint32(addr) + n := o & val + if atomic.CompareAndSwapUint32(addr, o, n) { + break + } + } +} + +// OrUint32 atomically applies bitwise or operation to *addr with val. +func OrUint32(addr *uint32, val uint32) { + for { + o := atomic.LoadUint32(addr) + n := o | val + if atomic.CompareAndSwapUint32(addr, o, n) { + break + } + } +} + +// XorUint32 atomically applies bitwise xor operation to *addr with val. +func XorUint32(addr *uint32, val uint32) { + for { + o := atomic.LoadUint32(addr) + n := o ^ val + if atomic.CompareAndSwapUint32(addr, o, n) { + break + } + } +} + +// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns +// the value previously stored at addr. +func CompareAndSwapUint32(addr *uint32, old, new uint32) (prev uint32) { + for { + prev = atomic.LoadUint32(addr) + if prev != old { + return + } + if atomic.CompareAndSwapUint32(addr, old, new) { + return + } + } +} + +// AndUint64 atomically applies bitwise and operation to *addr with val. +func AndUint64(addr *uint64, val uint64) { + for { + o := atomic.LoadUint64(addr) + n := o & val + if atomic.CompareAndSwapUint64(addr, o, n) { + break + } + } +} + +// OrUint64 atomically applies bitwise or operation to *addr with val. 
+func OrUint64(addr *uint64, val uint64) { + for { + o := atomic.LoadUint64(addr) + n := o | val + if atomic.CompareAndSwapUint64(addr, o, n) { + break + } + } +} + +// XorUint64 atomically applies bitwise xor operation to *addr with val. +func XorUint64(addr *uint64, val uint64) { + for { + o := atomic.LoadUint64(addr) + n := o ^ val + if atomic.CompareAndSwapUint64(addr, o, n) { + break + } + } +} + +// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns +// the value previously stored at addr. +func CompareAndSwapUint64(addr *uint64, old, new uint64) (prev uint64) { + for { + prev = atomic.LoadUint64(addr) + if prev != old { + return + } + if atomic.CompareAndSwapUint64(addr, old, new) { + return + } + } +} + +// IncUnlessZeroInt32 increments the value stored at the given address and +// returns true; unless the value stored in the pointer is zero, in which case +// it is left unmodified and false is returned. +func IncUnlessZeroInt32(addr *int32) bool { + for { + v := atomic.LoadInt32(addr) + if v == 0 { + return false + } + + if atomic.CompareAndSwapInt32(addr, v, v+1) { + return true + } + } +} + +// DecUnlessOneInt32 decrements the value stored at the given address and +// returns true; unless the value stored in the pointer is 1, in which case it +// is left unmodified and false is returned. +func DecUnlessOneInt32(addr *int32) bool { + for { + v := atomic.LoadInt32(addr) + if v == 1 { + return false + } + + if atomic.CompareAndSwapInt32(addr, v, v-1) { + return true + } + } +} diff --git a/pkg/atomicbitops/atomic_bitops_test.go b/pkg/atomicbitops/atomic_bitops_test.go new file mode 100644 index 000000000..ec0c07ee2 --- /dev/null +++ b/pkg/atomicbitops/atomic_bitops_test.go @@ -0,0 +1,261 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package atomicbitops + +import ( + "runtime" + "sync" + "testing" +) + +const iterations = 100 + +func detectRaces32(val, target uint32, fn func(*uint32, uint32)) bool { + runtime.GOMAXPROCS(100) + for n := 0; n < iterations; n++ { + x := val + var wg sync.WaitGroup + for i := uint32(0); i < 32; i++ { + wg.Add(1) + go func(a *uint32, i uint32) { + defer wg.Done() + fn(a, uint32(1<<i)) + }(&x, i) + } + wg.Wait() + if x != target { + return true + } + } + return false +} + +func detectRaces64(val, target uint64, fn func(*uint64, uint64)) bool { + runtime.GOMAXPROCS(100) + for n := 0; n < iterations; n++ { + x := val + var wg sync.WaitGroup + for i := uint64(0); i < 64; i++ { + wg.Add(1) + go func(a *uint64, i uint64) { + defer wg.Done() + fn(a, uint64(1<<i)) + }(&x, i) + } + wg.Wait() + if x != target { + return true + } + } + return false +} + +func TestOrUint32(t *testing.T) { + if detectRaces32(0x0, 0xffffffff, OrUint32) { + t.Error("Data race detected!") + } +} + +func TestAndUint32(t *testing.T) { + if detectRaces32(0xf0f0f0f0, 0x00000000, AndUint32) { + t.Error("Data race detected!") + } +} + +func TestXorUint32(t *testing.T) { + if detectRaces32(0xf0f0f0f0, 0x0f0f0f0f, XorUint32) { + t.Error("Data race detected!") + } +} + +func TestOrUint64(t *testing.T) { + if detectRaces64(0x0, 0xffffffffffffffff, OrUint64) { + t.Error("Data race detected!") + } +} + +func TestAndUint64(t *testing.T) { + if detectRaces64(0xf0f0f0f0f0f0f0f0, 0x0, AndUint64) { + t.Error("Data race detected!") + } +} + +func TestXorUint64(t *testing.T) { + if 
detectRaces64(0xf0f0f0f0f0f0f0f0, 0x0f0f0f0f0f0f0f0f, XorUint64) { + t.Error("Data race detected!") + } +} + +func TestCompareAndSwapUint32(t *testing.T) { + tests := []struct { + name string + prev uint32 + old uint32 + new uint32 + next uint32 + }{ + { + name: "Successful compare-and-swap with prev == new", + prev: 10, + old: 10, + new: 10, + next: 10, + }, + { + name: "Successful compare-and-swap with prev != new", + prev: 20, + old: 20, + new: 22, + next: 22, + }, + { + name: "Failed compare-and-swap with prev == new", + prev: 31, + old: 30, + new: 31, + next: 31, + }, + { + name: "Failed compare-and-swap with prev != new", + prev: 41, + old: 40, + new: 42, + next: 41, + }, + } + for _, test := range tests { + val := test.prev + prev := CompareAndSwapUint32(&val, test.old, test.new) + if got, want := prev, test.prev; got != want { + t.Errorf("%s: incorrect returned previous value: got %d, expected %d", test.name, got, want) + } + if got, want := val, test.next; got != want { + t.Errorf("%s: incorrect value stored in val: got %d, expected %d", test.name, got, want) + } + } +} + +func TestCompareAndSwapUint64(t *testing.T) { + tests := []struct { + name string + prev uint64 + old uint64 + new uint64 + next uint64 + }{ + { + name: "Successful compare-and-swap with prev == new", + prev: 0x100000000, + old: 0x100000000, + new: 0x100000000, + next: 0x100000000, + }, + { + name: "Successful compare-and-swap with prev != new", + prev: 0x200000000, + old: 0x200000000, + new: 0x200000002, + next: 0x200000002, + }, + { + name: "Failed compare-and-swap with prev == new", + prev: 0x300000001, + old: 0x300000000, + new: 0x300000001, + next: 0x300000001, + }, + { + name: "Failed compare-and-swap with prev != new", + prev: 0x400000001, + old: 0x400000000, + new: 0x400000002, + next: 0x400000001, + }, + } + for _, test := range tests { + val := test.prev + prev := CompareAndSwapUint64(&val, test.old, test.new) + if got, want := prev, test.prev; got != want { + t.Errorf("%s: 
incorrect returned previous value: got %d, expected %d", test.name, got, want) + } + if got, want := val, test.next; got != want { + t.Errorf("%s: incorrect value stored in val: got %d, expected %d", test.name, got, want) + } + } +} + +func TestIncUnlessZeroInt32(t *testing.T) { + for _, test := range []struct { + initial int32 + final int32 + ret bool + }{ + { + initial: 0, + final: 0, + ret: false, + }, + { + initial: 1, + final: 2, + ret: true, + }, + { + initial: 2, + final: 3, + ret: true, + }, + } { + val := test.initial + if got, want := IncUnlessZeroInt32(&val), test.ret; got != want { + t.Errorf("For initial value of %d: incorrect return value: got %v, wanted %v", test.initial, got, want) + } + if got, want := val, test.final; got != want { + t.Errorf("For initial value of %d: incorrect final value: got %d, wanted %d", test.initial, got, want) + } + } +} + +func TestDecUnlessOneInt32(t *testing.T) { + for _, test := range []struct { + initial int32 + final int32 + ret bool + }{ + { + initial: 0, + final: -1, + ret: true, + }, + { + initial: 1, + final: 1, + ret: false, + }, + { + initial: 2, + final: 1, + ret: true, + }, + } { + val := test.initial + if got, want := DecUnlessOneInt32(&val), test.ret; got != want { + t.Errorf("For initial value of %d: incorrect return value: got %v, wanted %v", test.initial, got, want) + } + if got, want := val, test.final; got != want { + t.Errorf("For initial value of %d: incorrect final value: got %d, wanted %d", test.initial, got, want) + } + } +} diff --git a/pkg/binary/BUILD b/pkg/binary/BUILD new file mode 100644 index 000000000..16f08b13f --- /dev/null +++ b/pkg/binary/BUILD @@ -0,0 +1,17 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "binary", + srcs = ["binary.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/binary", + visibility = ["//:sandbox"], +) + +go_test( + name = "binary_test", + size = "small", + srcs = 
["binary_test.go"], + embed = [":binary"], +) diff --git a/pkg/binary/binary.go b/pkg/binary/binary.go new file mode 100644 index 000000000..df421bdb6 --- /dev/null +++ b/pkg/binary/binary.go @@ -0,0 +1,263 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package binary translates between select fixed-sized types and a binary +// representation. +package binary + +import ( + "encoding/binary" + "fmt" + "io" + "reflect" +) + +// LittleEndian is the same as encoding/binary.LittleEndian. +// +// It is included here as a convenience. +var LittleEndian = binary.LittleEndian + +// BigEndian is the same as encoding/binary.BigEndian. +// +// It is included here as a convenience. +var BigEndian = binary.BigEndian + +// AppendUint16 appends the binary representation of a uint16 to buf. +func AppendUint16(buf []byte, order binary.ByteOrder, num uint16) []byte { + buf = extendZero(2, buf) + order.PutUint16(buf[len(buf)-2:], num) + return buf +} + +// AppendUint32 appends the binary representation of a uint32 to buf. +func AppendUint32(buf []byte, order binary.ByteOrder, num uint32) []byte { + buf = extendZero(4, buf) + order.PutUint32(buf[len(buf)-4:], num) + return buf +} + +// AppendUint64 appends the binary representation of a uint64 to buf. 
+func AppendUint64(buf []byte, order binary.ByteOrder, num uint64) []byte { + buf = extendZero(8, buf) + order.PutUint64(buf[len(buf)-8:], num) + return buf +} + +// Marshal appends a binary representation of data to buf. +// +// data must only contain fixed-length signed and unsigned ints, arrays, +// slices, structs and compositions of said types. data may be a pointer, +// but cannot contain pointers. +func Marshal(buf []byte, order binary.ByteOrder, data interface{}) []byte { + return marshal(buf, order, reflect.Indirect(reflect.ValueOf(data))) +} + +func marshal(buf []byte, order binary.ByteOrder, data reflect.Value) []byte { + switch data.Kind() { + case reflect.Int8: + buf = append(buf, byte(int8(data.Int()))) + case reflect.Int16: + buf = AppendUint16(buf, order, uint16(int16(data.Int()))) + case reflect.Int32: + buf = AppendUint32(buf, order, uint32(int32(data.Int()))) + case reflect.Int64: + buf = AppendUint64(buf, order, uint64(data.Int())) + + case reflect.Uint8: + buf = append(buf, byte(data.Uint())) + case reflect.Uint16: + buf = AppendUint16(buf, order, uint16(data.Uint())) + case reflect.Uint32: + buf = AppendUint32(buf, order, uint32(data.Uint())) + case reflect.Uint64: + buf = AppendUint64(buf, order, data.Uint()) + + case reflect.Array, reflect.Slice: + for i, l := 0, data.Len(); i < l; i++ { + buf = marshal(buf, order, data.Index(i)) + } + + case reflect.Struct: + for i, l := 0, data.NumField(); i < l; i++ { + buf = marshal(buf, order, data.Field(i)) + } + + default: + panic("invalid type: " + data.Type().String()) + } + return buf +} + +// Unmarshal unpacks buf into data. +// +// data must be a slice or a pointer and buf must have a length of exactly +// Size(data). data must only contain fixed-length signed and unsigned ints, +// arrays, slices, structs and compositions of said types. 
+func Unmarshal(buf []byte, order binary.ByteOrder, data interface{}) { + value := reflect.ValueOf(data) + switch value.Kind() { + case reflect.Ptr: + value = value.Elem() + case reflect.Slice: + default: + panic("invalid type: " + value.Type().String()) + } + buf = unmarshal(buf, order, value) + if len(buf) != 0 { + panic(fmt.Sprintf("buffer too long by %d bytes", len(buf))) + } +} + +func unmarshal(buf []byte, order binary.ByteOrder, data reflect.Value) []byte { + switch data.Kind() { + case reflect.Int8: + data.SetInt(int64(int8(buf[0]))) + buf = buf[1:] + case reflect.Int16: + data.SetInt(int64(int16(order.Uint16(buf)))) + buf = buf[2:] + case reflect.Int32: + data.SetInt(int64(int32(order.Uint32(buf)))) + buf = buf[4:] + case reflect.Int64: + data.SetInt(int64(order.Uint64(buf))) + buf = buf[8:] + + case reflect.Uint8: + data.SetUint(uint64(buf[0])) + buf = buf[1:] + case reflect.Uint16: + data.SetUint(uint64(order.Uint16(buf))) + buf = buf[2:] + case reflect.Uint32: + data.SetUint(uint64(order.Uint32(buf))) + buf = buf[4:] + case reflect.Uint64: + data.SetUint(order.Uint64(buf)) + buf = buf[8:] + + case reflect.Array, reflect.Slice: + for i, l := 0, data.Len(); i < l; i++ { + buf = unmarshal(buf, order, data.Index(i)) + } + + case reflect.Struct: + for i, l := 0, data.NumField(); i < l; i++ { + if field := data.Field(i); field.CanSet() { + buf = unmarshal(buf, order, field) + } else { + buf = buf[sizeof(field):] + } + } + + default: + panic("invalid type: " + data.Type().String()) + } + return buf +} + +// Size calculates the buffer sized needed by Marshal or Unmarshal. +// +// Size only support the types supported by Marshal. 
+func Size(v interface{}) uintptr { + return sizeof(reflect.Indirect(reflect.ValueOf(v))) +} + +func sizeof(data reflect.Value) uintptr { + switch data.Kind() { + case reflect.Int8, reflect.Uint8: + return 1 + case reflect.Int16, reflect.Uint16: + return 2 + case reflect.Int32, reflect.Uint32: + return 4 + case reflect.Int64, reflect.Uint64: + return 8 + + case reflect.Array, reflect.Slice: + var size uintptr + for i, l := 0, data.Len(); i < l; i++ { + size += sizeof(data.Index(i)) + } + return size + + case reflect.Struct: + var size uintptr + for i, l := 0, data.NumField(); i < l; i++ { + size += sizeof(data.Field(i)) + } + return size + + default: + panic("invalid type: " + data.Type().String()) + } +} + +func extendZero(amount uintptr, buf []byte) []byte { + for i := uintptr(0); i < amount; i++ { + buf = append(buf, 0) + } + return buf +} + +// ReadUint16 reads a uint16 from r. +func ReadUint16(r io.Reader, order binary.ByteOrder) (uint16, error) { + buf := make([]byte, 2) + if _, err := io.ReadFull(r, buf); err != nil { + return 0, err + } + return order.Uint16(buf), nil +} + +// ReadUint32 reads a uint32 from r. +func ReadUint32(r io.Reader, order binary.ByteOrder) (uint32, error) { + buf := make([]byte, 4) + if _, err := io.ReadFull(r, buf); err != nil { + return 0, err + } + return order.Uint32(buf), nil +} + +// ReadUint64 reads a uint64 from r. +func ReadUint64(r io.Reader, order binary.ByteOrder) (uint64, error) { + buf := make([]byte, 8) + if _, err := io.ReadFull(r, buf); err != nil { + return 0, err + } + return order.Uint64(buf), nil +} + +// WriteUint16 writes a uint16 to w. +func WriteUint16(w io.Writer, order binary.ByteOrder, num uint16) error { + buf := make([]byte, 2) + order.PutUint16(buf, num) + _, err := w.Write(buf) + return err +} + +// WriteUint32 writes a uint32 to w. 
+func WriteUint32(w io.Writer, order binary.ByteOrder, num uint32) error { + buf := make([]byte, 4) + order.PutUint32(buf, num) + _, err := w.Write(buf) + return err +} + +// WriteUint64 writes a uint64 to w. +func WriteUint64(w io.Writer, order binary.ByteOrder, num uint64) error { + buf := make([]byte, 8) + order.PutUint64(buf, num) + _, err := w.Write(buf) + return err +} diff --git a/pkg/binary/binary_test.go b/pkg/binary/binary_test.go new file mode 100644 index 000000000..921a0369a --- /dev/null +++ b/pkg/binary/binary_test.go @@ -0,0 +1,265 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package binary + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "reflect" + "testing" +) + +func newInt32(i int32) *int32 { + return &i +} + +func TestSize(t *testing.T) { + if got, want := Size(uint32(10)), uintptr(4); got != want { + t.Errorf("Got = %d, want = %d", got, want) + } +} + +func TestPanic(t *testing.T) { + tests := []struct { + name string + f func([]byte, binary.ByteOrder, interface{}) + data interface{} + want string + }{ + {"Unmarshal int", Unmarshal, 5, "invalid type: int"}, + {"Unmarshal []int", Unmarshal, []int{5}, "invalid type: int"}, + {"Marshal int", func(_ []byte, bo binary.ByteOrder, d interface{}) { Marshal(nil, bo, d) }, 5, "invalid type: int"}, + {"Marshal int[]", func(_ []byte, bo binary.ByteOrder, d interface{}) { Marshal(nil, bo, d) }, []int{5}, "invalid type: int"}, + {"Unmarshal short buffer", Unmarshal, newInt32(5), "runtime error: index out of range"}, + {"Unmarshal long buffer", func(_ []byte, bo binary.ByteOrder, d interface{}) { Unmarshal(make([]byte, 50), bo, d) }, newInt32(5), "buffer too long by 46 bytes"}, + {"marshal int", func(_ []byte, bo binary.ByteOrder, d interface{}) { marshal(nil, bo, reflect.ValueOf(d)) }, 5, "invalid type: int"}, + {"Size int", func(_ []byte, _ binary.ByteOrder, d interface{}) { Size(d) }, 5, "invalid type: int"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + defer func() { + r := recover() + if got := fmt.Sprint(r); got != test.want { + t.Errorf("Got recover() = %q, want = %q", got, test.want) + } + }() + + test.f(nil, LittleEndian, test.data) + }) + } +} + +type inner struct { + Field int32 +} + +type outer struct { + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + + Slice []int32 + Array [5]int32 + Struct inner +} + +func TestMarshalUnmarshal(t *testing.T) { + want := outer{ + 1, 2, 3, 4, 5, 6, 7, 8, + []int32{9, 10, 11}, + [5]int32{12, 13, 14, 15, 16}, + inner{17}, + } 
+ buf := Marshal(nil, LittleEndian, want) + got := outer{Slice: []int32{0, 0, 0}} + Unmarshal(buf, LittleEndian, &got) + if !reflect.DeepEqual(&got, &want) { + t.Errorf("Got = %#v, want = %#v", got, want) + } +} + +type outerBenchmark struct { + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + + Array [5]int32 + Struct inner +} + +func BenchmarkMarshalUnmarshal(b *testing.B) { + b.ReportAllocs() + + in := outerBenchmark{ + 1, 2, 3, 4, 5, 6, 7, 8, + [5]int32{9, 10, 11, 12, 13}, + inner{14}, + } + buf := make([]byte, Size(&in)) + out := outerBenchmark{} + + for i := 0; i < b.N; i++ { + buf := Marshal(buf[:0], LittleEndian, &in) + Unmarshal(buf, LittleEndian, &out) + } +} + +func BenchmarkReadWrite(b *testing.B) { + b.ReportAllocs() + + in := outerBenchmark{ + 1, 2, 3, 4, 5, 6, 7, 8, + [5]int32{9, 10, 11, 12, 13}, + inner{14}, + } + buf := bytes.NewBuffer(make([]byte, binary.Size(&in))) + out := outerBenchmark{} + + for i := 0; i < b.N; i++ { + buf.Reset() + if err := binary.Write(buf, LittleEndian, &in); err != nil { + b.Error("Write:", err) + } + if err := binary.Read(buf, LittleEndian, &out); err != nil { + b.Error("Read:", err) + } + } +} + +type outerPadding struct { + _ int8 + _ int16 + _ int32 + _ int64 + _ uint8 + _ uint16 + _ uint32 + _ uint64 + + _ []int32 + _ [5]int32 + _ inner +} + +func TestMarshalUnmarshalPadding(t *testing.T) { + var want outerPadding + buf := Marshal(nil, LittleEndian, want) + var got outerPadding + Unmarshal(buf, LittleEndian, &got) + if !reflect.DeepEqual(&got, &want) { + t.Errorf("Got = %#v, want = %#v", got, want) + } +} + +// Numbers with bits in every byte that distinguishable in big and little endian. 
+const ( + want16 = 64<<8 | 128 + want32 = 16<<24 | 32<<16 | want16 + want64 = 1<<56 | 2<<48 | 4<<40 | 8<<32 | want32 +) + +func TestReadWriteUint16(t *testing.T) { + const want = uint16(want16) + var buf bytes.Buffer + if err := WriteUint16(&buf, LittleEndian, want); err != nil { + t.Error("WriteUint16:", err) + } + got, err := ReadUint16(&buf, LittleEndian) + if err != nil { + t.Error("ReadUint16:", err) + } + if got != want { + t.Errorf("got = %d, want = %d", got, want) + } +} + +func TestReadWriteUint32(t *testing.T) { + const want = uint32(want32) + var buf bytes.Buffer + if err := WriteUint32(&buf, LittleEndian, want); err != nil { + t.Error("WriteUint32:", err) + } + got, err := ReadUint32(&buf, LittleEndian) + if err != nil { + t.Error("ReadUint32:", err) + } + if got != want { + t.Errorf("got = %d, want = %d", got, want) + } +} + +func TestReadWriteUint64(t *testing.T) { + const want = uint64(want64) + var buf bytes.Buffer + if err := WriteUint64(&buf, LittleEndian, want); err != nil { + t.Error("WriteUint64:", err) + } + got, err := ReadUint64(&buf, LittleEndian) + if err != nil { + t.Error("ReadUint64:", err) + } + if got != want { + t.Errorf("got = %d, want = %d", got, want) + } +} + +type readWriter struct { + err error +} + +func (rw *readWriter) Write([]byte) (int, error) { + return 0, rw.err +} + +func (rw *readWriter) Read([]byte) (int, error) { + return 0, rw.err +} + +func TestReadWriteError(t *testing.T) { + tests := []struct { + name string + f func(rw io.ReadWriter) error + }{ + {"WriteUint16", func(rw io.ReadWriter) error { return WriteUint16(rw, LittleEndian, 0) }}, + {"ReadUint16", func(rw io.ReadWriter) error { _, err := ReadUint16(rw, LittleEndian); return err }}, + {"WriteUint32", func(rw io.ReadWriter) error { return WriteUint32(rw, LittleEndian, 0) }}, + {"ReadUint32", func(rw io.ReadWriter) error { _, err := ReadUint32(rw, LittleEndian); return err }}, + {"WriteUint64", func(rw io.ReadWriter) error { return WriteUint64(rw, 
LittleEndian, 0) }}, + {"ReadUint64", func(rw io.ReadWriter) error { _, err := ReadUint64(rw, LittleEndian); return err }}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + want := errors.New("want") + if got := test.f(&readWriter{want}); got != want { + t.Errorf("got = %v, want = %v", got, want) + } + }) + } +} diff --git a/pkg/bits/BUILD b/pkg/bits/BUILD new file mode 100644 index 000000000..9897e5dc3 --- /dev/null +++ b/pkg/bits/BUILD @@ -0,0 +1,55 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance") + +go_library( + name = "bits", + srcs = [ + "bits.go", + "bits32.go", + "bits64.go", + "uint64_arch_amd64.go", + "uint64_arch_amd64_asm.s", + "uint64_arch_generic.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/bits", + visibility = ["//:sandbox"], +) + +go_template( + name = "bits_template", + srcs = ["bits_template.go"], + types = [ + "T", + ], +) + +go_template_instance( + name = "bits64", + out = "bits64.go", + package = "bits", + suffix = "64", + template = ":bits_template", + types = { + "T": "uint64", + }, +) + +go_template_instance( + name = "bits32", + out = "bits32.go", + package = "bits", + suffix = "32", + template = ":bits_template", + types = { + "T": "uint32", + }, +) + +go_test( + name = "bits_test", + size = "small", + srcs = ["uint64_test.go"], + embed = [":bits"], +) diff --git a/pkg/bits/bits.go b/pkg/bits/bits.go new file mode 100644 index 000000000..50ca4bff7 --- /dev/null +++ b/pkg/bits/bits.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package bits includes all bit related types and operations. +package bits diff --git a/pkg/bits/bits_template.go b/pkg/bits/bits_template.go new file mode 100644 index 000000000..0a01f29c2 --- /dev/null +++ b/pkg/bits/bits_template.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bits + +// Non-atomic bit operations on a template type T. + +// T is a required type parameter that must be an integral type. +type T uint64 + +// IsOn returns true if *all* bits set in 'bits' are set in 'mask'. +func IsOn(mask, bits T) bool { + return mask&bits == bits +} + +// IsAnyOn returns true if *any* bit set in 'bits' is set in 'mask'. +func IsAnyOn(mask, bits T) bool { + return mask&bits != 0 +} + +// Mask returns a T with all of the given bits set. +func Mask(is ...int) T { + ret := T(0) + for _, i := range is { + ret |= MaskOf(i) + } + return ret +} + +// MaskOf is like Mask, but sets only a single bit (more efficiently). 
+func MaskOf(i int) T { + return T(1) << T(i) +} diff --git a/pkg/bits/uint64_arch_amd64.go b/pkg/bits/uint64_arch_amd64.go new file mode 100644 index 000000000..068597f68 --- /dev/null +++ b/pkg/bits/uint64_arch_amd64.go @@ -0,0 +1,36 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package bits + +// TrailingZeros64 returns the number of bits before the least significant 1 +// bit in x; in other words, it returns the index of the least significant 1 +// bit in x. If x is 0, TrailingZeros64 returns 64. +func TrailingZeros64(x uint64) int + +// MostSignificantOne64 returns the index of the most significant 1 bit in +// x. If x is 0, MostSignificantOne64 returns 64. +func MostSignificantOne64(x uint64) int + +// ForEachSetBit64 calls f once for each set bit in x, with argument i equal to +// the set bit's index. +func ForEachSetBit64(x uint64, f func(i int)) { + for x != 0 { + i := TrailingZeros64(x) + f(i) + x &^= MaskOf64(i) + } +} diff --git a/pkg/bits/uint64_arch_amd64_asm.s b/pkg/bits/uint64_arch_amd64_asm.s new file mode 100644 index 000000000..33885641a --- /dev/null +++ b/pkg/bits/uint64_arch_amd64_asm.s @@ -0,0 +1,31 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +TEXT ·TrailingZeros64(SB),$0-16 + BSFQ x+0(FP), AX + JNZ end + MOVQ $64, AX +end: + MOVQ AX, ret+8(FP) + RET + +TEXT ·MostSignificantOne64(SB),$0-16 + BSRQ x+0(FP), AX + JNZ end + MOVQ $64, AX +end: + MOVQ AX, ret+8(FP) + RET diff --git a/pkg/bits/uint64_arch_generic.go b/pkg/bits/uint64_arch_generic.go new file mode 100644 index 000000000..862033a4b --- /dev/null +++ b/pkg/bits/uint64_arch_generic.go @@ -0,0 +1,55 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !amd64 + +package bits + +// TrailingZeros64 returns the number of bits before the least significant 1 +// bit in x; in other words, it returns the index of the least significant 1 +// bit in x. If x is 0, TrailingZeros64 returns 64. +func TrailingZeros64(x uint64) int { + if x == 0 { + return 64 + } + i := 0 + for ; x&1 == 0; i++ { + x >>= 1 + } + return i +} + +// MostSignificantOne64 returns the index of the most significant 1 bit in +// x. If x is 0, MostSignificantOne64 returns 64. 
+func MostSignificantOne64(x uint64) int { + if x == 0 { + return 64 + } + i := 63 + for ; x&(1<<63) == 0; i-- { + x <<= 1 + } + return i +} + +// ForEachSetBit64 calls f once for each set bit in x, with argument i equal to +// the set bit's index. +func ForEachSetBit64(x uint64, f func(i int)) { + for i := 0; x != 0; i++ { + if x&1 != 0 { + f(i) + } + x >>= 1 + } +} diff --git a/pkg/bits/uint64_test.go b/pkg/bits/uint64_test.go new file mode 100644 index 000000000..906017e1a --- /dev/null +++ b/pkg/bits/uint64_test.go @@ -0,0 +1,116 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bits + +import ( + "reflect" + "testing" +) + +func TestTrailingZeros64(t *testing.T) { + for i := 0; i <= 64; i++ { + n := uint64(1) << uint(i) + if got, want := TrailingZeros64(n), i; got != want { + t.Errorf("TrailingZeros64(%#x): got %d, wanted %d", n, got, want) + } + } + + for i := 0; i < 64; i++ { + n := ^uint64(0) << uint(i) + if got, want := TrailingZeros64(n), i; got != want { + t.Errorf("TrailingZeros64(%#x): got %d, wanted %d", n, got, want) + } + } + + for i := 0; i < 64; i++ { + n := ^uint64(0) >> uint(i) + if got, want := TrailingZeros64(n), 0; got != want { + t.Errorf("TrailingZeros64(%#x): got %d, wanted %d", n, got, want) + } + } +} + +func TestMostSignificantOne64(t *testing.T) { + for i := 0; i <= 64; i++ { + n := uint64(1) << uint(i) + if got, want := MostSignificantOne64(n), i; got != want { + t.Errorf("MostSignificantOne64(%#x): got %d, wanted %d", n, got, want) + } + } + + for i := 0; i < 64; i++ { + n := ^uint64(0) >> uint(i) + if got, want := MostSignificantOne64(n), 63-i; got != want { + t.Errorf("MostSignificantOne64(%#x): got %d, wanted %d", n, got, want) + } + } + + for i := 0; i < 64; i++ { + n := ^uint64(0) << uint(i) + if got, want := MostSignificantOne64(n), 63; got != want { + t.Errorf("MostSignificantOne64(%#x): got %d, wanted %d", n, got, want) + } + } +} + +func TestForEachSetBit64(t *testing.T) { + for _, want := range [][]int{ + {}, + {0}, + {1}, + {63}, + {0, 1}, + {1, 3, 5}, + {0, 63}, + } { + n := Mask64(want...) + // "Slice values are deeply equal when ... they are both nil or both + // non-nil ..." 
+ got := make([]int, 0) + ForEachSetBit64(n, func(i int) { + got = append(got, i) + }) + if !reflect.DeepEqual(got, want) { + t.Errorf("ForEachSetBit64(%#x): iterated bits %v, wanted %v", n, got, want) + } + } +} + +func TestIsOn(t *testing.T) { + type spec struct { + mask uint64 + bits uint64 + any bool + all bool + } + for _, s := range []spec{ + {Mask64(0), Mask64(0), true, true}, + {Mask64(63), Mask64(63), true, true}, + {Mask64(0), Mask64(1), false, false}, + {Mask64(0), Mask64(0, 1), true, false}, + + {Mask64(1, 63), Mask64(1), true, true}, + {Mask64(1, 63), Mask64(1, 63), true, true}, + {Mask64(1, 63), Mask64(0, 1, 63), true, false}, + {Mask64(1, 63), Mask64(0, 62), false, false}, + } { + if ok := IsAnyOn64(s.mask, s.bits); ok != s.any { + t.Errorf("IsAnyOn(%#x, %#x) = %v, wanted: %v", s.mask, s.bits, ok, s.any) + } + if ok := IsOn64(s.mask, s.bits); ok != s.all { + t.Errorf("IsOn(%#x, %#x) = %v, wanted: %v", s.mask, s.bits, ok, s.all) + } + } +} diff --git a/pkg/bpf/BUILD b/pkg/bpf/BUILD new file mode 100644 index 000000000..d4f12f13a --- /dev/null +++ b/pkg/bpf/BUILD @@ -0,0 +1,46 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "bpf_state", + srcs = [ + "interpreter.go", + ], + out = "bpf_state.go", + package = "bpf", +) + +go_library( + name = "bpf", + srcs = [ + "bpf.go", + "bpf_state.go", + "decoder.go", + "input_bytes.go", + "interpreter.go", + "program_builder.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/bpf", + visibility = ["//visibility:public"], + deps = [ + "//pkg/abi/linux", + "//pkg/state", + ], +) + +go_test( + name = "bpf_test", + size = "small", + srcs = [ + "decoder_test.go", + "interpreter_test.go", + "program_builder_test.go", + ], + embed = [":bpf"], + deps = [ + "//pkg/abi/linux", + "//pkg/binary", + ], +) diff --git a/pkg/bpf/bpf.go b/pkg/bpf/bpf.go new file mode 100644 index 
000000000..757744090 --- /dev/null +++ b/pkg/bpf/bpf.go @@ -0,0 +1,129 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package bpf provides tools for working with Berkeley Packet Filter (BPF) +// programs. More information on BPF can be found at +// https://www.freebsd.org/cgi/man.cgi?bpf(4) +package bpf + +import "gvisor.googlesource.com/gvisor/pkg/abi/linux" + +const ( + // MaxInstructions is the maximum number of instructions in a BPF program, + // and is equal to Linux's BPF_MAXINSNS. + MaxInstructions = 4096 + + // ScratchMemRegisters is the number of M registers in a BPF virtual machine, + // and is equal to Linux's BPF_MEMWORDS. + ScratchMemRegisters = 16 +) + +// Parts of a linux.BPFInstruction.OpCode. Compare to the Linux kernel's +// include/uapi/linux/filter.h. +// +// In the comments below: +// +// - A, X, and M[] are BPF virtual machine registers. +// +// - K refers to the instruction field linux.BPFInstruction.K. +// +// - Bits are counted from the LSB position. +const ( + // Instruction class, stored in bits 0-2. + Ld = 0x00 // load into A + Ldx = 0x01 // load into X + St = 0x02 // store from A + Stx = 0x03 // store from X + Alu = 0x04 // arithmetic + Jmp = 0x05 // jump + Ret = 0x06 // return + Misc = 0x07 + instructionClassMask = 0x07 + + // Size of a load, stored in bits 3-4. 
+ W = 0x00 // 32 bits + H = 0x08 // 16 bits + B = 0x10 // 8 bits + loadSizeMask = 0x18 + + // Source operand for a load, stored in bits 5-7. + // Address mode numbers in the comments come from Linux's + // Documentation/networking/filter.txt. + Imm = 0x00 // immediate value K (mode 4) + Abs = 0x20 // data in input at byte offset K (mode 1) + Ind = 0x40 // data in input at byte offset X+K (mode 2) + Mem = 0x60 // M[K] (mode 3) + Len = 0x80 // length of the input in bytes ("BPF extension len") + Msh = 0xa0 // 4 * lower nibble of input at byte offset K (mode 5) + loadModeMask = 0xe0 + + // Source operands for arithmetic, jump, and return instructions. + // Arithmetic and jump instructions can use K or X as source operands. + // Return instructions can use K or A as source operands. + K = 0x00 // still mode 4 + X = 0x08 // mode 0 + A = 0x10 // mode 9 + srcAluJmpMask = 0x08 + srcRetMask = 0x18 + + // Arithmetic instructions, stored in bits 4-7. + Add = 0x00 + Sub = 0x10 // A - src + Mul = 0x20 + Div = 0x30 // A / src + Or = 0x40 + And = 0x50 + Lsh = 0x60 // A << src + Rsh = 0x70 // A >> src + Neg = 0x80 // -A (src ignored) + Mod = 0x90 // A % src + Xor = 0xa0 + aluMask = 0xf0 + + // Jump instructions, stored in bits 4-7. + Ja = 0x00 // unconditional (uses K for jump offset) + Jeq = 0x10 // if A == src + Jgt = 0x20 // if A > src + Jge = 0x30 // if A >= src + Jset = 0x40 // if (A & src) != 0 + jmpMask = 0xf0 + + // Miscellaneous instructions, stored in bits 3-7. + Tax = 0x00 // A = X + Txa = 0x80 // X = A + miscMask = 0xf8 + + // Masks for bits that should be zero. + unusedBitsMask = 0xff00 // all valid instructions use only bits 0-7 + storeUnusedBitsMask = 0xf8 // stores only use instruction class + retUnusedBitsMask = 0xe0 // returns only use instruction class and source operand +) + +// Stmt returns a linux.BPFInstruction representing a BPF non-jump instruction. 
+func Stmt(code uint16, k uint32) linux.BPFInstruction { + return linux.BPFInstruction{ + OpCode: code, + K: k, + } +} + +// Jump returns a linux.BPFInstruction representing a BPF jump instruction. +func Jump(code uint16, k uint32, jt, jf uint8) linux.BPFInstruction { + return linux.BPFInstruction{ + OpCode: code, + JumpIfTrue: jt, + JumpIfFalse: jf, + K: k, + } +} diff --git a/pkg/bpf/decoder.go b/pkg/bpf/decoder.go new file mode 100644 index 000000000..6873ffa5c --- /dev/null +++ b/pkg/bpf/decoder.go @@ -0,0 +1,248 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bpf + +import ( + "bytes" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +// DecodeProgram translates an array of BPF instructions into text format. +func DecodeProgram(program []linux.BPFInstruction) (string, error) { + var ret bytes.Buffer + for line, s := range program { + ret.WriteString(fmt.Sprintf("%v: ", line)) + if err := decode(s, line, &ret); err != nil { + return "", err + } + ret.WriteString("\n") + } + return ret.String(), nil +} + +// Decode translates BPF instruction into text format. 
+func Decode(inst linux.BPFInstruction) (string, error) { + var ret bytes.Buffer + err := decode(inst, -1, &ret) + return ret.String(), err +} + +func decode(inst linux.BPFInstruction, line int, w *bytes.Buffer) error { + var err error + switch inst.OpCode & instructionClassMask { + case Ld: + err = decodeLd(inst, w) + case Ldx: + err = decodeLdx(inst, w) + case St: + w.WriteString(fmt.Sprintf("M[%v] <- A", inst.K)) + case Stx: + w.WriteString(fmt.Sprintf("M[%v] <- X", inst.K)) + case Alu: + err = decodeAlu(inst, w) + case Jmp: + err = decodeJmp(inst, line, w) + case Ret: + err = decodeRet(inst, w) + case Misc: + err = decodeMisc(inst, w) + default: + return fmt.Errorf("invalid BPF instruction: %v", inst) + } + return err +} + +// A <- P[k:4] +func decodeLd(inst linux.BPFInstruction, w *bytes.Buffer) error { + w.WriteString("A <- ") + + switch inst.OpCode & loadModeMask { + case Imm: + w.WriteString(fmt.Sprintf("%v", inst.K)) + case Abs: + w.WriteString(fmt.Sprintf("P[%v:", inst.K)) + if err := decodeLdSize(inst, w); err != nil { + return err + } + w.WriteString("]") + case Ind: + w.WriteString(fmt.Sprintf("P[X+%v:", inst.K)) + if err := decodeLdSize(inst, w); err != nil { + return err + } + w.WriteString("]") + case Mem: + w.WriteString(fmt.Sprintf("M[%v]", inst.K)) + case Len: + w.WriteString("len") + default: + return fmt.Errorf("invalid BPF LD instruction: %v", inst) + } + return nil +} + +func decodeLdSize(inst linux.BPFInstruction, w *bytes.Buffer) error { + switch inst.OpCode & loadSizeMask { + case W: + w.WriteString("4") + case H: + w.WriteString("2") + case B: + w.WriteString("1") + default: + return fmt.Errorf("Invalid BPF LD size: %v", inst) + } + return nil +} + +// X <- P[k:4] +func decodeLdx(inst linux.BPFInstruction, w *bytes.Buffer) error { + w.WriteString("X <- ") + + switch inst.OpCode & loadModeMask { + case Imm: + w.WriteString(fmt.Sprintf("%v", inst.K)) + case Mem: + w.WriteString(fmt.Sprintf("M[%v]", inst.K)) + case Len: + 
w.WriteString("len") + case Msh: + w.WriteString(fmt.Sprintf("4*(P[%v:1]&0xf)", inst.K)) + default: + return fmt.Errorf("invalid BPF LDX instruction: %v", inst) + } + return nil +} + +// A <- A + k +func decodeAlu(inst linux.BPFInstruction, w *bytes.Buffer) error { + code := inst.OpCode & aluMask + if code == Neg { + w.WriteString("A <- -A") + return nil + } + + w.WriteString("A <- A ") + switch code { + case Add: + w.WriteString("+ ") + case Sub: + w.WriteString("- ") + case Mul: + w.WriteString("* ") + case Div: + w.WriteString("/ ") + case Or: + w.WriteString("| ") + case And: + w.WriteString("& ") + case Lsh: + w.WriteString("<< ") + case Rsh: + w.WriteString(">> ") + case Mod: + w.WriteString("% ") + case Xor: + w.WriteString("^ ") + default: + return fmt.Errorf("invalid BPF ALU instruction: %v", inst) + } + if err := decodeSource(inst, w); err != nil { + return err + } + return nil +} + +func decodeSource(inst linux.BPFInstruction, w *bytes.Buffer) error { + switch inst.OpCode & srcAluJmpMask { + case K: + w.WriteString(fmt.Sprintf("%v", inst.K)) + case X: + w.WriteString("X") + default: + return fmt.Errorf("invalid BPF ALU/JMP source instruction: %v", inst) + } + return nil +} + +// pc += (A > k) ? jt : jf +func decodeJmp(inst linux.BPFInstruction, line int, w *bytes.Buffer) error { + code := inst.OpCode & jmpMask + + w.WriteString("pc += ") + if code == Ja { + w.WriteString(printJmpTarget(inst.K, line)) + } else { + w.WriteString("(A ") + switch code { + case Jeq: + w.WriteString("== ") + case Jgt: + w.WriteString("> ") + case Jge: + w.WriteString(">= ") + case Jset: + w.WriteString("& ") + default: + return fmt.Errorf("invalid BPF ALU instruction: %v", inst) + } + if err := decodeSource(inst, w); err != nil { + return err + } + w.WriteString( + fmt.Sprintf(") ? 
%s : %s", + printJmpTarget(uint32(inst.JumpIfTrue), line), + printJmpTarget(uint32(inst.JumpIfFalse), line))) + } + return nil +} + +func printJmpTarget(target uint32, line int) string { + if line == -1 { + return fmt.Sprintf("%v", target) + } + return fmt.Sprintf("%v [%v]", target, int(target)+line+1) +} + +// ret k +func decodeRet(inst linux.BPFInstruction, w *bytes.Buffer) error { + w.WriteString("ret ") + + code := inst.OpCode & srcRetMask + switch code { + case K: + w.WriteString(fmt.Sprintf("%v", inst.K)) + case A: + w.WriteString("A") + default: + return fmt.Errorf("invalid BPF RET source instruction: %v", inst) + } + return nil +} + +func decodeMisc(inst linux.BPFInstruction, w *bytes.Buffer) error { + code := inst.OpCode & miscMask + switch code { + case Tax: + w.WriteString("X <- A") + case Txa: + w.WriteString("A <- X") + default: + return fmt.Errorf("invalid BPF ALU/JMP source instruction: %v", inst) + } + return nil +} diff --git a/pkg/bpf/decoder_test.go b/pkg/bpf/decoder_test.go new file mode 100644 index 000000000..18709b944 --- /dev/null +++ b/pkg/bpf/decoder_test.go @@ -0,0 +1,146 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bpf + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +func TestDecode(t *testing.T) { + for _, test := range []struct { + filter linux.BPFInstruction + expected string + fail bool + }{ + {filter: Stmt(Ld+Imm, 10), expected: "A <- 10"}, + {filter: Stmt(Ld+Abs+W, 10), expected: "A <- P[10:4]"}, + {filter: Stmt(Ld+Ind+H, 10), expected: "A <- P[X+10:2]"}, + {filter: Stmt(Ld+Ind+B, 10), expected: "A <- P[X+10:1]"}, + {filter: Stmt(Ld+Mem, 10), expected: "A <- M[10]"}, + {filter: Stmt(Ld+Len, 0), expected: "A <- len"}, + {filter: Stmt(Ldx+Imm, 10), expected: "X <- 10"}, + {filter: Stmt(Ldx+Mem, 10), expected: "X <- M[10]"}, + {filter: Stmt(Ldx+Len, 0), expected: "X <- len"}, + {filter: Stmt(Ldx+Msh, 10), expected: "X <- 4*(P[10:1]&0xf)"}, + {filter: Stmt(St, 10), expected: "M[10] <- A"}, + {filter: Stmt(Stx, 10), expected: "M[10] <- X"}, + {filter: Stmt(Alu+Add+K, 10), expected: "A <- A + 10"}, + {filter: Stmt(Alu+Sub+K, 10), expected: "A <- A - 10"}, + {filter: Stmt(Alu+Mul+K, 10), expected: "A <- A * 10"}, + {filter: Stmt(Alu+Div+K, 10), expected: "A <- A / 10"}, + {filter: Stmt(Alu+Or+K, 10), expected: "A <- A | 10"}, + {filter: Stmt(Alu+And+K, 10), expected: "A <- A & 10"}, + {filter: Stmt(Alu+Lsh+K, 10), expected: "A <- A << 10"}, + {filter: Stmt(Alu+Rsh+K, 10), expected: "A <- A >> 10"}, + {filter: Stmt(Alu+Mod+K, 10), expected: "A <- A % 10"}, + {filter: Stmt(Alu+Xor+K, 10), expected: "A <- A ^ 10"}, + {filter: Stmt(Alu+Add+X, 0), expected: "A <- A + X"}, + {filter: Stmt(Alu+Sub+X, 0), expected: "A <- A - X"}, + {filter: Stmt(Alu+Mul+X, 0), expected: "A <- A * X"}, + {filter: Stmt(Alu+Div+X, 0), expected: "A <- A / X"}, + {filter: Stmt(Alu+Or+X, 0), expected: "A <- A | X"}, + {filter: Stmt(Alu+And+X, 0), expected: "A <- A & X"}, + {filter: Stmt(Alu+Lsh+X, 0), expected: "A <- A << X"}, + {filter: Stmt(Alu+Rsh+X, 0), expected: "A <- A >> X"}, + {filter: Stmt(Alu+Mod+X, 0), expected: "A <- A % X"}, + {filter: Stmt(Alu+Xor+X, 
0), expected: "A <- A ^ X"}, + {filter: Stmt(Alu+Neg, 0), expected: "A <- -A"}, + {filter: Stmt(Jmp+Ja, 10), expected: "pc += 10"}, + {filter: Jump(Jmp+Jeq+K, 10, 2, 5), expected: "pc += (A == 10) ? 2 : 5"}, + {filter: Jump(Jmp+Jgt+K, 10, 2, 5), expected: "pc += (A > 10) ? 2 : 5"}, + {filter: Jump(Jmp+Jge+K, 10, 2, 5), expected: "pc += (A >= 10) ? 2 : 5"}, + {filter: Jump(Jmp+Jset+K, 10, 2, 5), expected: "pc += (A & 10) ? 2 : 5"}, + {filter: Jump(Jmp+Jeq+X, 0, 2, 5), expected: "pc += (A == X) ? 2 : 5"}, + {filter: Jump(Jmp+Jgt+X, 0, 2, 5), expected: "pc += (A > X) ? 2 : 5"}, + {filter: Jump(Jmp+Jge+X, 0, 2, 5), expected: "pc += (A >= X) ? 2 : 5"}, + {filter: Jump(Jmp+Jset+X, 0, 2, 5), expected: "pc += (A & X) ? 2 : 5"}, + {filter: Stmt(Ret+K, 10), expected: "ret 10"}, + {filter: Stmt(Ret+A, 0), expected: "ret A"}, + {filter: Stmt(Misc+Tax, 0), expected: "X <- A"}, + {filter: Stmt(Misc+Txa, 0), expected: "A <- X"}, + {filter: Stmt(Ld+Ind+Msh, 0), fail: true}, + } { + got, err := Decode(test.filter) + if test.fail { + if err == nil { + t.Errorf("Decode(%v) failed, expected: 'error', got: %q", test.filter, got) + continue + } + } else { + if err != nil { + t.Errorf("Decode(%v) failed for test %q, error: %q", test.filter, test.expected, err) + continue + } + if got != test.expected { + t.Errorf("Decode(%v) failed, expected: %q, got: %q", test.filter, test.expected, got) + continue + } + } + } +} + +func TestDecodeProgram(t *testing.T) { + for _, test := range []struct { + name string + program []linux.BPFInstruction + expected string + fail bool + }{ + {name: "basic with jump indexes", + program: []linux.BPFInstruction{ + Stmt(Ld+Abs+W, 10), + Stmt(Ldx+Mem, 10), + Stmt(St, 10), + Stmt(Stx, 10), + Stmt(Alu+Add+K, 10), + Stmt(Jmp+Ja, 10), + Jump(Jmp+Jeq+K, 10, 2, 5), + Jump(Jmp+Jset+X, 0, 0, 5), + Stmt(Misc+Tax, 0), + }, + expected: "0: A <- P[10:4]\n" + + "1: X <- M[10]\n" + + "2: M[10] <- A\n" + + "3: M[10] <- X\n" + + "4: A <- A + 10\n" + + "5: pc += 10 [16]\n" + + 
"6: pc += (A == 10) ? 2 [9] : 5 [12]\n" + + "7: pc += (A & X) ? 0 [8] : 5 [13]\n" + + "8: X <- A\n", + }, + {name: "invalid instruction", + program: []linux.BPFInstruction{Stmt(Ld+Abs+W, 10), Stmt(Ld+Len+Mem, 0)}, + fail: true}, + } { + got, err := DecodeProgram(test.program) + if test.fail { + if err == nil { + t.Errorf("%s: Decode(...) failed, expected: 'error', got: %q", test.name, got) + continue + } + } else { + if err != nil { + t.Errorf("%s: Decode failed: %v", test.name, err) + continue + } + if got != test.expected { + t.Errorf("%s: Decode(...) failed, expected: %q, got: %q", test.name, test.expected, got) + continue + } + } + } +} diff --git a/pkg/bpf/input_bytes.go b/pkg/bpf/input_bytes.go new file mode 100644 index 000000000..74af038eb --- /dev/null +++ b/pkg/bpf/input_bytes.go @@ -0,0 +1,58 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bpf + +import ( + "encoding/binary" +) + +// InputBytes implements the Input interface by providing access to a byte +// slice. Unaligned loads are supported. +type InputBytes struct { + // Data is the data accessed through the Input interface. + Data []byte + + // Order is the byte order the data is accessed with. + Order binary.ByteOrder +} + +// Load32 implements Input.Load32. 
+func (i InputBytes) Load32(off uint32) (uint32, bool) { + if uint64(off)+4 > uint64(len(i.Data)) { + return 0, false + } + return i.Order.Uint32(i.Data[int(off):]), true +} + +// Load16 implements Input.Load16. +func (i InputBytes) Load16(off uint32) (uint16, bool) { + if uint64(off)+2 > uint64(len(i.Data)) { + return 0, false + } + return i.Order.Uint16(i.Data[int(off):]), true +} + +// Load8 implements Input.Load8. +func (i InputBytes) Load8(off uint32) (uint8, bool) { + if uint64(off)+1 > uint64(len(i.Data)) { + return 0, false + } + return i.Data[int(off)], true +} + +// Length implements Input.Length. +func (i InputBytes) Length() uint32 { + return uint32(len(i.Data)) +} diff --git a/pkg/bpf/interpreter.go b/pkg/bpf/interpreter.go new file mode 100644 index 000000000..b7dee86a8 --- /dev/null +++ b/pkg/bpf/interpreter.go @@ -0,0 +1,410 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bpf + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +// Possible values for ProgramError.Code. +const ( + // DivisionByZero indicates that a program contains, or executed, a + // division or modulo by zero. + DivisionByZero = iota + + // InvalidEndOfProgram indicates that the last instruction of a program is + // not a return. + InvalidEndOfProgram + + // InvalidInstructionCount indicates that a program has zero instructions + // or more than MaxInstructions instructions. 
+ InvalidInstructionCount + + // InvalidJumpTarget indicates that a program contains a jump whose target + // is outside of the program's bounds. + InvalidJumpTarget + + // InvalidLoad indicates that a program executed an invalid load of input + // data. + InvalidLoad + + // InvalidOpcode indicates that a program contains an instruction with an + // invalid opcode. + InvalidOpcode + + // InvalidRegister indicates that a program contains a load from, or store + // to, a non-existent M register (index >= ScratchMemRegisters). + InvalidRegister +) + +// Error is an error encountered while compiling or executing a BPF program. +type Error struct { + // Code indicates the kind of error that occurred. + Code int + + // PC is the program counter (index into the list of instructions) at which + // the error occurred. + PC int +} + +func (e Error) codeString() string { + switch e.Code { + case DivisionByZero: + return "division by zero" + case InvalidEndOfProgram: + return "last instruction must be a return" + case InvalidInstructionCount: + return "invalid number of instructions" + case InvalidJumpTarget: + return "jump target out of bounds" + case InvalidLoad: + return "load out of bounds or violates input alignment requirements" + case InvalidOpcode: + return "invalid instruction opcode" + case InvalidRegister: + return "invalid M register" + default: + return "unknown error" + } +} + +// Error implements error.Error. +func (e Error) Error() string { + return fmt.Sprintf("at l%d: %s", e.PC, e.codeString()) +} + +// Program is a BPF program that has been validated for consistency. +type Program struct { + instructions []linux.BPFInstruction +} + +// Length returns the number of instructions in the program. +func (p Program) Length() int { + return len(p.instructions) +} + +// Compile performs validation on a sequence of BPF instructions before +// wrapping them in a Program. 
+func Compile(insns []linux.BPFInstruction) (Program, error) { + if len(insns) == 0 || len(insns) > MaxInstructions { + return Program{}, Error{InvalidInstructionCount, len(insns)} + } + + // The last instruction must be a return. + if last := insns[len(insns)-1]; last.OpCode != (Ret|K) && last.OpCode != (Ret|A) { + return Program{}, Error{InvalidEndOfProgram, len(insns) - 1} + } + + // Validate each instruction. Note that we skip a validation Linux does: + // Linux additionally verifies that every load from an M register is + // preceded, in every path, by a store to the same M register, in order to + // avoid having to clear M between programs + // (net/core/filter.c:check_load_and_stores). We always start with a zeroed + // M array. + for pc, i := range insns { + if i.OpCode&unusedBitsMask != 0 { + return Program{}, Error{InvalidOpcode, pc} + } + switch i.OpCode & instructionClassMask { + case Ld: + mode := i.OpCode & loadModeMask + switch i.OpCode & loadSizeMask { + case W: + if mode != Imm && mode != Abs && mode != Ind && mode != Mem && mode != Len { + return Program{}, Error{InvalidOpcode, pc} + } + if mode == Mem && i.K >= ScratchMemRegisters { + return Program{}, Error{InvalidRegister, pc} + } + case H, B: + if mode != Abs && mode != Ind { + return Program{}, Error{InvalidOpcode, pc} + } + default: + return Program{}, Error{InvalidOpcode, pc} + } + case Ldx: + mode := i.OpCode & loadModeMask + switch i.OpCode & loadSizeMask { + case W: + if mode != Imm && mode != Mem && mode != Len { + return Program{}, Error{InvalidOpcode, pc} + } + if mode == Mem && i.K >= ScratchMemRegisters { + return Program{}, Error{InvalidRegister, pc} + } + case B: + if mode != Msh { + return Program{}, Error{InvalidOpcode, pc} + } + default: + return Program{}, Error{InvalidOpcode, pc} + } + case St, Stx: + if i.OpCode&storeUnusedBitsMask != 0 { + return Program{}, Error{InvalidOpcode, pc} + } + if i.K >= ScratchMemRegisters { + return Program{}, Error{InvalidRegister, pc} + } + 
case Alu: + switch i.OpCode & aluMask { + case Add, Sub, Mul, Or, And, Lsh, Rsh, Xor: + break + case Div, Mod: + if src := i.OpCode & srcAluJmpMask; src == K && i.K == 0 { + return Program{}, Error{DivisionByZero, pc} + } + case Neg: + // Negation doesn't take a source operand. + if i.OpCode&srcAluJmpMask != 0 { + return Program{}, Error{InvalidOpcode, pc} + } + default: + return Program{}, Error{InvalidOpcode, pc} + } + case Jmp: + switch i.OpCode & jmpMask { + case Ja: + // Unconditional jump doesn't take a source operand. + if i.OpCode&srcAluJmpMask != 0 { + return Program{}, Error{InvalidOpcode, pc} + } + // Do the comparison in 64 bits to avoid the possibility of + // overflow from a very large i.K. + if uint64(pc)+uint64(i.K)+1 >= uint64(len(insns)) { + return Program{}, Error{InvalidJumpTarget, pc} + } + case Jeq, Jgt, Jge, Jset: + // jt and jf are uint16s, so there's no threat of overflow. + if pc+int(i.JumpIfTrue)+1 >= len(insns) { + return Program{}, Error{InvalidJumpTarget, pc} + } + if pc+int(i.JumpIfFalse)+1 >= len(insns) { + return Program{}, Error{InvalidJumpTarget, pc} + } + default: + return Program{}, Error{InvalidOpcode, pc} + } + case Ret: + if i.OpCode&retUnusedBitsMask != 0 { + return Program{}, Error{InvalidOpcode, pc} + } + if src := i.OpCode & srcRetMask; src != K && src != A { + return Program{}, Error{InvalidOpcode, pc} + } + case Misc: + if misc := i.OpCode & miscMask; misc != Tax && misc != Txa { + return Program{}, Error{InvalidOpcode, pc} + } + } + } + + return Program{insns}, nil +} + +// Input represents a source of input data for a BPF program. (BPF +// documentation sometimes refers to the input data as the "packet" due to its +// origins as a packet processing DSL.) +// +// For all of Input's Load methods: +// +// - The second (bool) return value is true if the load succeeded and false +// otherwise. +// +// - Inputs should not assume that the loaded range falls within the input +// data's length. 
Inputs should return false if the load falls outside of the +// input data. +// +// - Inputs should not assume that the offset is correctly aligned. Inputs may +// choose to service or reject loads to unaligned addresses. +type Input interface { + // Load32 reads 32 bits from the input starting at the given byte offset. + Load32(off uint32) (uint32, bool) + + // Load16 reads 16 bits from the input starting at the given byte offset. + Load16(off uint32) (uint16, bool) + + // Load8 reads 8 bits from the input starting at the given byte offset. + Load8(off uint32) (uint8, bool) + + // Length returns the length of the input in bytes. + Length() uint32 +} + +// machine represents the state of a BPF virtual machine. +type machine struct { + A uint32 + X uint32 + M [ScratchMemRegisters]uint32 +} + +func conditionalJumpOffset(insn linux.BPFInstruction, cond bool) int { + if cond { + return int(insn.JumpIfTrue) + } + return int(insn.JumpIfFalse) +} + +// Exec executes a BPF program over the given input and returns its return +// value. 
+func Exec(p Program, in Input) (uint32, error) { + var m machine + var pc int + for ; pc < len(p.instructions); pc++ { + i := p.instructions[pc] + switch i.OpCode { + case Ld | Imm | W: + m.A = i.K + case Ld | Abs | W: + val, ok := in.Load32(i.K) + if !ok { + return 0, Error{InvalidLoad, pc} + } + m.A = val + case Ld | Abs | H: + val, ok := in.Load16(i.K) + if !ok { + return 0, Error{InvalidLoad, pc} + } + m.A = uint32(val) + case Ld | Abs | B: + val, ok := in.Load8(i.K) + if !ok { + return 0, Error{InvalidLoad, pc} + } + m.A = uint32(val) + case Ld | Ind | W: + val, ok := in.Load32(m.X + i.K) + if !ok { + return 0, Error{InvalidLoad, pc} + } + m.A = val + case Ld | Ind | H: + val, ok := in.Load16(m.X + i.K) + if !ok { + return 0, Error{InvalidLoad, pc} + } + m.A = uint32(val) + case Ld | Ind | B: + val, ok := in.Load8(m.X + i.K) + if !ok { + return 0, Error{InvalidLoad, pc} + } + m.A = uint32(val) + case Ld | Mem | W: + m.A = m.M[int(i.K)] + case Ld | Len | W: + m.A = in.Length() + case Ldx | Imm | W: + m.X = i.K + case Ldx | Mem | W: + m.X = m.M[int(i.K)] + case Ldx | Len | W: + m.X = in.Length() + case Ldx | Msh | B: + val, ok := in.Load8(i.K) + if !ok { + return 0, Error{InvalidLoad, pc} + } + m.X = 4 * uint32(val&0xf) + case St: + m.M[int(i.K)] = m.A + case Stx: + m.M[int(i.K)] = m.X + case Alu | Add | K: + m.A += i.K + case Alu | Add | X: + m.A += m.X + case Alu | Sub | K: + m.A -= i.K + case Alu | Sub | X: + m.A -= m.X + case Alu | Mul | K: + m.A *= i.K + case Alu | Mul | X: + m.A *= m.X + case Alu | Div | K: + // K != 0 already checked by Compile. 
+ m.A /= i.K + case Alu | Div | X: + if m.X == 0 { + return 0, Error{DivisionByZero, pc} + } + m.A /= m.X + case Alu | Or | K: + m.A |= i.K + case Alu | Or | X: + m.A |= m.X + case Alu | And | K: + m.A &= i.K + case Alu | And | X: + m.A &= m.X + case Alu | Lsh | K: + m.A <<= i.K + case Alu | Lsh | X: + m.A <<= m.X + case Alu | Rsh | K: + m.A >>= i.K + case Alu | Rsh | X: + m.A >>= m.X + case Alu | Neg: + m.A = uint32(-int32(m.A)) + case Alu | Mod | K: + // K != 0 already checked by Compile. + m.A %= i.K + case Alu | Mod | X: + if m.X == 0 { + return 0, Error{DivisionByZero, pc} + } + m.A %= m.X + case Alu | Xor | K: + m.A ^= i.K + case Alu | Xor | X: + m.A ^= m.X + case Jmp | Ja: + pc += int(i.K) + case Jmp | Jeq | K: + pc += conditionalJumpOffset(i, m.A == i.K) + case Jmp | Jeq | X: + pc += conditionalJumpOffset(i, m.A == m.X) + case Jmp | Jgt | K: + pc += conditionalJumpOffset(i, m.A > i.K) + case Jmp | Jgt | X: + pc += conditionalJumpOffset(i, m.A > m.X) + case Jmp | Jge | K: + pc += conditionalJumpOffset(i, m.A >= i.K) + case Jmp | Jge | X: + pc += conditionalJumpOffset(i, m.A >= m.X) + case Jmp | Jset | K: + pc += conditionalJumpOffset(i, (m.A&i.K) != 0) + case Jmp | Jset | X: + pc += conditionalJumpOffset(i, (m.A&m.X) != 0) + case Ret | K: + return i.K, nil + case Ret | A: + return m.A, nil + case Misc | Tax: + m.A = m.X + case Misc | Txa: + m.X = m.A + default: + return 0, Error{InvalidOpcode, pc} + } + } + return 0, Error{InvalidEndOfProgram, pc} +} diff --git a/pkg/bpf/interpreter_test.go b/pkg/bpf/interpreter_test.go new file mode 100644 index 000000000..9e5e33228 --- /dev/null +++ b/pkg/bpf/interpreter_test.go @@ -0,0 +1,797 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bpf + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" +) + +func TestCompilationErrors(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. + desc string + + // insns is the BPF instructions to be compiled. + insns []linux.BPFInstruction + + // expectedErr is the expected compilation error. + expectedErr error + }{ + { + desc: "Instructions must not be nil", + expectedErr: Error{InvalidInstructionCount, 0}, + }, + { + desc: "Instructions must not be empty", + insns: []linux.BPFInstruction{}, + expectedErr: Error{InvalidInstructionCount, 0}, + }, + { + desc: "A program must end with a return", + insns: make([]linux.BPFInstruction, MaxInstructions), + expectedErr: Error{InvalidEndOfProgram, MaxInstructions - 1}, + }, + { + desc: "A program must have MaxInstructions or fewer instructions", + insns: append(make([]linux.BPFInstruction, MaxInstructions), Stmt(Ret|K, 0)), + expectedErr: Error{InvalidInstructionCount, MaxInstructions + 1}, + }, + { + desc: "A load from an invalid M register is a compilation error", + insns: []linux.BPFInstruction{ + Stmt(Ld|Mem|W, ScratchMemRegisters), // A = M[16] + Stmt(Ret|K, 0), // return 0 + }, + expectedErr: Error{InvalidRegister, 0}, + }, + { + desc: "A store to an invalid M register is a compilation error", + insns: []linux.BPFInstruction{ + Stmt(St, ScratchMemRegisters), // M[16] = A + Stmt(Ret|K, 0), // return 0 + }, + expectedErr: Error{InvalidRegister, 0}, + }, + { + desc: "Division by literal 
zero is a compilation error", + insns: []linux.BPFInstruction{ + Stmt(Alu|Div|K, 0), // A /= 0 + Stmt(Ret|K, 0), // return 0 + }, + expectedErr: Error{DivisionByZero, 0}, + }, + { + desc: "An unconditional jump outside of the program is a compilation error", + insns: []linux.BPFInstruction{ + Jump(Jmp|Ja, 1, 0, 0), // jmp nextpc+1 + Stmt(Ret|K, 0), // return 0 + }, + expectedErr: Error{InvalidJumpTarget, 0}, + }, + { + desc: "A conditional jump outside of the program in the true case is a compilation error", + insns: []linux.BPFInstruction{ + Jump(Jmp|Jeq|K, 0, 1, 0), // if (A == K) jmp nextpc+1 + Stmt(Ret|K, 0), // return 0 + }, + expectedErr: Error{InvalidJumpTarget, 0}, + }, + { + desc: "A conditional jump outside of the program in the false case is a compilation error", + insns: []linux.BPFInstruction{ + Jump(Jmp|Jeq|K, 0, 0, 1), // if (A != K) jmp nextpc+1 + Stmt(Ret|K, 0), // return 0 + }, + expectedErr: Error{InvalidJumpTarget, 0}, + }, + } { + _, err := Compile(test.insns) + if err != test.expectedErr { + t.Errorf("%s: expected error %q, got error %q", test.desc, test.expectedErr, err) + } + } +} + +func TestExecErrors(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. + desc string + + // insns is the BPF instructions to be executed. + insns []linux.BPFInstruction + + // expectedErr is the expected execution error. 
+ expectedErr error + }{ + { + desc: "An out-of-bounds load of input data is an execution error", + insns: []linux.BPFInstruction{ + Stmt(Ld|Abs|B, 0), // A = input[0] + Stmt(Ret|K, 0), // return 0 + }, + expectedErr: Error{InvalidLoad, 0}, + }, + { + desc: "Division by zero at runtime is an execution error", + insns: []linux.BPFInstruction{ + Stmt(Alu|Div|X, 0), // A /= X + Stmt(Ret|K, 0), // return 0 + }, + expectedErr: Error{DivisionByZero, 0}, + }, + { + desc: "Modulo zero at runtime is an execution error", + insns: []linux.BPFInstruction{ + Stmt(Alu|Mod|X, 0), // A %= X + Stmt(Ret|K, 0), // return 0 + }, + expectedErr: Error{DivisionByZero, 0}, + }, + } { + p, err := Compile(test.insns) + if err != nil { + t.Errorf("%s: unexpected compilation error: %v", test.desc, err) + continue + } + ret, err := Exec(p, InputBytes{nil, binary.BigEndian}) + if err != test.expectedErr { + t.Errorf("%s: expected execution error %q, got (%d, %v)", test.desc, test.expectedErr, ret, err) + } + } +} + +func TestValidInstructions(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. + desc string + + // insns is the BPF instructions to be compiled. + insns []linux.BPFInstruction + + // input is the input data. Note that input will be read as big-endian. + input []byte + + // expectedRet is the expected return value of the BPF program. 
+ expectedRet uint32 + }{ + { + desc: "Return of immediate", + insns: []linux.BPFInstruction{ + Stmt(Ret|K, 42), // return 42 + }, + expectedRet: 42, + }, + { + desc: "Load of immediate into A", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 42), // A = 42 + Stmt(Ret|A, 0), // return A + }, + expectedRet: 42, + }, + { + desc: "Load of immediate into X and copying of X into A", + insns: []linux.BPFInstruction{ + Stmt(Ldx|Imm|W, 42), // X = 42 + Stmt(Misc|Tax, 0), // A = X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 42, + }, + { + desc: "Copying of A into X and back", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 42), // A = 42 + Stmt(Misc|Txa, 0), // X = A + Stmt(Ld|Imm|W, 0), // A = 0 + Stmt(Misc|Tax, 0), // A = X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 42, + }, + { + desc: "Load of 32-bit input by absolute offset into A", + insns: []linux.BPFInstruction{ + Stmt(Ld|Abs|W, 1), // A = input[1..4] + Stmt(Ret|A, 0), // return A + }, + input: []byte{0x00, 0x11, 0x22, 0x33, 0x44}, + expectedRet: 0x11223344, + }, + { + desc: "Load of 16-bit input by absolute offset into A", + insns: []linux.BPFInstruction{ + Stmt(Ld|Abs|H, 1), // A = input[1..2] + Stmt(Ret|A, 0), // return A + }, + input: []byte{0x00, 0x11, 0x22}, + expectedRet: 0x1122, + }, + { + desc: "Load of 8-bit input by absolute offset into A", + insns: []linux.BPFInstruction{ + Stmt(Ld|Abs|B, 1), // A = input[1] + Stmt(Ret|A, 0), // return A + }, + input: []byte{0x00, 0x11}, + expectedRet: 0x11, + }, + { + desc: "Load of 32-bit input by relative offset into A", + insns: []linux.BPFInstruction{ + Stmt(Ldx|Imm|W, 1), // X = 1 + Stmt(Ld|Ind|W, 1), // A = input[X+1..X+4] + Stmt(Ret|A, 0), // return A + }, + input: []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, + expectedRet: 0x22334455, + }, + { + desc: "Load of 16-bit input by relative offset into A", + insns: []linux.BPFInstruction{ + Stmt(Ldx|Imm|W, 1), // X = 1 + Stmt(Ld|Ind|H, 1), // A = input[X+1..X+2] + Stmt(Ret|A, 0), // return A + }, + 
input: []byte{0x00, 0x11, 0x22, 0x33}, + expectedRet: 0x2233, + }, + { + desc: "Load of 8-bit input by relative offset into A", + insns: []linux.BPFInstruction{ + Stmt(Ldx|Imm|W, 1), // X = 1 + Stmt(Ld|Ind|B, 1), // A = input[X+1] + Stmt(Ret|A, 0), // return A + }, + input: []byte{0x00, 0x11, 0x22}, + expectedRet: 0x22, + }, + { + desc: "Load/store between A and scratch memory", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 42), // A = 42 + Stmt(St, 2), // M[2] = A + Stmt(Ld|Imm|W, 0), // A = 0 + Stmt(Ld|Mem|W, 2), // A = M[2] + Stmt(Ret|A, 0), // return A + }, + expectedRet: 42, + }, + { + desc: "Load/store between X and scratch memory", + insns: []linux.BPFInstruction{ + Stmt(Ldx|Imm|W, 42), // X = 42 + Stmt(Stx, 3), // M[3] = X + Stmt(Ldx|Imm|W, 0), // X = 0 + Stmt(Ldx|Mem|W, 3), // X = M[3] + Stmt(Misc|Tax, 0), // A = X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 42, + }, + { + desc: "Load of input length into A", + insns: []linux.BPFInstruction{ + Stmt(Ld|Len|W, 0), // A = len(input) + Stmt(Ret|A, 0), // return A + }, + input: []byte{1, 2, 3}, + expectedRet: 3, + }, + { + desc: "Load of input length into X", + insns: []linux.BPFInstruction{ + Stmt(Ldx|Len|W, 0), // X = len(input) + Stmt(Misc|Tax, 0), // A = X + Stmt(Ret|A, 0), // return A + }, + input: []byte{1, 2, 3}, + expectedRet: 3, + }, + { + desc: "Load of MSH (?) 
into X", + insns: []linux.BPFInstruction{ + Stmt(Ldx|Msh|B, 0), // X = 4*(input[0]&0xf) + Stmt(Misc|Tax, 0), // A = X + Stmt(Ret|A, 0), // return A + }, + input: []byte{0xf1}, + expectedRet: 4, + }, + { + desc: "Addition of immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Stmt(Alu|Add|K, 20), // A += 20 + Stmt(Ret|A, 0), // return A + }, + expectedRet: 30, + }, + { + desc: "Addition of X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Stmt(Ldx|Imm|W, 20), // X = 20 + Stmt(Alu|Add|X, 0), // A += X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 30, + }, + { + desc: "Subtraction of immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 30), // A = 30 + Stmt(Alu|Sub|K, 20), // A -= 20 + Stmt(Ret|A, 0), // return A + }, + expectedRet: 10, + }, + { + desc: "Subtraction of X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 30), // A = 30 + Stmt(Ldx|Imm|W, 20), // X = 20 + Stmt(Alu|Sub|X, 0), // A -= X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 10, + }, + { + desc: "Multiplication of immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 2), // A = 2 + Stmt(Alu|Mul|K, 3), // A *= 3 + Stmt(Ret|A, 0), // return A + }, + expectedRet: 6, + }, + { + desc: "Multiplication of X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 2), // A = 2 + Stmt(Ldx|Imm|W, 3), // X = 3 + Stmt(Alu|Mul|X, 0), // A *= X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 6, + }, + { + desc: "Division by immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 6), // A = 6 + Stmt(Alu|Div|K, 3), // A /= 3 + Stmt(Ret|A, 0), // return A + }, + expectedRet: 2, + }, + { + desc: "Division by X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 6), // A = 6 + Stmt(Ldx|Imm|W, 3), // X = 3 + Stmt(Alu|Div|X, 0), // A /= X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 2, + }, + { + desc: "Modulo immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 17), // A = 17 + Stmt(Alu|Mod|K, 7), // A %= 7 + Stmt(Ret|A, 0), // 
return A + }, + expectedRet: 3, + }, + { + desc: "Modulo X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 17), // A = 17 + Stmt(Ldx|Imm|W, 7), // X = 7 + Stmt(Alu|Mod|X, 0), // A %= X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 3, + }, + { + desc: "Arithmetic negation", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 1), // A = 1 + Stmt(Alu|Neg, 0), // A = -A + Stmt(Ret|A, 0), // return A + }, + expectedRet: 0xffffffff, + }, + { + desc: "Bitwise OR with immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55 + Stmt(Alu|Or|K, 0xff0055aa), // A |= 0xff0055aa + Stmt(Ret|A, 0), // return A + }, + expectedRet: 0xff00ffff, + }, + { + desc: "Bitwise OR with X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55 + Stmt(Ldx|Imm|W, 0xff0055aa), // X = 0xff0055aa + Stmt(Alu|Or|X, 0), // A |= X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 0xff00ffff, + }, + { + desc: "Bitwise AND with immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55 + Stmt(Alu|And|K, 0xff0055aa), // A &= 0xff0055aa + Stmt(Ret|A, 0), // return A + }, + expectedRet: 0xff000000, + }, + { + desc: "Bitwise AND with X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55 + Stmt(Ldx|Imm|W, 0xff0055aa), // X = 0xff0055aa + Stmt(Alu|And|X, 0), // A &= X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 0xff000000, + }, + { + desc: "Bitwise XOR with immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55 + Stmt(Alu|Xor|K, 0xff0055aa), // A ^= 0xff0055aa + Stmt(Ret|A, 0), // return A + }, + expectedRet: 0x0000ffff, + }, + { + desc: "Bitwise XOR with X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xff00aa55), // A = 0xff00aa55 + Stmt(Ldx|Imm|W, 0xff0055aa), // X = 0xff0055aa + Stmt(Alu|Xor|X, 0), // A ^= X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 0x0000ffff, + }, + { + desc: "Left shift by immediate", + insns: 
[]linux.BPFInstruction{ + Stmt(Ld|Imm|W, 1), // A = 1 + Stmt(Alu|Lsh|K, 5), // A <<= 5 + Stmt(Ret|A, 0), // return A + }, + expectedRet: 32, + }, + { + desc: "Left shift by X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 1), // A = 1 + Stmt(Ldx|Imm|W, 5), // X = 5 + Stmt(Alu|Lsh|X, 0), // A <<= X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 32, + }, + { + desc: "Right shift by immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xffffffff), // A = 0xffffffff + Stmt(Alu|Rsh|K, 31), // A >>= 31 + Stmt(Ret|A, 0), // return A + }, + expectedRet: 1, + }, + { + desc: "Right shift by X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xffffffff), // A = 0xffffffff + Stmt(Ldx|Imm|W, 31), // X = 31 + Stmt(Alu|Rsh|X, 0), // A >>= X + Stmt(Ret|A, 0), // return A + }, + expectedRet: 1, + }, + { + desc: "Unconditional jump", + insns: []linux.BPFInstruction{ + Jump(Jmp|Ja, 1, 0, 0), // jmp nextpc+1 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + }, + expectedRet: 1, + }, + { + desc: "Jump when A == immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 42), // A = 42 + Jump(Jmp|Jeq|K, 42, 1, 2), // if (A == 42) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 1, + }, + { + desc: "Jump when A != immediate", + insns: []linux.BPFInstruction{ + Jump(Jmp|Jeq|K, 42, 1, 2), // if (A == 42) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 2, + }, + { + desc: "Jump when A == X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 42), // A = 42 + Stmt(Ldx|Imm|W, 42), // X = 42 + Jump(Jmp|Jeq|X, 0, 1, 2), // if (A == X) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 1, + }, + { + desc: "Jump when A != X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 42), // A = 42 + 
Jump(Jmp|Jeq|X, 0, 1, 2), // if (A == X) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 2, + }, + { + desc: "Jump when A > immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Jump(Jmp|Jgt|K, 9, 1, 2), // if (A > 9) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 1, + }, + { + desc: "Jump when A <= immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Jump(Jmp|Jgt|K, 10, 1, 2), // if (A > 10) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 2, + }, + { + desc: "Jump when A > X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Stmt(Ldx|Imm|W, 9), // X = 9 + Jump(Jmp|Jgt|X, 0, 1, 2), // if (A > X) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 1, + }, + { + desc: "Jump when A <= X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Stmt(Ldx|Imm|W, 10), // X = 10 + Jump(Jmp|Jgt|X, 0, 1, 2), // if (A > X) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 2, + }, + { + desc: "Jump when A >= immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Jump(Jmp|Jge|K, 10, 1, 2), // if (A >= 10) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 1, + }, + { + desc: "Jump when A < immediate", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Jump(Jmp|Jge|K, 11, 1, 2), // if (A >= 11) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + 
expectedRet: 2, + }, + { + desc: "Jump when A >= X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Stmt(Ldx|Imm|W, 10), // X = 10 + Jump(Jmp|Jge|X, 0, 1, 2), // if (A >= X) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 1, + }, + { + desc: "Jump when A < X", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 10), // A = 10 + Stmt(Ldx|Imm|W, 11), // X = 11 + Jump(Jmp|Jge|X, 0, 1, 2), // if (A >= X) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 2, + }, + { + desc: "Jump when A & immediate != 0", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xff), // A = 0xff + Jump(Jmp|Jset|K, 0x101, 1, 2), // if (A & 0x101) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 1, + }, + { + desc: "Jump when A & immediate == 0", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xfe), // A = 0xfe + Jump(Jmp|Jset|K, 0x101, 1, 2), // if (A & 0x101) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 2, + }, + { + desc: "Jump when A & X != 0", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xff), // A = 0xff + Stmt(Ldx|Imm|W, 0x101), // X = 0x101 + Jump(Jmp|Jset|X, 0, 1, 2), // if (A & X) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 1, + }, + { + desc: "Jump when A & X == 0", + insns: []linux.BPFInstruction{ + Stmt(Ld|Imm|W, 0xfe), // A = 0xfe + Stmt(Ldx|Imm|W, 0x101), // X = 0x101 + Jump(Jmp|Jset|X, 0, 1, 2), // if (A & X) jmp nextpc+1 else jmp nextpc+2 + Stmt(Ret|K, 0), // return 0 + Stmt(Ret|K, 1), // return 1 + Stmt(Ret|K, 2), // return 2 + }, + expectedRet: 2, + }, + } { + p, err := Compile(test.insns) 
+ if err != nil { + t.Errorf("%s: unexpected compilation error: %v", test.desc, err) + continue + } + ret, err := Exec(p, InputBytes{test.input, binary.BigEndian}) + if err != nil { + t.Errorf("%s: expected return value of %d, got execution error: %v", test.desc, test.expectedRet, err) + continue + } + if ret != test.expectedRet { + t.Errorf("%s: expected return value of %d, got value %d", test.desc, test.expectedRet, ret) + } + } +} + +func TestSimpleFilter(t *testing.T) { + // Seccomp filter example given in Linux's + // Documentation/networking/filter.txt, translated to bytecode using the + // Linux kernel tree's tools/net/bpf_asm. + filter := []linux.BPFInstruction{ + {0x20, 0, 0, 0x00000004}, // ld [4] /* offsetof(struct seccomp_data, arch) */ + {0x15, 0, 11, 0xc000003e}, // jne #0xc000003e, bad /* AUDIT_ARCH_X86_64 */ + {0x20, 0, 0, 0000000000}, // ld [0] /* offsetof(struct seccomp_data, nr) */ + {0x15, 10, 0, 0x0000000f}, // jeq #15, good /* __NR_rt_sigreturn */ + {0x15, 9, 0, 0x000000e7}, // jeq #231, good /* __NR_exit_group */ + {0x15, 8, 0, 0x0000003c}, // jeq #60, good /* __NR_exit */ + {0x15, 7, 0, 0000000000}, // jeq #0, good /* __NR_read */ + {0x15, 6, 0, 0x00000001}, // jeq #1, good /* __NR_write */ + {0x15, 5, 0, 0x00000005}, // jeq #5, good /* __NR_fstat */ + {0x15, 4, 0, 0x00000009}, // jeq #9, good /* __NR_mmap */ + {0x15, 3, 0, 0x0000000e}, // jeq #14, good /* __NR_rt_sigprocmask */ + {0x15, 2, 0, 0x0000000d}, // jeq #13, good /* __NR_rt_sigaction */ + {0x15, 1, 0, 0x00000023}, // jeq #35, good /* __NR_nanosleep */ + {0x06, 0, 0, 0000000000}, // bad: ret #0 /* SECCOMP_RET_KILL */ + {0x06, 0, 0, 0x7fff0000}, // good: ret #0x7fff0000 /* SECCOMP_RET_ALLOW */ + } + p, err := Compile(filter) + if err != nil { + t.Fatalf("Unexpected compilation error: %v", err) + } + + for _, test := range []struct { + // desc is the test's description. + desc string + + // seccompData is the input data. 
+ seccompData + + // expectedRet is the expected return value of the BPF program. + expectedRet uint32 + }{ + { + desc: "Invalid arch is rejected", + seccompData: seccompData{nr: 1 /* x86 exit */, arch: 0x40000003 /* AUDIT_ARCH_I386 */}, + expectedRet: 0, + }, + { + desc: "Disallowed syscall is rejected", + seccompData: seccompData{nr: 105 /* __NR_setuid */, arch: 0xc000003e}, + expectedRet: 0, + }, + { + desc: "Whitelisted syscall is allowed", + seccompData: seccompData{nr: 231 /* __NR_exit_group */, arch: 0xc000003e}, + expectedRet: 0x7fff0000, + }, + } { + ret, err := Exec(p, test.seccompData.asInput()) + if err != nil { + t.Errorf("%s: expected return value of %d, got execution error: %v", test.desc, test.expectedRet, err) + continue + } + if ret != test.expectedRet { + t.Errorf("%s: expected return value of %d, got value %d", test.desc, test.expectedRet, ret) + } + } +} + +// seccompData is equivalent to struct seccomp_data. +type seccompData struct { + nr uint32 + arch uint32 + instructionPointer uint64 + args [6]uint64 +} + +// asInput converts a seccompData to a bpf.Input. +func (d *seccompData) asInput() Input { + return InputBytes{binary.Marshal(nil, binary.LittleEndian, d), binary.LittleEndian} +} diff --git a/pkg/bpf/program_builder.go b/pkg/bpf/program_builder.go new file mode 100644 index 000000000..7554d47c1 --- /dev/null +++ b/pkg/bpf/program_builder.go @@ -0,0 +1,159 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bpf + +import ( + "fmt" + "math" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +const labelTarget = math.MaxUint8 + +// ProgramBuilder assists with building a BPF program with jump +// labels that are resolved to their proper offsets. +type ProgramBuilder struct { + // Maps label names to label objects. + labels map[string]*label + + // Array of BPF instructions that makes up the program. + instructions []linux.BPFInstruction +} + +// NewProgramBuilder creates a new ProgramBuilder instance. +func NewProgramBuilder() *ProgramBuilder { + return &ProgramBuilder{labels: map[string]*label{}} +} + +// label contains information to resolve a label to an offset. +type label struct { + // List of locations that reference the label in the program. + sources []source + + // Program line when the label is located. + target int +} + +// source contains information about a single reference to a label. +type source struct { + // Program line where the label reference is present. + line int + + // True if label reference is in the 'jump if true' part of the jump. + // False if label reference is in the 'jump if false' part of the jump. + jt bool +} + +// AddStmt adds a new statement to the program. +func (b *ProgramBuilder) AddStmt(code uint16, k uint32) { + b.instructions = append(b.instructions, Stmt(code, k)) +} + +// AddJump adds a new jump to the program. +func (b *ProgramBuilder) AddJump(code uint16, k uint32, jt, jf uint8) { + b.instructions = append(b.instructions, Jump(code, k, jt, jf)) +} + +// AddJumpTrueLabel adds a new jump to the program where 'jump if true' is a label. +func (b *ProgramBuilder) AddJumpTrueLabel(code uint16, k uint32, jtLabel string, jf uint8) { + b.addLabelSource(jtLabel, true) + b.AddJump(code, k, labelTarget, jf) +} + +// AddJumpFalseLabel adds a new jump to the program where 'jump if false' is a label. 
+func (b *ProgramBuilder) AddJumpFalseLabel(code uint16, k uint32, jt uint8, jfLabel string) { + b.addLabelSource(jfLabel, false) + b.AddJump(code, k, jt, math.MaxUint8) +} + +// AddJumpLabels adds a new jump to the program where both jump targets are labels. +func (b *ProgramBuilder) AddJumpLabels(code uint16, k uint32, jtLabel, jfLabel string) { + b.addLabelSource(jtLabel, true) + b.addLabelSource(jfLabel, false) + b.AddJump(code, k, math.MaxUint8, math.MaxUint8) +} + +// AddLabel sets the given label name at the current location. The next instruction is executed +// when the any code jumps to this label. More than one label can be added to the same location. +func (b *ProgramBuilder) AddLabel(name string) error { + l, ok := b.labels[name] + if !ok { + // This is done to catch jump backwards cases, but it's not strictly wrong + // to have unused labels. + return fmt.Errorf("Adding a label that hasn't been used is not allowed: %v", name) + } + if l.target != -1 { + return fmt.Errorf("label %q target already set: %v", name, l.target) + } + l.target = len(b.instructions) + return nil +} + +// Instructions returns an array of BPF instructions representing the program with all labels +// resolved. Return error in case label resolution failed due to an invalid program. 
+func (b *ProgramBuilder) Instructions() ([]linux.BPFInstruction, error) { + if err := b.resolveLabels(); err != nil { + return nil, err + } + return b.instructions, nil +} + +func (b *ProgramBuilder) addLabelSource(labelName string, jt bool) { + l, ok := b.labels[labelName] + if !ok { + l = &label{sources: make([]source, 0), target: -1} + b.labels[labelName] = l + } + l.sources = append(l.sources, source{line: len(b.instructions), jt: jt}) +} + +func (b *ProgramBuilder) resolveLabels() error { + for key, v := range b.labels { + if v.target == -1 { + return fmt.Errorf("label target not set: %v", key) + } + if v.target >= len(b.instructions) { + return fmt.Errorf("target is beyond end of ProgramBuilder") + } + for _, s := range v.sources { + // Finds jump instruction that references the label. + inst := b.instructions[s.line] + if s.line >= v.target { + return fmt.Errorf("cannot jump backwards") + } + // Calculates the jump offset from current line. + offset := v.target - s.line - 1 + if offset > math.MaxUint8 { + return fmt.Errorf("jump offset to label '%v' is too large: %v", key, offset) + } + // Sets offset into jump instruction. + if s.jt { + if inst.JumpIfTrue != labelTarget { + return fmt.Errorf("jump target is not a label") + } + inst.JumpIfTrue = uint8(offset) + } else { + if inst.JumpIfFalse != labelTarget { + return fmt.Errorf("jump target is not a label") + } + inst.JumpIfFalse = uint8(offset) + } + b.instructions[s.line] = inst + } + } + b.labels = map[string]*label{} + return nil +} diff --git a/pkg/bpf/program_builder_test.go b/pkg/bpf/program_builder_test.go new file mode 100644 index 000000000..7e4f06584 --- /dev/null +++ b/pkg/bpf/program_builder_test.go @@ -0,0 +1,157 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bpf + +import ( + "fmt" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +func validate(p *ProgramBuilder, expected []linux.BPFInstruction) error { + instructions, err := p.Instructions() + if err != nil { + return fmt.Errorf("Instructions() failed: %v", err) + } + got, err := DecodeProgram(instructions) + if err != nil { + return fmt.Errorf("DecodeProgram('instructions') failed: %v", err) + } + expectedDecoded, err := DecodeProgram(expected) + if err != nil { + return fmt.Errorf("DecodeProgram('expected') failed: %v", err) + } + if got != expectedDecoded { + return fmt.Errorf("DecodeProgram() failed, expected: %q, got: %q", expectedDecoded, got) + } + return nil +} + +func TestProgramBuilderSimple(t *testing.T) { + p := NewProgramBuilder() + p.AddStmt(Ld+Abs+W, 10) + p.AddJump(Jmp+Ja, 10, 0, 0) + + expected := []linux.BPFInstruction{ + Stmt(Ld+Abs+W, 10), + Jump(Jmp+Ja, 10, 0, 0), + } + + if err := validate(p, expected); err != nil { + t.Errorf("Validate() failed: %v", err) + } +} + +func TestProgramBuilderLabels(t *testing.T) { + p := NewProgramBuilder() + p.AddJumpTrueLabel(Jmp+Jeq+K, 11, "label_1", 0) + p.AddJumpFalseLabel(Jmp+Jeq+K, 12, 0, "label_2") + p.AddJumpLabels(Jmp+Jeq+K, 13, "label_3", "label_4") + if err := p.AddLabel("label_1"); err != nil { + t.Errorf("AddLabel(label_1) failed: %v", err) + } + p.AddStmt(Ld+Abs+W, 1) + if err := p.AddLabel("label_3"); err != nil { + t.Errorf("AddLabel(label_3) failed: %v", err) + } + p.AddJumpLabels(Jmp+Jeq+K, 14, "label_4", "label_5") + if err := p.AddLabel("label_2"); 
err != nil { + t.Errorf("AddLabel(label_2) failed: %v", err) + } + p.AddJumpLabels(Jmp+Jeq+K, 15, "label_4", "label_6") + if err := p.AddLabel("label_4"); err != nil { + t.Errorf("AddLabel(label_4) failed: %v", err) + } + p.AddStmt(Ld+Abs+W, 4) + if err := p.AddLabel("label_5"); err != nil { + t.Errorf("AddLabel(label_5) failed: %v", err) + } + if err := p.AddLabel("label_6"); err != nil { + t.Errorf("AddLabel(label_6) failed: %v", err) + } + p.AddStmt(Ld+Abs+W, 5) + + expected := []linux.BPFInstruction{ + Jump(Jmp+Jeq+K, 11, 2, 0), + Jump(Jmp+Jeq+K, 12, 0, 3), + Jump(Jmp+Jeq+K, 13, 1, 3), + Stmt(Ld+Abs+W, 1), + Jump(Jmp+Jeq+K, 14, 1, 2), + Jump(Jmp+Jeq+K, 15, 0, 1), + Stmt(Ld+Abs+W, 4), + Stmt(Ld+Abs+W, 5), + } + if err := validate(p, expected); err != nil { + t.Errorf("Validate() failed: %v", err) + } + // Calling validate()=>p.Instructions() again to make sure + // Instructions can be called multiple times without ruining + // the program. + if err := validate(p, expected); err != nil { + t.Errorf("Validate() failed: %v", err) + } +} + +func TestProgramBuilderMissingErrorTarget(t *testing.T) { + p := NewProgramBuilder() + p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0) + if _, err := p.Instructions(); err == nil { + t.Errorf("Instructions() should have failed") + } +} + +func TestProgramBuilderLabelWithNoInstruction(t *testing.T) { + p := NewProgramBuilder() + p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0) + if err := p.AddLabel("label_1"); err != nil { + t.Errorf("AddLabel(label_1) failed: %v", err) + } + if _, err := p.Instructions(); err == nil { + t.Errorf("Instructions() should have failed") + } +} + +func TestProgramBuilderUnusedLabel(t *testing.T) { + p := NewProgramBuilder() + if err := p.AddLabel("unused"); err == nil { + t.Errorf("AddLabel(unused) should have failed") + } +} + +func TestProgramBuilderLabelAddedTwice(t *testing.T) { + p := NewProgramBuilder() + p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0) + if err := p.AddLabel("label_1"); err != 
nil { + t.Errorf("AddLabel(label_1) failed: %v", err) + } + p.AddStmt(Ld+Abs+W, 0) + if err := p.AddLabel("label_1"); err == nil { + t.Errorf("AddLabel(label_1) failed: %v", err) + } +} + +func TestProgramBuilderJumpBackwards(t *testing.T) { + p := NewProgramBuilder() + p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0) + if err := p.AddLabel("label_1"); err != nil { + t.Errorf("AddLabel(label_1) failed: %v", err) + } + p.AddStmt(Ld+Abs+W, 0) + p.AddJumpTrueLabel(Jmp+Jeq+K, 10, "label_1", 0) + if _, err := p.Instructions(); err == nil { + t.Errorf("Instructions() should have failed") + } +} diff --git a/pkg/compressio/BUILD b/pkg/compressio/BUILD new file mode 100644 index 000000000..721b2d983 --- /dev/null +++ b/pkg/compressio/BUILD @@ -0,0 +1,18 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "compressio", + srcs = ["compressio.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/compressio", + visibility = ["//:sandbox"], + deps = ["//pkg/binary"], +) + +go_test( + name = "compressio_test", + size = "medium", + srcs = ["compressio_test.go"], + embed = [":compressio"], +) diff --git a/pkg/compressio/compressio.go b/pkg/compressio/compressio.go new file mode 100644 index 000000000..e0d36aee9 --- /dev/null +++ b/pkg/compressio/compressio.go @@ -0,0 +1,559 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

// Package compressio provides parallel compression and decompression.
package compressio

import (
	"bytes"
	"compress/flate"
	"errors"
	"io"
	"runtime"
	"sync"

	"gvisor.googlesource.com/gvisor/pkg/binary"
)

// bufPool recycles scratch buffers to avoid per-chunk allocations.
var bufPool = sync.Pool{
	New: func() interface{} {
		return bytes.NewBuffer(nil)
	},
}

// chunkPool recycles chunk descriptors; the buffers they point at are managed
// separately (see chunk).
var chunkPool = sync.Pool{
	New: func() interface{} {
		return new(chunk)
	},
}

// chunk is a unit of work.
type chunk struct {
	// compressed is compressed data.
	//
	// This will always be returned to the bufPool directly when work has
	// finished (in schedule) and therefore must be allocated.
	compressed *bytes.Buffer

	// uncompressed is the uncompressed data.
	//
	// This is not returned to the bufPool automatically, since it may
	// correspond to a inline slice (provided directly to Read or Write).
	uncompressed *bytes.Buffer
}

// newChunk allocates a new chunk object (or pulls one from the pool). Buffers
// will be allocated if nil is provided for compressed or uncompressed.
func newChunk(compressed *bytes.Buffer, uncompressed *bytes.Buffer) *chunk {
	c := chunkPool.Get().(*chunk)
	if compressed != nil {
		c.compressed = compressed
	} else {
		c.compressed = bufPool.Get().(*bytes.Buffer)
	}
	if uncompressed != nil {
		c.uncompressed = uncompressed
	} else {
		c.uncompressed = bufPool.Get().(*bytes.Buffer)
	}
	return c
}

// result is the result of some work; it includes the original chunk.
type result struct {
	*chunk
	err error
}

// worker is a compression/decompression worker.
//
// The associated worker goroutine reads in uncompressed buffers from input and
// writes compressed buffers to its output. Alternatively, the worker reads
// compressed buffers from input and writes uncompressed buffers to its output.
//
// The goroutine will exit when input is closed, and the goroutine will close
// output.
type worker struct {
	input  chan *chunk
	output chan result
}

// work is the main work routine; see worker.
//
// Errors are propagated through the result rather than aborting the loop, so
// a single bad chunk does not kill the worker.
func (w *worker) work(compress bool, level int) {
	defer close(w.output)

	for c := range w.input {
		if compress {
			// Encode this slice.
			fw, err := flate.NewWriter(c.compressed, level)
			if err != nil {
				w.output <- result{c, err}
				continue
			}

			// Encode the input.
			if _, err := io.Copy(fw, c.uncompressed); err != nil {
				w.output <- result{c, err}
				continue
			}
			if err := fw.Close(); err != nil {
				w.output <- result{c, err}
				continue
			}
		} else {
			// Decode this slice.
			// NOTE(review): the flate reader is not explicitly
			// closed; io.Copy drains it to EOF — presumably
			// sufficient here, but confirm no final error is lost.
			fr := flate.NewReader(c.compressed)

			// Decode the input.
			if _, err := io.Copy(c.uncompressed, fr); err != nil {
				w.output <- result{c, err}
				continue
			}
		}

		// Send the output.
		w.output <- result{c, nil}
	}
}

// pool is common functionality for reader/writers.
type pool struct {
	// workers are the compression/decompression workers.
	workers []worker

	// chunkSize is the chunk size. This is the first four bytes in the
	// stream and is shared across both the reader and writer.
	chunkSize uint32

	// mu protects below; it is generally the responsibility of users to
	// acquire this mutex before calling any methods on the pool.
	mu sync.Mutex

	// nextInput is the next worker for input (scheduling). The difference
	// nextInput-nextOutput is the number of chunks in flight.
	nextInput int

	// nextOutput is the next worker for output (result).
	nextOutput int

	// buf is the current active buffer; the exact semantics of this buffer
	// depending on whether this is a reader or a writer.
	buf *bytes.Buffer
}

// init initializes the worker pool.
//
// This should only be called once.
func (p *pool) init(compress bool, level int) {
	p.workers = make([]worker, 1+runtime.GOMAXPROCS(0))
	for i := 0; i < len(p.workers); i++ {
		p.workers[i] = worker{
			input:  make(chan *chunk, 1),
			output: make(chan result, 1),
		}
		go p.workers[i].work(compress, level) // S/R-SAFE: In save path only.
	}
	// The finalizer closes the input channels, which terminates the worker
	// goroutines if the pool is dropped without an explicit stop.
	runtime.SetFinalizer(p, (*pool).stop)
}

// stop stops all workers.
func (p *pool) stop() {
	for i := 0; i < len(p.workers); i++ {
		close(p.workers[i].input)
	}
	p.workers = nil
}

// handleResult calls the callback. The chunk's compressed buffer and the
// chunk descriptor itself are always returned to their pools; the
// uncompressed buffer is the callback's responsibility (it may be inline).
func handleResult(r result, callback func(*chunk) error) error {
	defer func() {
		r.chunk.compressed.Reset()
		bufPool.Put(r.chunk.compressed)
		chunkPool.Put(r.chunk)
	}()
	if r.err != nil {
		return r.err
	}
	if err := callback(r.chunk); err != nil {
		return err
	}
	return nil
}

// schedule schedules the given buffers.
//
// If c is non-nil, then it will return as soon as the chunk is scheduled. If c
// is nil, then it will return only when no more work is left to do.
//
// If no callback function is provided, then the output channel will be
// ignored. You must be sure that the input is schedulable in this case.
//
// Workers are used round-robin; input and output both advance by the same
// +1 offset, so results are consumed in submission order.
func (p *pool) schedule(c *chunk, callback func(*chunk) error) error {
	for {
		var (
			inputChan  chan *chunk
			outputChan chan result
		)
		if c != nil {
			inputChan = p.workers[(p.nextInput+1)%len(p.workers)].input
		}
		if callback != nil && p.nextOutput != p.nextInput {
			outputChan = p.workers[(p.nextOutput+1)%len(p.workers)].output
		}
		if inputChan == nil && outputChan == nil {
			return nil
		}

		select {
		case inputChan <- c:
			p.nextInput++
			return nil
		case r := <-outputChan:
			p.nextOutput++
			if err := handleResult(r, callback); err != nil {
				return err
			}
		}
	}
}

// reader chunks reads and decompresses.
type reader struct {
	pool

	// in is the source.
	in io.Reader
}

// NewReader returns a new compressed reader.
func NewReader(in io.Reader) (io.Reader, error) {
	r := &reader{
		in: in,
	}
	r.init(false, 0)
	var err error
	// The stream begins with the chunk size chosen by the writer.
	if r.chunkSize, err = binary.ReadUint32(r.in, binary.BigEndian); err != nil {
		return nil, err
	}
	return r, nil
}

// errNewBuffer is returned when a new buffer is completed.
var errNewBuffer = errors.New("buffer ready")

// Read implements io.Reader.Read.
func (r *reader) Read(p []byte) (int, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Total bytes completed; this is declared up front because it must be
	// adjustable by the callback below.
	done := 0

	// Total bytes pending in the asynchronous workers for buffers. This is
	// used to process the proper regions of the input as inline buffers.
	var (
		pendingPre    = r.nextInput - r.nextOutput
		pendingInline = 0
	)

	// Define our callback for completed work.
	callback := func(c *chunk) error {
		// Check for an inline buffer: the worker decompressed directly
		// into a region of p, so only the byte count needs recording.
		if pendingPre == 0 && pendingInline > 0 {
			pendingInline--
			done += c.uncompressed.Len()
			return nil
		}

		// Copy the resulting buffer to our intermediate one, and
		// return errNewBuffer to ensure that we aren't called a second
		// time. This error code is handled specially below.
		//
		// c.buf will be freed and return to the pool when it is done.
		if pendingPre > 0 {
			pendingPre--
		}
		r.buf = c.uncompressed
		return errNewBuffer
	}

	for done < len(p) {
		// Do we have buffered data available?
		if r.buf != nil {
			n, err := r.buf.Read(p[done:])
			done += n
			if err == io.EOF {
				// This is the uncompressed buffer, it can be
				// returned to the pool at this point.
				r.buf.Reset()
				bufPool.Put(r.buf)
				r.buf = nil
			} else if err != nil {
				// Should never happen.
				defer r.stop()
				return done, err
			}
			continue
		}

		// Read the length of the next chunk and reset the
		// reader. The length is used to limit the reader.
		//
		// See writer.flush.
		l, err := binary.ReadUint32(r.in, binary.BigEndian)
		if err != nil {
			// This is generally okay as long as there
			// are still buffers outstanding. We actually
			// just wait for completion of those buffers here
			// and continue our loop.
			if err := r.schedule(nil, callback); err == nil {
				// We've actually finished all buffers; this is
				// the normal EOF exit path.
				defer r.stop()
				return done, io.EOF
			} else if err == errNewBuffer {
				// A new buffer is now available.
				continue
			} else {
				// Some other error occurred; we cannot
				// process any further.
				defer r.stop()
				return done, err
			}
		}

		// Read this chunk and schedule decompression.
		compressed := bufPool.Get().(*bytes.Buffer)
		if _, err := io.Copy(compressed, &io.LimitedReader{
			R: r.in,
			N: int64(l),
		}); err != nil {
			// Some other error occurred; see above.
			return done, err
		}

		// Are we doing inline decoding?
		//
		// Note that we need to check the length here against
		// bytes.MinRead, since the bytes library will choose to grow
		// the slice if the available capacity is not at least
		// bytes.MinRead. This limits inline decoding to chunkSizes
		// that are at least bytes.MinRead (which is not unreasonable).
		var c *chunk
		start := done + ((pendingPre + pendingInline) * int(r.chunkSize))
		if len(p) >= start+int(r.chunkSize) && len(p) >= start+bytes.MinRead {
			c = newChunk(compressed, bytes.NewBuffer(p[start:start]))
			pendingInline++
		} else {
			c = newChunk(compressed, nil)
		}
		if err := r.schedule(c, callback); err == errNewBuffer {
			// A new buffer was completed while we were reading.
			// That's great, but we need to force schedule the
			// current buffer so that it does not get lost.
			//
			// It is safe to pass nil as an output function here,
			// because we know that we just freed up a slot above.
			r.schedule(c, nil)
		} else if err != nil {
			// Some other error occurred; see above.
			defer r.stop()
			return done, err
		}
	}

	// Make sure that everything has been decoded successfully, otherwise
	// parts of p may not actually have completed.
	for pendingInline > 0 {
		if err := r.schedule(nil, func(c *chunk) error {
			if err := callback(c); err != nil {
				return err
			}
			// The nil case means that an inline buffer has
			// completed. The callback will have already removed
			// the inline buffer from the map, so we just return an
			// error to check the top of the loop again.
			return errNewBuffer
		}); err != errNewBuffer {
			// Some other error occurred; see above.
			return done, err
		}
	}

	// Need to return done here, since it may have been adjusted by the
	// callback to compensation for partial reads on some inline buffer.
	return done, nil
}

// writer chunks and schedules writes.
type writer struct {
	pool

	// out is the underlying writer.
	out io.Writer

	// closed indicates whether the file has been closed.
	closed bool
}

// NewWriter returns a new compressed writer.
//
// The recommended chunkSize is on the order of 1M. Extra memory may be
// buffered (in the form of read-ahead, or buffered writes), and is limited to
// O(chunkSize * [1+GOMAXPROCS]).
func NewWriter(out io.Writer, chunkSize uint32, level int) (io.WriteCloser, error) {
	w := &writer{
		pool: pool{
			chunkSize: chunkSize,
			buf:       bufPool.Get().(*bytes.Buffer),
		},
		out: out,
	}
	w.init(true, level)
	// The chunk size is the stream header; NewReader reads it back.
	if err := binary.WriteUint32(w.out, binary.BigEndian, chunkSize); err != nil {
		return nil, err
	}
	return w, nil
}

// flush writes a single buffer.
func (w *writer) flush(c *chunk) error {
	// Prefix each chunk with a length; this allows the reader to safely
	// limit reads while buffering.
	l := uint32(c.compressed.Len())
	if err := binary.WriteUint32(w.out, binary.BigEndian, l); err != nil {
		return err
	}

	// Write out to the stream.
	_, err := io.Copy(w.out, c.compressed)
	return err
}

// Write implements io.Writer.Write.
func (w *writer) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()

	// Did we close already?
	if w.closed {
		return 0, io.ErrUnexpectedEOF
	}

	// See above; we need to track in the same way.
	var (
		pendingPre    = w.nextInput - w.nextOutput
		pendingInline = 0
	)
	callback := func(c *chunk) error {
		// Inline chunks borrow a region of p directly; that buffer
		// must not be returned to the pool.
		if pendingPre == 0 && pendingInline > 0 {
			pendingInline--
			return w.flush(c)
		}
		if pendingPre > 0 {
			pendingPre--
		}
		err := w.flush(c)
		c.uncompressed.Reset()
		bufPool.Put(c.uncompressed)
		return err
	}

	for done := 0; done < len(p); {
		// Construct an inline buffer if we're doing an inline
		// encoding; see above regarding the bytes.MinRead constraint.
		if w.buf.Len() == 0 && len(p) >= done+int(w.chunkSize) && len(p) >= done+bytes.MinRead {
			bufPool.Put(w.buf) // Return to the pool; never scheduled.
			w.buf = bytes.NewBuffer(p[done : done+int(w.chunkSize)])
			done += int(w.chunkSize)
			pendingInline++
		}

		// Do we need to flush w.buf? Note that this case should be hit
		// immediately following the inline case above.
		left := int(w.chunkSize) - w.buf.Len()
		if left == 0 {
			if err := w.schedule(newChunk(nil, w.buf), callback); err != nil {
				return done, err
			}
			// Reset the buffer, since this has now been scheduled
			// for compression. Note that this may be trampled
			// immediately by the bufPool.Put(w.buf) above if the
			// next buffer happens to be inline, but that's okay.
			w.buf = bufPool.Get().(*bytes.Buffer)
			continue
		}

		// Read from p into w.buf.
		toWrite := len(p) - done
		if toWrite > left {
			toWrite = left
		}
		n, err := w.buf.Write(p[done : done+toWrite])
		done += n
		if err != nil {
			return done, err
		}
	}

	// Make sure that everything has been flushed, we can't return until
	// all the contents from p have been used.
	for pendingInline > 0 {
		if err := w.schedule(nil, func(c *chunk) error {
			if err := callback(c); err != nil {
				return err
			}
			// The flush was successful, return errNewBuffer here
			// to break from the loop and check the condition
			// again.
			return errNewBuffer
		}); err != errNewBuffer {
			return len(p), err
		}
	}

	return len(p), nil
}

// Close implements io.Closer.Close.
func (w *writer) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()

	// Did we already close? After the call to Close, we always mark as
	// closed, regardless of whether the flush is successful.
	if w.closed {
		return io.ErrUnexpectedEOF
	}
	w.closed = true
	defer w.stop()

	// Schedule any remaining partial buffer; we pass w.flush directly here
	// because the final buffer is guaranteed to not be an inline buffer.
	if w.buf.Len() > 0 {
		if err := w.schedule(newChunk(nil, w.buf), w.flush); err != nil {
			return err
		}
	}

	// Flush all scheduled buffers; see above.
	if err := w.schedule(nil, w.flush); err != nil {
		return err
	}

	// Close the underlying writer (if necessary).
	if closer, ok := w.out.(io.Closer); ok {
		return closer.Close()
	}
	return nil
}

// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package compressio + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "fmt" + "io" + "math/rand" + "runtime" + "testing" + "time" +) + +type harness interface { + Errorf(format string, v ...interface{}) + Fatalf(format string, v ...interface{}) + Logf(format string, v ...interface{}) +} + +func initTest(t harness, size int) []byte { + // Set number of processes to number of CPUs. + runtime.GOMAXPROCS(runtime.NumCPU()) + + // Construct synthetic data. We do this by encoding random data with + // base64. This gives a high level of entropy, but still quite a bit of + // structure, to give reasonable compression ratios (~75%). + var buf bytes.Buffer + bufW := base64.NewEncoder(base64.RawStdEncoding, &buf) + bufR := rand.New(rand.NewSource(0)) + if _, err := io.CopyN(bufW, bufR, int64(size)); err != nil { + t.Fatalf("unable to seed random data: %v", err) + } + return buf.Bytes() +} + +type testOpts struct { + Name string + Data []byte + NewWriter func(*bytes.Buffer) (io.Writer, error) + NewReader func(*bytes.Buffer) (io.Reader, error) + PreCompress func() + PostCompress func() + PreDecompress func() + PostDecompress func() + CompressIters int + DecompressIters int +} + +func doTest(t harness, opts testOpts) { + // Compress. 
+ var compressed bytes.Buffer + compressionStartTime := time.Now() + if opts.PreCompress != nil { + opts.PreCompress() + } + if opts.CompressIters <= 0 { + opts.CompressIters = 1 + } + for i := 0; i < opts.CompressIters; i++ { + compressed.Reset() + w, err := opts.NewWriter(&compressed) + if err != nil { + t.Errorf("%s: NewWriter got err %v, expected nil", opts.Name, err) + } + if _, err := io.Copy(w, bytes.NewBuffer(opts.Data)); err != nil { + t.Errorf("%s: compress got err %v, expected nil", opts.Name, err) + return + } + closer, ok := w.(io.Closer) + if ok { + if err := closer.Close(); err != nil { + t.Errorf("%s: got err %v, expected nil", opts.Name, err) + return + } + } + } + if opts.PostCompress != nil { + opts.PostCompress() + } + compressionTime := time.Since(compressionStartTime) + compressionRatio := float32(compressed.Len()) / float32(len(opts.Data)) + + // Decompress. + var decompressed bytes.Buffer + decompressionStartTime := time.Now() + if opts.PreDecompress != nil { + opts.PreDecompress() + } + if opts.DecompressIters <= 0 { + opts.DecompressIters = 1 + } + for i := 0; i < opts.DecompressIters; i++ { + decompressed.Reset() + r, err := opts.NewReader(bytes.NewBuffer(compressed.Bytes())) + if err != nil { + t.Errorf("%s: NewReader got err %v, expected nil", opts.Name, err) + return + } + if _, err := io.Copy(&decompressed, r); err != nil { + t.Errorf("%s: decompress got err %v, expected nil", opts.Name, err) + return + } + } + if opts.PostDecompress != nil { + opts.PostDecompress() + } + decompressionTime := time.Since(decompressionStartTime) + + // Verify. + if decompressed.Len() != len(opts.Data) { + t.Errorf("%s: got %d bytes, expected %d", opts.Name, decompressed.Len(), len(opts.Data)) + } + if !bytes.Equal(opts.Data, decompressed.Bytes()) { + t.Errorf("%s: got mismatch, expected match", opts.Name) + if len(opts.Data) < 32 { // Don't flood the logs. 
+ t.Errorf("got %v, expected %v", decompressed.Bytes(), opts.Data) + } + } + + t.Logf("%s: compression time %v, ratio %2.2f, decompression time %v", + opts.Name, compressionTime, compressionRatio, decompressionTime) +} + +func TestCompress(t *testing.T) { + var ( + data = initTest(t, 10*1024*1024) + data0 = data[:0] + data1 = data[:1] + data2 = data[:11] + data3 = data[:16] + data4 = data[:] + ) + + for _, data := range [][]byte{data0, data1, data2, data3, data4} { + for _, blockSize := range []uint32{1, 4, 1024, 4 * 1024, 16 * 1024} { + // Skip annoying tests; they just take too long. + if blockSize <= 16 && len(data) > 16 { + continue + } + + // Do the compress test. + doTest(t, testOpts{ + Name: fmt.Sprintf("len(data)=%d, blockSize=%d", len(data), blockSize), + Data: data, + NewWriter: func(b *bytes.Buffer) (io.Writer, error) { + return NewWriter(b, blockSize, flate.BestCompression) + }, + NewReader: func(b *bytes.Buffer) (io.Reader, error) { + return NewReader(b) + }, + }) + } + + // Do the vanilla test. + doTest(t, testOpts{ + Name: fmt.Sprintf("len(data)=%d, vanilla flate", len(data)), + Data: data, + NewWriter: func(b *bytes.Buffer) (io.Writer, error) { + return flate.NewWriter(b, flate.BestCompression) + }, + NewReader: func(b *bytes.Buffer) (io.Reader, error) { + return flate.NewReader(b), nil + }, + }) + } +} + +const ( + // benchBlockSize is the blockSize for benchmarks. + benchBlockSize = 32 * 1024 + + // benchDataSize is the amount of data for benchmarks. 
+ benchDataSize = 10 * 1024 * 1024 +) + +func BenchmarkCompress(b *testing.B) { + b.StopTimer() + b.SetBytes(benchDataSize) + data := initTest(b, benchDataSize) + doTest(b, testOpts{ + Name: fmt.Sprintf("len(data)=%d, blockSize=%d", len(data), benchBlockSize), + Data: data, + PreCompress: b.StartTimer, + PostCompress: b.StopTimer, + NewWriter: func(b *bytes.Buffer) (io.Writer, error) { + return NewWriter(b, benchBlockSize, flate.BestCompression) + }, + NewReader: func(b *bytes.Buffer) (io.Reader, error) { + return NewReader(b) + }, + CompressIters: b.N, + }) +} + +func BenchmarkDecompress(b *testing.B) { + b.StopTimer() + b.SetBytes(benchDataSize) + data := initTest(b, benchDataSize) + doTest(b, testOpts{ + Name: fmt.Sprintf("len(data)=%d, blockSize=%d", len(data), benchBlockSize), + Data: data, + PreDecompress: b.StartTimer, + PostDecompress: b.StopTimer, + NewWriter: func(b *bytes.Buffer) (io.Writer, error) { + return NewWriter(b, benchBlockSize, flate.BestCompression) + }, + NewReader: func(b *bytes.Buffer) (io.Reader, error) { + return NewReader(b) + }, + DecompressIters: b.N, + }) +} diff --git a/pkg/control/client/BUILD b/pkg/control/client/BUILD new file mode 100644 index 000000000..9e1c058e4 --- /dev/null +++ b/pkg/control/client/BUILD @@ -0,0 +1,16 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "client", + srcs = [ + "client.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/control/client", + visibility = ["//:sandbox"], + deps = [ + "//pkg/unet", + "//pkg/urpc", + ], +) diff --git a/pkg/control/client/client.go b/pkg/control/client/client.go new file mode 100644 index 000000000..f7c2e8776 --- /dev/null +++ b/pkg/control/client/client.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package client provides a basic control client interface.
package client

import (
	"gvisor.googlesource.com/gvisor/pkg/unet"
	"gvisor.googlesource.com/gvisor/pkg/urpc"
)

// ConnectTo attempts to connect to the sandbox with the given address.
// The returned client owns the connection and should be closed by the caller
// when no longer needed.
func ConnectTo(addr string) (*urpc.Client, error) {
	// Connect to the server.
	conn, err := unet.Connect(addr, false)
	if err != nil {
		return nil, err
	}

	// Wrap in our stream codec.
	return urpc.NewClient(conn), nil
}

// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package server provides a basic control server interface.

Note that no objects are registered by default. Users must provide their own
implementations of the control interface.
*/
package server

import (
	"os"
	"sync"

	"gvisor.googlesource.com/gvisor/pkg/log"
	"gvisor.googlesource.com/gvisor/pkg/unet"
	"gvisor.googlesource.com/gvisor/pkg/urpc"
)

// curUID is the unix user ID of the user that the control server is running as.
var curUID = os.Getuid()

// Server is a basic control server.
type Server struct {
	// socket is our bound socket.
	socket *unet.ServerSocket

	// server is our rpc server.
	server *urpc.Server

	// wg waits for the accept loop to terminate.
	wg sync.WaitGroup
}

// New returns a new bound control server.
func New(socket *unet.ServerSocket) *Server {
	return &Server{
		socket: socket,
		server: urpc.NewServer(),
	}
}

// FD returns the file descriptor that the server is running on.
func (s *Server) FD() int {
	return s.socket.FD()
}

// Wait waits for the main server goroutine to exit. This should be
// called after a call to Serve.
func (s *Server) Wait() {
	s.wg.Wait()
}

// Stop stops the server. Note that this function should only be called once
// and the server should not be used afterwards.
func (s *Server) Stop() {
	// Closing the socket unblocks the accept loop; wait for it to exit
	// before tearing down the rpc server.
	s.socket.Close()
	s.wg.Wait()

	// This will cause existing clients to be terminated safely.
	s.server.Stop()
}

// StartServing starts listening for connect and spawns the main service
// goroutine for handling incoming control requests.
StartServing does not +// block; to wait for the control server to exit, call Wait. +func (s *Server) StartServing() error { + // Actually start listening. + if err := s.socket.Listen(); err != nil { + return err + } + + s.wg.Add(1) + go func() { // S/R-SAFE: does not impact state directly. + s.serve() + s.wg.Done() + }() + + return nil +} + +// serve is the body of the main service goroutine. It handles incoming control +// connections and dispatches requests to registered objects. +func (s *Server) serve() { + for { + // Accept clients. + conn, err := s.socket.Accept() + if err != nil { + return + } + + ucred, err := conn.GetPeerCred() + if err != nil { + log.Warningf("Control couldn't get credentials: %s", err.Error()) + conn.Close() + continue + } + + // Only allow this user and root. + if int(ucred.Uid) != curUID && ucred.Uid != 0 { + // Authentication failed. + log.Warningf("Control auth failure: other UID = %d, current UID = %d", ucred.Uid, curUID) + conn.Close() + continue + } + + // Handle the connection non-blockingly. + s.server.StartHandling(conn) + } +} + +// Register registers a specific control interface with the server. +func (s *Server) Register(obj interface{}) { + s.server.Register(obj) +} + +// CreateFromFD creates a new control bound to the given 'fd'. It has no +// registered interfaces and will not start serving until StartServing is +// called. +func CreateFromFD(fd int) (*Server, error) { + socket, err := unet.NewServerSocket(fd) + if err != nil { + return nil, err + } + return New(socket), nil +} + +// Create creates a new control server with an abstract unix socket +// with the given address, which must be unique and a valid +// abstract socket name. +func Create(addr string) (*Server, error) { + socket, err := unet.Bind(addr, false) + if err != nil { + return nil, err + } + return New(socket), nil +} + +// CreateSocket creates a socket that can be used with control server, +// but doesn't start control server. 
'addr' must be a valid and unique +// abstract socket name. Returns socket's FD, -1 in case of error. +func CreateSocket(addr string) (int, error) { + socket, err := unet.Bind(addr, false) + if err != nil { + return -1, err + } + return socket.Release() +} diff --git a/pkg/cpuid/BUILD b/pkg/cpuid/BUILD new file mode 100644 index 000000000..a503b7ae8 --- /dev/null +++ b/pkg/cpuid/BUILD @@ -0,0 +1,43 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "cpuid_state", + srcs = ["cpuid.go"], + out = "cpuid_state.go", + package = "cpuid", +) + +go_library( + name = "cpuid", + srcs = [ + "cpu_amd64.s", + "cpuid.go", + "cpuid_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/cpuid", + visibility = ["//:sandbox"], + deps = [ + "//pkg/log", + "//pkg/state", + ], +) + +go_test( + name = "cpuid_test", + size = "small", + srcs = ["cpuid_test.go"], + embed = [":cpuid"], +) + +go_test( + name = "cpuid_parse_test", + size = "small", + srcs = [ + "cpuid_parse_test.go", + ], + embed = [":cpuid"], + tags = ["manual"], +) diff --git a/pkg/cpuid/cpu_amd64.s b/pkg/cpuid/cpu_amd64.s new file mode 100644 index 000000000..48a13c6fd --- /dev/null +++ b/pkg/cpuid/cpu_amd64.s @@ -0,0 +1,24 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// func HostID(rax, rcx uint32) (ret0, ret1, ret2, ret3 uint32) +TEXT ·HostID(SB),$0-48 + MOVL ax+0(FP), AX + MOVL cx+4(FP), CX + CPUID + MOVL AX, ret0+8(FP) + MOVL BX, ret1+12(FP) + MOVL CX, ret2+16(FP) + MOVL DX, ret3+20(FP) + RET diff --git a/pkg/cpuid/cpuid.go b/pkg/cpuid/cpuid.go new file mode 100644 index 000000000..aa248dd98 --- /dev/null +++ b/pkg/cpuid/cpuid.go @@ -0,0 +1,824 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build i386 amd64 + +// Package cpuid provides basic functionality for creating and adjusting CPU +// feature sets. +// +// To use FeatureSets, one should start with an existing FeatureSet (either a +// known platform, or HostFeatureSet()) and then add, remove, and test for +// features as desired. +// +// For example: Test for hardware extended state saving, and if we don't have +// it, don't expose AVX, which cannot be saved with fxsave. +// +// if !HostFeatureSet().HasFeature(X86FeatureXSAVE) { +// exposedFeatures.Remove(X86FeatureAVX) +// } +package cpuid + +import ( + "bytes" + "fmt" + "io/ioutil" + "strconv" + "strings" + + "gvisor.googlesource.com/gvisor/pkg/log" +) + +// Feature is a unique identifier for a particular cpu feature. We just use an +// int as a feature number on x86. It corresponds to the bit position in the +// basic feature mask returned by a cpuid with eax=1. +type Feature int + +// block is a collection of 32 Feature bits. 
+type block int + +const blockSize = 32 + +// Feature bits are numbered according to "blocks". Each block is 32 bits, and +// feature bits from the same source (cpuid leaf/level) are in the same block. +func featureID(b block, bit int) Feature { + return Feature(32*int(b) + bit) +} + +// Block 0 constants are all of the "basic" feature bits returned by a cpuid in +// ecx with eax=1. +const ( + X86FeatureSSE3 Feature = iota + X86FeaturePCLMULDQ + X86FeatureDTES64 + X86FeatureMONITOR + X86FeatureDSCPL + X86FeatureVMX + X86FeatureSMX + X86FeatureEST + X86FeatureTM2 + X86FeatureSSSE3 // Not a typo, "supplemental" SSE3. + X86FeatureCNXTID + X86FeatureSDBG + X86FeatureFMA + X86FeatureCX16 + X86FeatureXTPR + X86FeaturePDCM + _ // ecx bit 16 is reserved. + X86FeaturePCID + X86FeatureDCA + X86FeatureSSE4_1 + X86FeatureSSE4_2 + X86FeatureX2APIC + X86FeatureMOVBE + X86FeaturePOPCNT + X86FeatureTSCD + X86FeatureAES + X86FeatureXSAVE + X86FeatureOSXSAVE + X86FeatureAVX + X86FeatureF16C + X86FeatureRDRAND + _ // ecx bit 31 is reserved. +) + +// Block 1 constants are all of the "basic" feature bits returned by a cpuid in +// edx with eax=1. +const ( + X86FeatureFPU Feature = 32 + iota + X86FeatureVME + X86FeatureDE + X86FeaturePSE + X86FeatureTSC + X86FeatureMSR + X86FeaturePAE + X86FeatureMCE + X86FeatureCX8 + X86FeatureAPIC + _ // edx bit 10 is reserved. + X86FeatureSEP + X86FeatureMTRR + X86FeaturePGE + X86FeatureMCA + X86FeatureCMOV + X86FeaturePAT + X86FeaturePSE36 + X86FeaturePSN + X86FeatureCLFSH + _ // edx bit 20 is reserved. + X86FeatureDS + X86FeatureACPI + X86FeatureMMX + X86FeatureFXSR + X86FeatureSSE + X86FeatureSSE2 + X86FeatureSS + X86FeatureHTT + X86FeatureTM + X86FeatureIA64 + X86FeaturePBE +) + +// Block 2 bits are the "structured extended" features returned in ebx for +// eax=7, ecx=0. +const ( + X86FeatureFSGSBase Feature = 2*32 + iota + X86FeatureTSC_ADJUST + _ // ebx bit 2 is reserved. 
+ X86FeatureBMI1 + X86FeatureHLE + X86FeatureAVX2 + X86FeatureFDP_EXCPTN_ONLY + X86FeatureSMEP + X86FeatureBMI2 + X86FeatureERMS + X86FeatureINVPCID + X86FeatureRTM + X86FeatureCQM + X86FeatureFPCSDS + X86FeatureMPX + X86FeatureRDT + X86FeatureAVX512F + X86FeatureAVX512DQ + X86FeatureRDSEED + X86FeatureADX + X86FeatureSMAP + X86FeatureAVX512IFMA + X86FeaturePCOMMIT + X86FeatureCLFLUSHOPT + X86FeatureCLWB + X86FeatureIPT // Intel processor trace. + X86FeatureAVX512PF + X86FeatureAVX512ER + X86FeatureAVX512CD + X86FeatureSHA + X86FeatureAVX512BW + X86FeatureAVX512VL +) + +// Block 3 bits are the "extended" features returned in ecx for eax=7, ecx=0. +const ( + X86FeaturePREFETCHWT1 Feature = 3*32 + iota + X86FeatureAVX512VBMI + X86FeatureUMIP + X86FeaturePKU +) + +// Block 4 constants are for xsave capabilities in CPUID.(EAX=0DH,ECX=01H):EAX. +// The CPUID leaf is available only if 'X86FeatureXSAVE' is present. +const ( + X86FeatureXSAVEOPT Feature = 4*32 + iota + X86FeatureXSAVEC + X86FeatureXGETBV1 + X86FeatureXSAVES + // EAX[31:4] are reserved. +) + +// Block 5 constants are the extended feature bits in +// CPUID.(EAX=0x80000001):ECX. These are very sparse, and so the bit positions +// are assigned manually. +const ( + X86FeatureLAHF64 Feature = 5*32 + 0 + X86FeatureLZCNT Feature = 5*32 + 5 + X86FeaturePREFETCHW Feature = 5*32 + 8 +) + +// Block 6 constants are the extended feature bits in +// CPUID.(EAX=0x80000001):EDX. These are very sparse, and so the bit positions +// are assigned manually. +const ( + X86FeatureSYSCALL Feature = 6*32 + 11 + X86FeatureNX Feature = 6*32 + 20 + X86FeatureGBPAGES Feature = 6*32 + 26 + X86FeatureRDTSCP Feature = 6*32 + 27 + X86FeatureLM Feature = 6*32 + 29 + // These are not in the most recent intel manual. Not surprising... It + // shouldn't matter but we should find where these bits come from and + // support them. The linux strings are below for completeness. 
+ //X86FeatureMMXEXT + //X86FeatureMP + //X86FeatureFXSR_OPT + //X86Feature3DNOWEXT + //X86Feature3DNOW + //X86FeatureMMXEXT: "mmxext", + //X86FeatureMP: "mp", + //X86FeatureFXSR_OPT: "fxsr_opt", + //X86Feature3DNOWEXT: "3dnowext", + //X86Feature3DNOW: "3dnow", +) + +// linuxBlockOrder defines the order in which linux organizes the feature +// blocks. Linux also tracks feature bits in 32-bit blocks, but in an order +// which doesn't match well here, so for the /proc/cpuinfo generation we simply +// re-map the blocks to Linux's ordering and then go through the bits in each +// block. +var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4} + +// To make emulation of /proc/cpuinfo easy down the line, these names match the +// names of the basic features in Linux defined in +// arch/x86/kernel/cpu/capflags.c. +var x86FeatureStrings = map[Feature]string{ + X86FeatureFPU: "fpu", + X86FeatureVME: "vme", + X86FeatureDE: "de", + X86FeaturePSE: "pse", + X86FeatureTSC: "tsc", + X86FeatureMSR: "msr", + X86FeaturePAE: "pae", + X86FeatureMCE: "mce", + X86FeatureCX8: "cx8", + X86FeatureAPIC: "apic", + X86FeatureSEP: "sep", + X86FeatureMTRR: "mtrr", + X86FeaturePGE: "pge", + X86FeatureMCA: "mca", + X86FeatureCMOV: "cmov", + X86FeaturePAT: "pat", + X86FeaturePSE36: "pse36", + X86FeaturePSN: "pn", + X86FeatureCLFSH: "clflush", + X86FeatureDS: "dts", + X86FeatureACPI: "acpi", + X86FeatureMMX: "mmx", + X86FeatureFXSR: "fxsr", + X86FeatureSSE: "sse", + X86FeatureSSE2: "sse2", + X86FeatureSS: "ss", + X86FeatureHTT: "ht", + X86FeatureTM: "tm", + X86FeatureIA64: "ia64", + X86FeaturePBE: "pbe", + X86FeatureSSE3: "pni", + X86FeaturePCLMULDQ: "pclmulqdq", + X86FeatureDTES64: "dtes64", + X86FeatureMONITOR: "monitor", + X86FeatureDSCPL: "ds_cpl", + X86FeatureVMX: "vmx", + X86FeatureSMX: "smx", + X86FeatureEST: "est", + X86FeatureTM2: "tm2", + X86FeatureSSSE3: "ssse3", + X86FeatureCNXTID: "cid", + X86FeatureFMA: "fma", + X86FeatureCX16: "cx16", + X86FeatureXTPR: "xtpr", + X86FeaturePDCM: "pdcm", + 
X86FeaturePCID: "pcid", + X86FeatureDCA: "dca", + X86FeatureSSE4_1: "sse4_1", + X86FeatureSSE4_2: "sse4_2", + X86FeatureX2APIC: "x2apic", + X86FeatureMOVBE: "movbe", + X86FeaturePOPCNT: "popcnt", + X86FeatureTSCD: "tsc_deadline_timer", + X86FeatureAES: "aes", + X86FeatureXSAVE: "xsave", + X86FeatureAVX: "avx", + X86FeatureF16C: "f16c", + X86FeatureRDRAND: "rdrand", + X86FeatureFSGSBase: "fsgsbase", + X86FeatureTSC_ADJUST: "tsc_adjust", + X86FeatureBMI1: "bmi1", + X86FeatureHLE: "hle", + X86FeatureAVX2: "avx2", + X86FeatureSMEP: "smep", + X86FeatureBMI2: "bmi2", + X86FeatureERMS: "erms", + X86FeatureINVPCID: "invpcid", + X86FeatureRTM: "rtm", + X86FeatureCQM: "cqm", + X86FeatureMPX: "mpx", + X86FeatureRDT: "rdt", + X86FeatureAVX512F: "avx512f", + X86FeatureAVX512DQ: "avx512dq", + X86FeatureRDSEED: "rdseed", + X86FeatureADX: "adx", + X86FeatureSMAP: "smap", + X86FeatureCLWB: "clwb", + X86FeatureAVX512CD: "avx512cd", + X86FeatureAVX512BW: "avx512bw", + X86FeatureAVX512VL: "avx512vl", + X86FeatureSYSCALL: "syscall", + X86FeatureNX: "nx", + X86FeatureGBPAGES: "pdpe1gb", + X86FeatureRDTSCP: "rdtscp", + X86FeatureLM: "lm", + X86FeatureXSAVEOPT: "xsaveopt", + X86FeatureXSAVEC: "xsavec", + X86FeatureXGETBV1: "xgetbv1", + X86FeatureLAHF64: "lahf_lm", // LAHF/SAHF in long mode + X86FeatureLZCNT: "abm", // Advanced bit manipulation + X86FeaturePREFETCHW: "3dnowprefetch", +} + +// These flags are parse only---they can be used for setting / unsetting the +// flags, but will not get printed out in /proc/cpuinfo. +var x86FeatureParseOnlyStrings = map[Feature]string{ + X86FeaturePKU: "pku", + X86FeatureXSAVES: "xsaves", + X86FeatureFPCSDS: "fpcsds", + X86FeatureOSXSAVE: "osxsave", + X86FeatureIPT: "pt", + X86FeatureSDBG: "sdbg", + X86FeatureFDP_EXCPTN_ONLY: "fdp_excptn_only", + X86FeatureCLFLUSHOPT: "clfushopt", +} + +// These are the default values of various FeatureSet fields. 
+const ( + defaultVendorID = "GenuineIntel" + + // These processor signature defaults are derived from the values + // listed in Intel AN485 for i7/Xeon processors. + defaultExtFamily uint8 = 0 + defaultExtModel uint8 = 1 + defaultType uint8 = 0 + defaultFamily uint8 = 0x06 + defaultModel uint8 = 0x0a + defaultSteppingID uint8 = 0 +) + +// Just a way to wrap cpuid function numbers. +type cpuidFunction uint32 + +// The constants below are the lower or "standard" cpuid functions. See Intel +// AN485 for detailed information about each one. +const ( + vendorID cpuidFunction = iota // Returns vendor ID and largest standard function. + featureInfo // Returns basic feature bits and processor signature. + cacheDescriptors // Returns list of cache descriptors. + serialNumber // Returns processor serial number (obsolete on new hardware). + deterministicCacheParams // Returns deterministic cache information. See AN485. + monitorMwaitParams // Returns information about monitor/mwait instructions. + powerParams // Returns information about power management and thermal sensors. + extendedFeatureInfo // Returns extended feature bits. + _ // Function 8 is reserved. + DCAParams // Returns direct cache access information. + pmcInfo // Returns information about performance monitoring features. + x2APICInfo // Returns core/logical processor topology. See AN485 for details. + _ // Function 0xc is reserved. + xSaveInfo // Returns information about extended state management. +) + +// The "extended" functions start at 0x80000000. Intel AP-485 has information +// on these as well. +const ( + extendedFunctionInfo cpuidFunction = 0x80000000 + iota // Returns highest available extended function in eax. + extendedFeatures // Returns some extended feature bits in edx and ecx. +) + +var cpuFreqMHz float64 + +// x86FeaturesFromString includes features from x86FeatureStrings and +// x86FeatureParseOnlyStrings. 
+var x86FeaturesFromString = make(map[string]Feature) + +// FeatureFromString returns the Feature associated with the given feature +// string plus a bool to indicate if it could find the feature. +func FeatureFromString(s string) (Feature, bool) { + f, b := x86FeaturesFromString[s] + return f, b +} + +// String implements fmt.Stringer. +func (f Feature) String() string { + if s := f.flagString(false); s != "" { + return s + } + return fmt.Sprintf("<cpuflag %d>", f) +} + +func (f Feature) flagString(cpuinfoOnly bool) string { + if s, ok := x86FeatureStrings[f]; ok { + return s + } + if !cpuinfoOnly { + return x86FeatureParseOnlyStrings[f] + } + return "" +} + +// FeatureSet is a set of Features for a cpu. +type FeatureSet struct { + // Set is the set of features that are enabled in this FeatureSet. + Set map[Feature]bool + + // VendorID is the 12-char string returned in ebx:edx:ecx for eax=0. + VendorID string + + // ExtendedFamily is part of the processor signature. + ExtendedFamily uint8 + + // ExtendedModel is part of the processor signature. + ExtendedModel uint8 + + // ProcessorType is part of the processor signature. + ProcessorType uint8 + + // Family is part of the processor signature. + Family uint8 + + // Model is part of the processor signature. + Model uint8 + + // SteppingID is part of the processor signature. + SteppingID uint8 +} + +// FlagsString prints out supported CPU flags. If cpuinfoOnly is true, it is +// equivalent to the "flags" field in /proc/cpuinfo. +func (fs *FeatureSet) FlagsString(cpuinfoOnly bool) string { + var s []string + for _, b := range linuxBlockOrder { + for i := 0; i < blockSize; i++ { + if f := featureID(b, i); fs.Set[f] { + if fstr := f.flagString(cpuinfoOnly); fstr != "" { + s = append(s, fstr) + } + } + } + } + return strings.Join(s, " ") +} + +// CPUInfo is to generate a section of one cpu in /proc/cpuinfo. 
This is a +// minimal /proc/cpuinfo, it is missing some fields like "microcode" that are +// not always printed in Linux. The bogomips field is simply made up. +func (fs FeatureSet) CPUInfo(cpu uint) string { + var b bytes.Buffer + fmt.Fprintf(&b, "processor\t: %d\n", cpu) + fmt.Fprintf(&b, "vendor_id\t: %s\n", fs.VendorID) + fmt.Fprintf(&b, "cpu family\t: %d\n", ((fs.ExtendedFamily<<4)&0xff)|fs.Family) + fmt.Fprintf(&b, "model\t\t: %d\n", ((fs.ExtendedModel<<4)&0xff)|fs.Model) + fmt.Fprintf(&b, "model name\t: %s\n", "unknown") // Unknown for now. + fmt.Fprintf(&b, "stepping\t: %s\n", "unknown") // Unknown for now. + fmt.Fprintf(&b, "cpu MHz\t\t: %.3f\n", cpuFreqMHz) + fmt.Fprintln(&b, "fpu\t\t: yes") + fmt.Fprintln(&b, "fpu_exception\t: yes") + fmt.Fprintf(&b, "cpuid level\t: %d\n", uint32(xSaveInfo)) // Same as ax in vendorID. + fmt.Fprintln(&b, "wp\t\t: yes") + fmt.Fprintf(&b, "flags\t\t: %s\n", fs.FlagsString(true)) + fmt.Fprintf(&b, "bogomips\t: %.02f\n", cpuFreqMHz) // It's bogus anyway. + fmt.Fprintf(&b, "clflush size\t: %d\n", 64) + fmt.Fprintf(&b, "cache_alignment\t: %d\n", 64) + fmt.Fprintf(&b, "address sizes\t: %d bits physical, %d bits virtual\n", 46, 48) + fmt.Fprintln(&b, "power management:") // This is always here, but can be blank. + fmt.Fprintln(&b, "") // The /proc/cpuinfo file ends with an extra newline. + return b.String() +} + +// Helper to convert 3 regs into 12-byte vendor ID. +func vendorIDFromRegs(bx, cx, dx uint32) string { + bytes := make([]byte, 0, 12) + for i := uint(0); i < 4; i++ { + b := byte(bx >> (i * 8)) + bytes = append(bytes, b) + } + + for i := uint(0); i < 4; i++ { + b := byte(dx >> (i * 8)) + bytes = append(bytes, b) + } + + for i := uint(0); i < 4; i++ { + b := byte(cx >> (i * 8)) + bytes = append(bytes, b) + } + return string(bytes) +} + +// ExtendedStateSize returns the number of bytes needed to save the "extended +// state" for this processor and the boundary it must be aligned to. 
Extended +// state includes floating point registers, and other cpu state that's not +// associated with the normal task context. +// +// Note: We can save some space here with an optimization where we use a +// smaller chunk of memory depending on features that are actually enabled. +// Currently we just use the largest possible size for simplicity (which is +// about 2.5K worst case, with avx512). +func (fs *FeatureSet) ExtendedStateSize() (size, align uint) { + if fs.UseXsave() { + // Leaf 0 of xsaveinfo function returns the size for currently + // enabled xsave features in ebx, the maximum size if all valid + // features are saved with xsave in ecx, and valid XCR0 bits in + // edx:eax. + _, _, maxSize, _ := HostID(uint32(xSaveInfo), 0) + return uint(maxSize), 64 + } + + // If we don't support xsave, we fall back to fxsave, which requires + // 512 bytes aligned to 16 bytes. + return 512, 16 +} + +// ValidXCR0Mask returns the bits that may be set to 1 in control register +// XCR0. +func (fs *FeatureSet) ValidXCR0Mask() uint64 { + if !fs.UseXsave() { + return 0 + } + eax, _, _, edx := HostID(uint32(xSaveInfo), 0) + return uint64(edx)<<32 | uint64(eax) +} + +// vendorIDRegs returns the 3 register values used to construct the 12-byte +// vendor ID string for eax=0. +func (fs *FeatureSet) vendorIDRegs() (bx, dx, cx uint32) { + for i := uint(0); i < 4; i++ { + bx |= uint32(fs.VendorID[i]) << (i * 8) + } + + for i := uint(0); i < 4; i++ { + dx |= uint32(fs.VendorID[i+4]) << (i * 8) + } + + for i := uint(0); i < 4; i++ { + cx |= uint32(fs.VendorID[i+8]) << (i * 8) + } + return +} + +// signature returns the signature dword that's returned in eax when eax=1. 
+func (fs *FeatureSet) signature() uint32 { + var s uint32 + s |= uint32(fs.SteppingID & 0xf) + s |= uint32(fs.Model&0xf) << 4 + s |= uint32(fs.Family&0xf) << 8 + s |= uint32(fs.ProcessorType&0x3) << 12 + s |= uint32(fs.ExtendedModel&0xf) << 16 + s |= uint32(fs.ExtendedFamily&0xff) << 20 + return s +} + +// Helper to deconstruct signature dword. +func signatureSplit(v uint32) (ef, em, pt, f, m, sid uint8) { + sid = uint8(v & 0xf) + m = uint8(v>>4) & 0xf + f = uint8(v>>8) & 0xf + pt = uint8(v>>12) & 0x3 + em = uint8(v>>16) & 0xf + ef = uint8(v >> 20) + return +} + +// This factory function is only needed inside the package, package users +// should not be creating and using empty feature sets. +func newEmptyFeatureSet() *FeatureSet { + return newFeatureSet(make(map[Feature]bool)) +} + +// newFeatureSet creates a new FeatureSet with sensible default values and the +// provided set of features. +func newFeatureSet(s map[Feature]bool) *FeatureSet { + return &FeatureSet{ + Set: s, + VendorID: defaultVendorID, + ExtendedFamily: defaultExtFamily, + ExtendedModel: defaultExtModel, + ProcessorType: defaultType, + Family: defaultFamily, + Model: defaultModel, + SteppingID: defaultSteppingID, + } +} + +// Helper to convert blockwise feature bit masks into a set of features. Masks +// must be provided in order for each block, without skipping them. If a block +// does not matter for this feature set, 0 is specified. +func setFromBlockMasks(blocks ...uint32) map[Feature]bool { + s := make(map[Feature]bool) + for b, blockMask := range blocks { + for i := 0; i < blockSize; i++ { + if blockMask&1 != 0 { + s[featureID(block(b), i)] = true + } + blockMask >>= 1 + } + } + return s +} + +// blockMask returns the 32-bit mask associated with a block of features. 
+func (fs *FeatureSet) blockMask(b block) uint32 { + var mask uint32 + for i := 0; i < blockSize; i++ { + if fs.Set[featureID(b, i)] { + mask |= 1 << uint(i) + } + } + return mask +} + +// Remove removes a Feature from a FeatureSet. It ignores features +// that are not in the FeatureSet. +func (fs *FeatureSet) Remove(feature Feature) { + delete(fs.Set, feature) +} + +// Add adds a Feature to a FeatureSet. It ignores duplicate features. +func (fs *FeatureSet) Add(feature Feature) { + fs.Set[feature] = true +} + +// HasFeature tests whether or not a feature is in the given feature set. +func (fs *FeatureSet) HasFeature(feature Feature) bool { + return fs.Set[feature] +} + +// IsSubset returns true if the FeatureSet is a subset of the FeatureSet passed in. +// This is useful if you want to see if a FeatureSet is compatible with another +// FeatureSet, since you can only run with a given FeatureSet if it's a subset of +// the host's. +func (fs *FeatureSet) IsSubset(other *FeatureSet) bool { + return fs.Subtract(other) == nil +} + +// Subtract returns the features present in fs that are not present in other. +// If all features in fs are present in other, Subtract returns nil. +func (fs *FeatureSet) Subtract(other *FeatureSet) (diff map[Feature]bool) { + for f := range fs.Set { + if !other.Set[f] { + if diff == nil { + diff = make(map[Feature]bool) + } + diff[f] = true + } + } + + return +} + +// TakeFeatureIntersection will set the features in `fs` to the intersection of +// the features in `fs` and `other` (effectively clearing any feature bits on +// `fs` that are not also set in `other`). +func (fs *FeatureSet) TakeFeatureIntersection(other *FeatureSet) { + for f := range fs.Set { + if !other.Set[f] { + delete(fs.Set, f) + } + } +} + +// EmulateID emulates a cpuid instruction based on the feature set. 
+func (fs *FeatureSet) EmulateID(origAx, origCx uint32) (ax, bx, cx, dx uint32) { + switch cpuidFunction(origAx) { + case vendorID: + ax = uint32(xSaveInfo) // 0xd (xSaveInfo) is the highest function we support. + bx, dx, cx = fs.vendorIDRegs() + case featureInfo: + // clflush line size (ebx bits[15:8]) hardcoded as 8. This + // means cache lines of size 64 bytes. + bx = 8 << 8 + cx = fs.blockMask(block(0)) + dx = fs.blockMask(block(1)) + ax = fs.signature() + case xSaveInfo: + if !fs.UseXsave() { + return 0, 0, 0, 0 + } + return HostID(uint32(xSaveInfo), origCx) + case extendedFeatureInfo: + if origCx != 0 { + break // Only leaf 0 is supported. + } + bx = fs.blockMask(block(2)) + cx = fs.blockMask(block(3)) + case extendedFunctionInfo: + // We only support showing the extended features. + ax = uint32(extendedFeatures) + cx = 0 + case extendedFeatures: + cx = fs.blockMask(block(5)) + dx = fs.blockMask(block(6)) + } + + return +} + +// UseXsave returns the choice of fp state saving instruction. +func (fs *FeatureSet) UseXsave() bool { + return fs.HasFeature(X86FeatureXSAVE) && fs.HasFeature(X86FeatureOSXSAVE) +} + +// UseXsaveopt returns true if 'fs' supports the "xsaveopt" instruction. +func (fs *FeatureSet) UseXsaveopt() bool { + return fs.UseXsave() && fs.HasFeature(X86FeatureXSAVEOPT) +} + +// HostID executes a native CPUID instruction. +func HostID(axArg, cxArg uint32) (ax, bx, cx, dx uint32) + +// HostFeatureSet uses cpuid to get host values and construct a feature set +// that matches that of the host machine. Note that there are several places +// where there appear to be some unnecessary assignments between register names +// (ax, bx, cx, or dx) and featureBlockN variables. This is to explicitly show +// where the different feature blocks come from, to make the code easier to +// inspect and read. +func HostFeatureSet() *FeatureSet { + // eax=0 gets max supported feature and vendor ID. 
+ _, bx, cx, dx := HostID(0, 0) + vendorID := vendorIDFromRegs(bx, cx, dx) + + // eax=1 gets basic features in ecx:edx. + ax, _, cx, dx := HostID(1, 0) + featureBlock0 := cx + featureBlock1 := dx + ef, em, pt, f, m, sid := signatureSplit(ax) + + // eax=7, ecx=0 gets extended features in ecx:ebx. + _, bx, cx, _ = HostID(7, 0) + featureBlock2 := bx + featureBlock3 := cx + + // Leaf 0xd is supported only if CPUID.1:ECX.XSAVE[bit 26] is set. + var featureBlock4 uint32 + if (featureBlock0 & (1 << 26)) != 0 { + featureBlock4, _, _, _ = HostID(uint32(xSaveInfo), 1) + } + + // eax=0x80000000 gets supported extended levels. We use this to + // determine if there are any non-zero block 4 or block 6 bits to find. + var featureBlock5, featureBlock6 uint32 + if ax, _, _, _ := HostID(uint32(extendedFunctionInfo), 0); ax >= uint32(extendedFeatures) { + // eax=0x80000001 gets AMD added feature bits. + _, _, cx, dx = HostID(uint32(extendedFeatures), 0) + featureBlock5 = cx + featureBlock6 = dx + } + + set := setFromBlockMasks(featureBlock0, featureBlock1, featureBlock2, featureBlock3, featureBlock4, featureBlock5, featureBlock6) + return &FeatureSet{ + Set: set, + VendorID: vendorID, + ExtendedFamily: ef, + ExtendedModel: em, + ProcessorType: pt, + Family: f, + Model: m, + SteppingID: sid, + } +} + +// Reads max cpu frequency from host /proc/cpuinfo. Must run before +// whitelisting. This value is used to create the fake /proc/cpuinfo from a +// FeatureSet. +func initCPUFreq() { + cpuinfob, err := ioutil.ReadFile("/proc/cpuinfo") + if err != nil { + // Leave it as 0... The standalone VDSO bails out in the same + // way. + log.Warningf("Could not read /proc/cpuinfo: %v", err) + return + } + cpuinfo := string(cpuinfob) + + // We get the value straight from host /proc/cpuinfo. On machines with + // frequency scaling enabled, this will only get the current value + // which will likely be inaccurate. This is fine on machines with + // frequency scaling disabled. 
+ for _, line := range strings.Split(cpuinfo, "\n") { + if strings.Contains(line, "cpu MHz") { + splitMHz := strings.Split(line, ":") + if len(splitMHz) < 2 { + log.Warningf("Could not read /proc/cpuinfo: malformed cpu MHz line") + return + } + + // If there was a problem, leave cpuFreqMHz as 0. + var err error + cpuFreqMHz, err = strconv.ParseFloat(strings.TrimSpace(splitMHz[1]), 64) + if err != nil { + log.Warningf("Could not parse cpu MHz value %v: %v", splitMHz[1], err) + cpuFreqMHz = 0 + return + } + return + } + } + log.Warningf("Could not parse /proc/cpuinfo, it is empty or does not contain cpu MHz") +} + +func initFeaturesFromString() { + for f, s := range x86FeatureStrings { + x86FeaturesFromString[s] = f + } + for f, s := range x86FeatureParseOnlyStrings { + x86FeaturesFromString[s] = f + } +} + +func init() { + // initCPUFreq must be run before whitelists are enabled. + initCPUFreq() + initFeaturesFromString() +} diff --git a/pkg/cpuid/cpuid_parse_test.go b/pkg/cpuid/cpuid_parse_test.go new file mode 100644 index 000000000..c4f52818c --- /dev/null +++ b/pkg/cpuid/cpuid_parse_test.go @@ -0,0 +1,38 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cpuid + +import ( + "io/ioutil" + "strings" + "testing" +) + +// TestHostFeatureFlags ensures that package cpuid recognizes all features +// present on this host. 
+func TestHostFeatureFlags(t *testing.T) { + cpuinfoBytes, _ := ioutil.ReadFile("/proc/cpuinfo") + cpuinfo := string(cpuinfoBytes) + t.Logf("Host cpu info:\n%s", cpuinfo) + + for f := range HostFeatureSet().Set { + if f.flagString(false) == "" { + t.Errorf("Non-parsable feature: %v", f) + } + if s := f.flagString(true); !strings.Contains(cpuinfo, s) { + t.Errorf("Non-native flag: %v", f) + } + } +} diff --git a/pkg/cpuid/cpuid_test.go b/pkg/cpuid/cpuid_test.go new file mode 100644 index 000000000..02f732f85 --- /dev/null +++ b/pkg/cpuid/cpuid_test.go @@ -0,0 +1,223 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cpuid + +import ( + "testing" +) + +var justFPU = &FeatureSet{ + Set: map[Feature]bool{ + X86FeatureFPU: true, + }} + +var justFPUandPAE = &FeatureSet{ + Set: map[Feature]bool{ + X86FeatureFPU: true, + X86FeaturePAE: true, + }} + +func TestIsSubset(t *testing.T) { + if !justFPU.IsSubset(justFPUandPAE) { + t.Errorf("Got %v is not subset of %v, want IsSubset being true", justFPU, justFPUandPAE) + } + + if justFPUandPAE.IsSubset(justFPU) { + t.Errorf("Got %v is a subset of %v, want IsSubset being false", justFPU, justFPUandPAE) + } +} + +func TestTakeFeatureIntersection(t *testing.T) { + testFeatures := HostFeatureSet() + testFeatures.TakeFeatureIntersection(justFPU) + if !testFeatures.IsSubset(justFPU) { + t.Errorf("Got more features than expected after intersecting host features with justFPU: %v, want %v", testFeatures.Set, justFPU.Set) + } + if !testFeatures.HasFeature(X86FeatureFPU) { + t.Errorf("Got no features in testFeatures after intersecting, want %v", X86FeatureFPU) + } +} + +// TODO: Run this test on a very old platform, and make sure more +// bits are enabled than just FPU and PAE. This test currently may not detect +// if HostFeatureSet gives back junk bits. +func TestHostFeatureSet(t *testing.T) { + hostFeatures := HostFeatureSet() + if !justFPUandPAE.IsSubset(hostFeatures) { + t.Errorf("Got invalid feature set %v from HostFeatureSet()", hostFeatures) + } +} + +func TestHasFeature(t *testing.T) { + if !justFPU.HasFeature(X86FeatureFPU) { + t.Errorf("HasFeature failed, %v should contain %v", justFPU, X86FeatureFPU) + } + + if justFPU.HasFeature(X86FeatureAVX) { + t.Errorf("HasFeature failed, %v should not contain %v", justFPU, X86FeatureAVX) + } +} + +// Note: these tests are aware of and abuse internal details of FeatureSets. +// Users of FeatureSets should not depend on this. +func TestAdd(t *testing.T) { + // Test a basic insertion into the FeatureSet. 
+ testFeatures := newEmptyFeatureSet() + testFeatures.Add(X86FeatureCLFSH) + if len(testFeatures.Set) != 1 { + t.Errorf("Got length %v want 1", len(testFeatures.Set)) + } + + if !testFeatures.HasFeature(X86FeatureCLFSH) { + t.Errorf("Add failed, got %v want set with %v", testFeatures, X86FeatureCLFSH) + } + + // Test that duplicates are ignored. + testFeatures.Add(X86FeatureCLFSH) + if len(testFeatures.Set) != 1 { + t.Errorf("Got length %v, want 1", len(testFeatures.Set)) + } +} + +func TestRemove(t *testing.T) { + // Try removing the last feature. + testFeatures := newEmptyFeatureSet() + testFeatures.Add(X86FeatureFPU) + testFeatures.Add(X86FeaturePAE) + testFeatures.Remove(X86FeaturePAE) + if !testFeatures.HasFeature(X86FeatureFPU) || len(testFeatures.Set) != 1 || testFeatures.HasFeature(X86FeaturePAE) { + t.Errorf("Remove failed, got %v want %v", testFeatures, justFPU) + } + + // Try removing a feature not in the set. + testFeatures.Remove(X86FeatureRDRAND) + if !testFeatures.HasFeature(X86FeatureFPU) || len(testFeatures.Set) != 1 { + t.Errorf("Remove failed, got %v want %v", testFeatures, justFPU) + } +} + +func TestFeatureFromString(t *testing.T) { + f, ok := FeatureFromString("avx") + if f != X86FeatureAVX || !ok { + t.Errorf("got %v want avx", f) + } + + f, ok = FeatureFromString("bad") + if ok { + t.Errorf("got %v want nothing", f) + } +} + +// This tests function 0 (eax=0), which returns the vendor ID and highest cpuid +// function reported to be available. +func TestEmulateIDVendorAndLength(t *testing.T) { + testFeatures := newEmptyFeatureSet() + + ax, bx, cx, dx := testFeatures.EmulateID(0, 0) + wantEax := uint32(0xd) // Highest supported cpuid function. + + // These magical constants are the characters of "GenuineIntel". + // See Intel AN485 for a reference on why they are laid out like this. 
+ wantEbx := uint32(0x756e6547) + wantEcx := uint32(0x6c65746e) + wantEdx := uint32(0x49656e69) + if wantEax != ax { + t.Errorf("highest function failed, got %x want %x", ax, wantEax) + } + + if wantEbx != bx || wantEcx != cx || wantEdx != dx { + t.Errorf("vendor string emulation failed, bx:cx:dx, got %x:%x:%x want %x:%x:%x", bx, cx, dx, wantEbx, wantEcx, wantEdx) + } +} + +func TestEmulateIDBasicFeatures(t *testing.T) { + // Make a minimal test feature set. + testFeatures := newEmptyFeatureSet() + testFeatures.Add(X86FeatureCLFSH) + testFeatures.Add(X86FeatureAVX) + + ax, bx, cx, dx := testFeatures.EmulateID(1, 0) + ECXAVXBit := uint32(1 << uint(X86FeatureAVX)) + EDXCLFlushBit := uint32(1 << uint(X86FeatureCLFSH-32)) // We adjust by 32 since it's in block 1. + + if EDXCLFlushBit&dx == 0 || dx&^EDXCLFlushBit != 0 { + t.Errorf("EmulateID failed, got feature bits %x want %x", dx, testFeatures.blockMask(1)) + } + + if ECXAVXBit&cx == 0 || cx&^ECXAVXBit != 0 { + t.Errorf("EmulateID failed, got feature bits %x want %x", cx, testFeatures.blockMask(0)) + } + + // Default signature bits, based on values for i7/Xeon. + // See Intel AN485 for information on stepping/model bits. + defaultSignature := uint32(0x000106a0) + if defaultSignature != ax { + t.Errorf("EmulateID stepping emulation failed, got %x want %x", ax, defaultSignature) + } + + clflushSizeInfo := uint32(8 << 8) + if clflushSizeInfo != bx { + t.Errorf("EmulateID bx emulation failed, got %x want %x", bx, clflushSizeInfo) + } +} + +func TestEmulateIDExtendedFeatures(t *testing.T) { + // Make a minimal test feature set, one bit in each extended feature word. + testFeatures := newEmptyFeatureSet() + testFeatures.Add(X86FeatureSMEP) + testFeatures.Add(X86FeatureAVX512VBMI) + + ax, bx, cx, dx := testFeatures.EmulateID(7, 0) + EBXSMEPBit := uint32(1 << uint(X86FeatureSMEP-2*32)) // Adjust by 2*32 since SMEP is a block 2 feature. 
+ ECXAVXBit := uint32(1 << uint(X86FeatureAVX512VBMI-3*32)) // We adjust by 3*32 since it's a block 3 feature. + + // Test that the desired bit is set and no other bits are set. + if EBXSMEPBit&bx == 0 || bx&^EBXSMEPBit != 0 { + t.Errorf("extended feature emulation failed, got feature bits %x want %x", bx, testFeatures.blockMask(2)) + } + + if ECXAVXBit&cx == 0 || cx&^ECXAVXBit != 0 { + t.Errorf("extended feature emulation failed, got feature bits %x want %x", cx, testFeatures.blockMask(3)) + } + + if ax != 0 || dx != 0 { + t.Errorf("extended feature emulation failed, ax:dx, got %x:%x want 0:0", ax, dx) + } + + // Check that no subleaves other than 0 do anything. + ax, bx, cx, dx = testFeatures.EmulateID(7, 1) + if ax != 0 || bx != 0 || cx != 0 || dx != 0 { + t.Errorf("extended feature emulation failed, got %x:%x:%x:%x want 0:0", ax, bx, cx, dx) + } + +} + +// Checks that the expected extended features are available via cpuid functions +// 0x80000000 and up. +func TestEmulateIDExtended(t *testing.T) { + testFeatures := newEmptyFeatureSet() + testFeatures.Add(X86FeatureSYSCALL) + EDXSYSCALLBit := uint32(1 << uint(X86FeatureSYSCALL-6*32)) // Adjust by 6*32 since SYSCALL is a block 6 feature. 
+ + ax, bx, cx, dx := testFeatures.EmulateID(0x80000000, 0) + if ax != 0x80000001 || bx != 0 || cx != 0 || dx != 0 { + t.Errorf("EmulateID extended emulation failed, ax:bx:cx:dx, got %x:%x:%x:%x want 0x80000001:0:0:0", ax, bx, cx, dx) + } + + _, _, _, dx = testFeatures.EmulateID(0x80000001, 0) + if EDXSYSCALLBit&dx == 0 || dx&^EDXSYSCALLBit != 0 { + t.Errorf("extended feature emulation failed, got feature bits %x want %x", dx, testFeatures.blockMask(6)) + } +} diff --git a/pkg/dhcp/BUILD b/pkg/dhcp/BUILD new file mode 100644 index 000000000..b40860aac --- /dev/null +++ b/pkg/dhcp/BUILD @@ -0,0 +1,36 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "dhcp", + srcs = [ + "client.go", + "dhcp.go", + "server.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/dhcp", + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/stack", + "//pkg/tcpip/transport/udp", + "//pkg/waiter", + ], +) + +go_test( + name = "dhcp_test", + size = "small", + srcs = ["dhcp_test.go"], + embed = [":dhcp"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/link/channel", + "//pkg/tcpip/link/sniffer", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/stack", + "//pkg/tcpip/transport/udp", + ], +) diff --git a/pkg/dhcp/client.go b/pkg/dhcp/client.go new file mode 100644 index 000000000..9a4fd7ae4 --- /dev/null +++ b/pkg/dhcp/client.go @@ -0,0 +1,274 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package dhcp + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "log" + "sync" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/udp" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// Client is a DHCP client. +type Client struct { + stack *stack.Stack + nicid tcpip.NICID + linkAddr tcpip.LinkAddress + + mu sync.Mutex + addr tcpip.Address + cfg Config + lease time.Duration + cancelRenew func() +} + +// NewClient creates a DHCP client. +// +// TODO: add s.LinkAddr(nicid) to *stack.Stack. +func NewClient(s *stack.Stack, nicid tcpip.NICID, linkAddr tcpip.LinkAddress) *Client { + return &Client{ + stack: s, + nicid: nicid, + linkAddr: linkAddr, + } +} + +// Start starts the DHCP client. +// It will periodically search for an IP address using the Request method. +func (c *Client) Start() { + go func() { + for { + log.Print("DHCP request") + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + err := c.Request(ctx, "") + cancel() + if err == nil { + break + } + } + log.Printf("DHCP acquired IP %s for %s", c.Address(), c.Config().LeaseLength) + }() +} + +// Address reports the IP address acquired by the DHCP client. +func (c *Client) Address() tcpip.Address { + c.mu.Lock() + defer c.mu.Unlock() + return c.addr +} + +// Config reports the DHCP configuration acquired with the IP address lease. +func (c *Client) Config() Config { + c.mu.Lock() + defer c.mu.Unlock() + return c.cfg +} + +// Shutdown relinquishes any lease and ends any outstanding renewal timers. +func (c *Client) Shutdown() { + c.mu.Lock() + defer c.mu.Unlock() + if c.addr != "" { + c.stack.RemoveAddress(c.nicid, c.addr) + } + if c.cancelRenew != nil { + c.cancelRenew() + } +} + +// Request executes a DHCP request session. +// +// On success, it adds a new address to this client's TCPIP stack. 
+// If the server sets a lease limit a timer is set to automatically +// renew it. +func (c *Client) Request(ctx context.Context, requestedAddr tcpip.Address) error { + var wq waiter.Queue + ep, err := c.stack.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq) + if err != nil { + return fmt.Errorf("dhcp: outbound endpoint: %v", err) + } + err = ep.Bind(tcpip.FullAddress{ + Addr: "\x00\x00\x00\x00", + Port: clientPort, + }, nil) + defer ep.Close() + if err != nil { + return fmt.Errorf("dhcp: connect failed: %v", err) + } + + epin, err := c.stack.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq) + if err != nil { + return fmt.Errorf("dhcp: inbound endpoint: %v", err) + } + err = epin.Bind(tcpip.FullAddress{ + Addr: "\xff\xff\xff\xff", + Port: clientPort, + }, nil) + defer epin.Close() + if err != nil { + return fmt.Errorf("dhcp: connect failed: %v", err) + } + + var xid [4]byte + rand.Read(xid[:]) + + // DHCPDISCOVERY + options := options{ + {optDHCPMsgType, []byte{byte(dhcpDISCOVER)}}, + {optParamReq, []byte{ + 1, // request subnet mask + 3, // request router + 15, // domain name + 6, // domain name server + }}, + } + if requestedAddr != "" { + options = append(options, option{optReqIPAddr, []byte(requestedAddr)}) + } + h := make(header, headerBaseSize+options.len()) + h.init() + h.setOp(opRequest) + copy(h.xidbytes(), xid[:]) + h.setBroadcast() + copy(h.chaddr(), c.linkAddr) + h.setOptions(options) + + serverAddr := &tcpip.FullAddress{ + Addr: "\xff\xff\xff\xff", + Port: serverPort, + } + wopts := tcpip.WriteOptions{ + To: serverAddr, + } + if _, err := ep.Write(tcpip.SlicePayload(h), wopts); err != nil { + return fmt.Errorf("dhcp discovery write: %v", err) + } + + we, ch := waiter.NewChannelEntry(nil) + wq.EventRegister(&we, waiter.EventIn) + defer wq.EventUnregister(&we) + + // DHCPOFFER + for { + var addr tcpip.FullAddress + v, err := epin.Read(&addr) + if err == tcpip.ErrWouldBlock { + select { + case <-ch: + continue + case <-ctx.Done(): + 
return fmt.Errorf("reading dhcp offer: %v", tcpip.ErrAborted) + } + } + h = header(v) + if h.isValid() && h.op() == opReply && bytes.Equal(h.xidbytes(), xid[:]) { + break + } + } + if _, err := h.options(); err != nil { + return fmt.Errorf("dhcp offer: %v", err) + } + + var ack bool + var cfg Config + + // DHCPREQUEST + addr := tcpip.Address(h.yiaddr()) + if err := c.stack.AddAddress(c.nicid, ipv4.ProtocolNumber, addr); err != nil { + if err != tcpip.ErrDuplicateAddress { + return fmt.Errorf("adding address: %v", err) + } + } + defer func() { + if ack { + c.mu.Lock() + c.addr = addr + c.cfg = cfg + c.mu.Unlock() + } else { + c.stack.RemoveAddress(c.nicid, addr) + } + }() + h.setOp(opRequest) + for i, b := 0, h.yiaddr(); i < len(b); i++ { + b[i] = 0 + } + h.setOptions([]option{ + {optDHCPMsgType, []byte{byte(dhcpREQUEST)}}, + {optReqIPAddr, []byte(addr)}, + {optDHCPServer, h.siaddr()}, + }) + if _, err := ep.Write(tcpip.SlicePayload(h), wopts); err != nil { + return fmt.Errorf("dhcp discovery write: %v", err) + } + + // DHCPACK + for { + var addr tcpip.FullAddress + v, err := epin.Read(&addr) + if err == tcpip.ErrWouldBlock { + select { + case <-ch: + continue + case <-ctx.Done(): + return fmt.Errorf("reading dhcp ack: %v", tcpip.ErrAborted) + } + } + h = header(v) + if h.isValid() && h.op() == opReply && bytes.Equal(h.xidbytes(), xid[:]) { + break + } + } + opts, e := h.options() + if e != nil { + return fmt.Errorf("dhcp ack: %v", e) + } + if err := cfg.decode(opts); err != nil { + return fmt.Errorf("dhcp ack bad options: %v", err) + } + msgtype, e := opts.dhcpMsgType() + if e != nil { + return fmt.Errorf("dhcp ack: %v", e) + } + ack = msgtype == dhcpACK + if !ack { + return fmt.Errorf("dhcp: request not acknowledged") + } + if cfg.LeaseLength != 0 { + go c.renewAfter(cfg.LeaseLength) + } + return nil +} + +func (c *Client) renewAfter(d time.Duration) { + c.mu.Lock() + defer c.mu.Unlock() + if c.cancelRenew != nil { + c.cancelRenew() + } + ctx, cancel := 
context.WithCancel(context.Background()) + c.cancelRenew = cancel + go func() { + timer := time.NewTimer(d) + defer timer.Stop() + select { + case <-ctx.Done(): + case <-timer.C: + if err := c.Request(ctx, c.addr); err != nil { + log.Printf("address renewal failed: %v", err) + go c.renewAfter(1 * time.Minute) + } + } + }() +} diff --git a/pkg/dhcp/dhcp.go b/pkg/dhcp/dhcp.go new file mode 100644 index 000000000..762086853 --- /dev/null +++ b/pkg/dhcp/dhcp.go @@ -0,0 +1,263 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dhcp implements a DHCP client and server as described in RFC 2131. +package dhcp + +import ( + "bytes" + "encoding/binary" + "fmt" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +// Config is standard DHCP configuration. +type Config struct { + ServerAddress tcpip.Address // address of the server + SubnetMask tcpip.AddressMask // client address subnet mask + Gateway tcpip.Address // client default gateway + DomainNameServer tcpip.Address // client domain name server + LeaseLength time.Duration // length of the address lease +} + +func (cfg *Config) decode(opts []option) error { + *cfg = Config{} + for _, opt := range opts { + b := opt.body + if l := opt.code.len(); l != -1 && l != len(b) { + return fmt.Errorf("%s bad length: %d", opt.code, len(b)) + } + switch opt.code { + case optLeaseTime: + t := binary.BigEndian.Uint32(b) + cfg.LeaseLength = time.Duration(t) * time.Second + case optSubnetMask: + cfg.SubnetMask = tcpip.AddressMask(b) + case optDHCPServer: + cfg.ServerAddress = tcpip.Address(b) + case optDefaultGateway: + cfg.Gateway = tcpip.Address(b) + case optDomainNameServer: + cfg.DomainNameServer = tcpip.Address(b) + } + } + return nil +} + +func (cfg Config) encode() (opts []option) { + if cfg.ServerAddress != "" { + opts = append(opts, option{optDHCPServer, []byte(cfg.ServerAddress)}) + } + if 
cfg.SubnetMask != "" { + opts = append(opts, option{optSubnetMask, []byte(cfg.SubnetMask)}) + } + if cfg.Gateway != "" { + opts = append(opts, option{optDefaultGateway, []byte(cfg.Gateway)}) + } + if cfg.DomainNameServer != "" { + opts = append(opts, option{optDomainNameServer, []byte(cfg.DomainNameServer)}) + } + if l := cfg.LeaseLength / time.Second; l != 0 { + v := make([]byte, 4) + v[0] = byte(l >> 24) + v[1] = byte(l >> 16) + v[2] = byte(l >> 8) + v[3] = byte(l >> 0) + opts = append(opts, option{optLeaseTime, v}) + } + return opts +} + +const ( + serverPort = 67 + clientPort = 68 +) + +var magicCookie = []byte{99, 130, 83, 99} // RFC 1497 + +type xid uint32 + +type header []byte + +func (h header) init() { + h[1] = 0x01 // htype + h[2] = 0x06 // hlen + h[3] = 0x00 // hops + h[8], h[9] = 0, 0 // secs + copy(h[236:240], magicCookie) +} + +func (h header) isValid() bool { + if len(h) < 241 { + return false + } + if o := h.op(); o != opRequest && o != opReply { + return false + } + if h[1] != 0x01 || h[2] != 0x06 || h[3] != 0x00 { + return false + } + return bytes.Equal(h[236:240], magicCookie) && h[len(h)-1] == 0 +} + +func (h header) op() op { return op(h[0]) } +func (h header) setOp(o op) { h[0] = byte(o) } +func (h header) xidbytes() []byte { return h[4:8] } +func (h header) xid() xid { return xid(h[4])<<24 | xid(h[5])<<16 | xid(h[6])<<8 | xid(h[7]) } +func (h header) setBroadcast() { h[10], h[11] = 0x80, 0x00 } // flags top bit +func (h header) ciaddr() []byte { return h[12:16] } +func (h header) yiaddr() []byte { return h[16:20] } +func (h header) siaddr() []byte { return h[20:24] } +func (h header) giaddr() []byte { return h[24:28] } +func (h header) chaddr() []byte { return h[28:44] } +func (h header) sname() []byte { return h[44:108] } +func (h header) file() []byte { return h[108:236] } + +func (h header) options() (opts options, err error) { + i := headerBaseSize + for i < len(h) { + if h[i] == 0 { + i++ + continue + } + if h[i] == 255 { + break + } + 
// headerBaseSize is the size of a DHCP packet, including the magic cookie.
//
// Note that a DHCP packet is required to have an 'end' option that takes
// up an extra byte, so the minimum DHCP packet size is headerBaseSize + 1.
const headerBaseSize = 240

// option is a single DHCP option: a code and its body bytes.
type option struct {
	code optionCode
	body []byte
}

// optionCode identifies a DHCP option (RFC 1533 numbering).
type optionCode byte

const (
	optSubnetMask       optionCode = 1
	optDefaultGateway   optionCode = 3
	optDomainNameServer optionCode = 6
	optReqIPAddr        optionCode = 50
	optLeaseTime        optionCode = 51
	optDHCPMsgType      optionCode = 53 // dhcpMsgType
	optDHCPServer       optionCode = 54
	optParamReq         optionCode = 55
)

// len returns the fixed body length for the option code, or -1 if the
// option has no fixed length.
func (code optionCode) len() int {
	switch code {
	case optSubnetMask, optDefaultGateway, optDomainNameServer,
		optReqIPAddr, optLeaseTime, optDHCPServer:
		return 4
	case optDHCPMsgType:
		return 1
	case optParamReq:
		return -1 // no fixed length
	default:
		return -1
	}
}

// String implements fmt.Stringer.
func (code optionCode) String() string {
	switch code {
	case optSubnetMask:
		return "option(subnet-mask)"
	case optDefaultGateway:
		return "option(default-gateway)"
	case optDomainNameServer:
		return "option(dns)"
	case optReqIPAddr:
		return "option(request-ip-address)"
	case optLeaseTime:
		// Fix: was misspelled "option(least-time)".
		return "option(lease-time)"
	case optDHCPMsgType:
		return "option(message-type)"
	case optDHCPServer:
		return "option(server)"
	case optParamReq:
		return "option(parameter-request)"
	default:
		return fmt.Sprintf("option(%d)", code)
	}
}

// options is the ordered list of options in a DHCP message.
type options []option

// dhcpMsgType returns the value of the message-type option, or 0 with a
// nil error if the option is absent.
func (opts options) dhcpMsgType() (dhcpMsgType, error) {
	for _, opt := range opts {
		if opt.code != optDHCPMsgType {
			continue
		}
		if len(opt.body) != 1 {
			return 0, fmt.Errorf("%s: bad length: %d", optDHCPMsgType, len(opt.body))
		}
		v := opt.body[0]
		// Valid message types are 1 through 7 (RFC 1533, section 9.4).
		if v <= 0 || v >= 8 {
			return 0, fmt.Errorf("%s: unknown value: %d", optDHCPMsgType, v)
		}
		return dhcpMsgType(v), nil
	}
	return 0, nil
}

// len reports the number of bytes needed to encode opts, including the
// trailing 'pad' byte.
func (opts options) len() int {
	total := 1 // extra byte for 'pad' option
	for _, opt := range opts {
		total += 1 + 1 + len(opt.body) // code + len + body
	}
	return total
}

// op is the BOOTP/DHCP message op code.
type op byte

const (
	opRequest op = 0x01
	opReply   op = 0x02
)

// dhcpMsgType is the DHCP Message Type from RFC 1533, section 9.4.
type dhcpMsgType byte

const (
	dhcpDISCOVER dhcpMsgType = 1
	dhcpOFFER    dhcpMsgType = 2
	dhcpREQUEST  dhcpMsgType = 3
	dhcpDECLINE  dhcpMsgType = 4
	dhcpACK      dhcpMsgType = 5
	dhcpNAK      dhcpMsgType = 6
	dhcpRELEASE  dhcpMsgType = 7
)
+ +package dhcp + +import ( + "context" + "strings" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/channel" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sniffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/udp" +) + +func TestDHCP(t *testing.T) { + const defaultMTU = 65536 + id, linkEP := channel.New(256, defaultMTU, "") + if testing.Verbose() { + id = sniffer.New(id) + } + + go func() { + for pkt := range linkEP.C { + v := make(buffer.View, len(pkt.Header)+len(pkt.Payload)) + copy(v, pkt.Header) + copy(v[len(pkt.Header):], pkt.Payload) + vv := v.ToVectorisedView([1]buffer.View{}) + linkEP.Inject(pkt.Proto, &vv) + } + }() + + s := stack.New([]string{ipv4.ProtocolName}, []string{udp.ProtocolName}) + + const nicid tcpip.NICID = 1 + if err := s.CreateNIC(nicid, id); err != nil { + t.Fatal(err) + } + if err := s.AddAddress(nicid, ipv4.ProtocolNumber, "\x00\x00\x00\x00"); err != nil { + t.Fatal(err) + } + if err := s.AddAddress(nicid, ipv4.ProtocolNumber, "\xff\xff\xff\xff"); err != nil { + t.Fatal(err) + } + const serverAddr = tcpip.Address("\xc0\xa8\x03\x01") + if err := s.AddAddress(nicid, ipv4.ProtocolNumber, serverAddr); err != nil { + t.Fatal(err) + } + + s.SetRouteTable([]tcpip.Route{{ + Destination: tcpip.Address(strings.Repeat("\x00", 4)), + Mask: tcpip.Address(strings.Repeat("\x00", 4)), + Gateway: "", + NIC: nicid, + }}) + + var clientAddrs = []tcpip.Address{"\xc0\xa8\x03\x02", "\xc0\xa8\x03\x03"} + + serverCfg := Config{ + ServerAddress: serverAddr, + SubnetMask: "\xff\xff\xff\x00", + Gateway: "\xc0\xa8\x03\xF0", + DomainNameServer: "\x08\x08\x08\x08", + LeaseLength: 24 * time.Hour, + } + serverCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, err := NewServer(serverCtx, s, clientAddrs, 
serverCfg) + if err != nil { + t.Fatal(err) + } + + const clientLinkAddr0 = tcpip.LinkAddress("\x52\x11\x22\x33\x44\x52") + c0 := NewClient(s, nicid, clientLinkAddr0) + if err := c0.Request(context.Background(), ""); err != nil { + t.Fatal(err) + } + if got, want := c0.Address(), clientAddrs[0]; got != want { + t.Errorf("c.Addr()=%s, want=%s", got, want) + } + if err := c0.Request(context.Background(), ""); err != nil { + t.Fatal(err) + } + if got, want := c0.Address(), clientAddrs[0]; got != want { + t.Errorf("c.Addr()=%s, want=%s", got, want) + } + + const clientLinkAddr1 = tcpip.LinkAddress("\x52\x11\x22\x33\x44\x53") + c1 := NewClient(s, nicid, clientLinkAddr1) + if err := c1.Request(context.Background(), ""); err != nil { + t.Fatal(err) + } + if got, want := c1.Address(), clientAddrs[1]; got != want { + t.Errorf("c.Addr()=%s, want=%s", got, want) + } + + if err := c0.Request(context.Background(), ""); err != nil { + t.Fatal(err) + } + if got, want := c0.Address(), clientAddrs[0]; got != want { + t.Errorf("c.Addr()=%s, want=%s", got, want) + } + + if got, want := c0.Config(), serverCfg; got != want { + t.Errorf("client config:\n\t%#+v\nwant:\n\t%#+v", got, want) + } +} diff --git a/pkg/dhcp/server.go b/pkg/dhcp/server.go new file mode 100644 index 000000000..d132d90b4 --- /dev/null +++ b/pkg/dhcp/server.go @@ -0,0 +1,289 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dhcp + +import ( + "context" + "fmt" + "log" + "sync" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/udp" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// Server is a DHCP server. 
+type Server struct { + stack *stack.Stack + broadcast tcpip.FullAddress + wq waiter.Queue + ep tcpip.Endpoint + addrs []tcpip.Address // TODO: use a tcpip.AddressMask or range structure + cfg Config + cfgopts []option // cfg to send to client + + handlers []chan header + + mu sync.Mutex + leases map[tcpip.LinkAddress]serverLease +} + +// NewServer creates a new DHCP server and begins serving. +// The server continues serving until ctx is done. +func NewServer(ctx context.Context, stack *stack.Stack, addrs []tcpip.Address, cfg Config) (*Server, error) { + s := &Server{ + stack: stack, + addrs: addrs, + cfg: cfg, + cfgopts: cfg.encode(), + broadcast: tcpip.FullAddress{ + Addr: "\xff\xff\xff\xff", + Port: clientPort, + }, + + handlers: make([]chan header, 8), + leases: make(map[tcpip.LinkAddress]serverLease), + } + + var err *tcpip.Error + s.ep, err = s.stack.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &s.wq) + if err != nil { + return nil, fmt.Errorf("dhcp: server endpoint: %v", err) + } + serverBroadcast := tcpip.FullAddress{ + Addr: "", + Port: serverPort, + } + if err := s.ep.Bind(serverBroadcast, nil); err != nil { + return nil, fmt.Errorf("dhcp: server bind: %v", err) + } + + for i := 0; i < len(s.handlers); i++ { + ch := make(chan header, 8) + s.handlers[i] = ch + go s.handler(ctx, ch) + } + + go s.expirer(ctx) + go s.reader(ctx) + return s, nil +} + +func (s *Server) expirer(ctx context.Context) { + t := time.NewTicker(1 * time.Minute) + defer t.Stop() + for { + select { + case <-t.C: + s.mu.Lock() + for linkAddr, lease := range s.leases { + if time.Since(lease.start) > s.cfg.LeaseLength { + lease.state = leaseExpired + s.leases[linkAddr] = lease + } + } + s.mu.Unlock() + case <-ctx.Done(): + return + } + } +} + +// reader listens for all incoming DHCP packets and fans them out to +// handling goroutines based on XID as session identifiers. 
+func (s *Server) reader(ctx context.Context) { + we, ch := waiter.NewChannelEntry(nil) + s.wq.EventRegister(&we, waiter.EventIn) + defer s.wq.EventUnregister(&we) + + for { + var addr tcpip.FullAddress + v, err := s.ep.Read(&addr) + if err == tcpip.ErrWouldBlock { + select { + case <-ch: + continue + case <-ctx.Done(): + return + } + } + + h := header(v) + if !h.isValid() || h.op() != opRequest { + continue + } + xid := h.xid() + + // Fan out the packet to a handler goroutine. + // + // Use a consistent handler for a given xid, so that + // packets from a particular client are processed + // in order. + ch := s.handlers[int(xid)%len(s.handlers)] + select { + case <-ctx.Done(): + return + case ch <- h: + default: + // drop the packet + } + } +} + +func (s *Server) handler(ctx context.Context, ch chan header) { + for { + select { + case h := <-ch: + if h == nil { + return + } + opts, err := h.options() + if err != nil { + continue + } + // TODO: Handle DHCPRELEASE and DHCPDECLINE. + msgtype, err := opts.dhcpMsgType() + if err != nil { + continue + } + switch msgtype { + case dhcpDISCOVER: + s.handleDiscover(h, opts) + case dhcpREQUEST: + s.handleRequest(h, opts) + } + case <-ctx.Done(): + return + } + } +} + +func (s *Server) handleDiscover(hreq header, opts options) { + linkAddr := tcpip.LinkAddress(hreq.chaddr()[:6]) + xid := hreq.xid() + + s.mu.Lock() + lease := s.leases[linkAddr] + switch lease.state { + case leaseNew: + if len(s.leases) < len(s.addrs) { + // Find an unused address. + // TODO: avoid building this state on each request. + alloced := make(map[tcpip.Address]bool) + for _, lease := range s.leases { + alloced[lease.addr] = true + } + for _, addr := range s.addrs { + if !alloced[addr] { + lease = serverLease{ + start: time.Now(), + addr: addr, + xid: xid, + state: leaseOffer, + } + s.leases[linkAddr] = lease + break + } + } + } else { + // No more addresses, take an expired address. 
+ for k, oldLease := range s.leases { + if oldLease.state == leaseExpired { + delete(s.leases, k) + lease = serverLease{ + start: time.Now(), + addr: lease.addr, + xid: xid, + state: leaseOffer, + } + s.leases[linkAddr] = lease + break + } + } + log.Printf("server has no more addresses") + s.mu.Unlock() + return + } + case leaseOffer, leaseAck, leaseExpired: + lease = serverLease{ + start: time.Now(), + addr: s.leases[linkAddr].addr, + xid: xid, + state: leaseOffer, + } + s.leases[linkAddr] = lease + } + s.mu.Unlock() + + // DHCPOFFER + opts = options{{optDHCPMsgType, []byte{byte(dhcpOFFER)}}} + opts = append(opts, s.cfgopts...) + h := make(header, headerBaseSize+opts.len()) + h.init() + h.setOp(opReply) + copy(h.xidbytes(), hreq.xidbytes()) + copy(h.yiaddr(), lease.addr) + copy(h.siaddr(), s.cfg.ServerAddress) + copy(h.chaddr(), hreq.chaddr()) + h.setOptions(opts) + s.ep.Write(tcpip.SlicePayload(h), tcpip.WriteOptions{To: &s.broadcast}) +} + +func (s *Server) handleRequest(hreq header, opts options) { + linkAddr := tcpip.LinkAddress(hreq.chaddr()[:6]) + xid := hreq.xid() + + s.mu.Lock() + lease := s.leases[linkAddr] + switch lease.state { + case leaseOffer, leaseAck, leaseExpired: + lease = serverLease{ + start: time.Now(), + addr: s.leases[linkAddr].addr, + xid: xid, + state: leaseAck, + } + s.leases[linkAddr] = lease + } + s.mu.Unlock() + + if lease.state == leaseNew { + // TODO: NACK or accept request + return + } + + // DHCPACK + opts = []option{{optDHCPMsgType, []byte{byte(dhcpACK)}}} + opts = append(opts, s.cfgopts...) 
+ h := make(header, headerBaseSize+opts.len()) + h.init() + h.setOp(opReply) + copy(h.xidbytes(), hreq.xidbytes()) + copy(h.yiaddr(), lease.addr) + copy(h.siaddr(), s.cfg.ServerAddress) + copy(h.chaddr(), hreq.chaddr()) + h.setOptions(opts) + s.ep.Write(tcpip.SlicePayload(h), tcpip.WriteOptions{To: &s.broadcast}) +} + +type leaseState int + +const ( + leaseNew leaseState = iota + leaseOffer + leaseAck + leaseExpired +) + +type serverLease struct { + start time.Time + addr tcpip.Address + xid xid + state leaseState +} diff --git a/pkg/eventchannel/BUILD b/pkg/eventchannel/BUILD new file mode 100644 index 000000000..ea0c587be --- /dev/null +++ b/pkg/eventchannel/BUILD @@ -0,0 +1,33 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "eventchannel", + srcs = [ + "event.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/eventchannel", + visibility = ["//:sandbox"], + deps = [ + ":eventchannel_go_proto", + "//pkg/log", + "//pkg/unet", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library", + ], +) + +proto_library( + name = "eventchannel_proto", + srcs = ["event.proto"], + visibility = ["//:sandbox"], +) + +go_proto_library( + name = "eventchannel_go_proto", + importpath = "gvisor.googlesource.com/gvisor/pkg/eventchannel/eventchannel_go_proto", + proto = ":eventchannel_proto", + visibility = ["//:sandbox"], +) diff --git a/pkg/eventchannel/event.go b/pkg/eventchannel/event.go new file mode 100644 index 000000000..bfd28256e --- /dev/null +++ b/pkg/eventchannel/event.go @@ -0,0 +1,165 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package eventchannel contains functionality for sending any protobuf message +// on a socketpair. +// +// The wire format is a uvarint length followed by a binary protobuf.Any +// message. +package eventchannel + +import ( + "encoding/binary" + "fmt" + "sync" + "syscall" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + pb "gvisor.googlesource.com/gvisor/pkg/eventchannel/eventchannel_go_proto" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +// Emitter emits a proto message. +type Emitter interface { + // Emit writes a single eventchannel message to an emitter. Emit should + // return hangup = true to indicate an emitter has "hung up" and no further + // messages should be directed to it. + Emit(msg proto.Message) (hangup bool, err error) + + // Close closes this emitter. Emit cannot be used after Close is called. + Close() error +} + +var ( + mu sync.Mutex + emitters = make(map[Emitter]struct{}) +) + +// Emit emits a message using all added emitters. +func Emit(msg proto.Message) error { + mu.Lock() + defer mu.Unlock() + + var err error + for e := range emitters { + hangup, eerr := e.Emit(msg) + if eerr != nil { + if err == nil { + err = fmt.Errorf("error emitting %v: on %v: %v", msg, e, eerr) + } else { + err = fmt.Errorf("%v; on %v: %v", err, e, eerr) + } + + // Log as well, since most callers ignore the error. 
+ log.Warningf("Error emitting %v on %v: %v", msg, e, eerr) + } + if hangup { + log.Infof("Hangup on eventchannel emitter %v.", e) + delete(emitters, e) + } + } + + return err +} + +// AddEmitter adds a new emitter. +func AddEmitter(e Emitter) { + mu.Lock() + defer mu.Unlock() + emitters[e] = struct{}{} +} + +func marshal(msg proto.Message) ([]byte, error) { + anypb, err := ptypes.MarshalAny(msg) + if err != nil { + return nil, err + } + + // Wire format is uvarint message length followed by binary proto. + bufMsg, err := proto.Marshal(anypb) + if err != nil { + return nil, err + } + p := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(p, uint64(len(bufMsg))) + return append(p[:n], bufMsg...), nil +} + +// socketEmitter emits proto messages on a socket. +type socketEmitter struct { + socket *unet.Socket +} + +// SocketEmitter creates a new event channel based on the given fd. +// +// SocketEmitter takes ownership of fd. +func SocketEmitter(fd int) (Emitter, error) { + s, err := unet.NewSocket(fd) + if err != nil { + return nil, err + } + + return &socketEmitter{ + socket: s, + }, nil +} + +// Emit implements Emitter.Emit. +func (s *socketEmitter) Emit(msg proto.Message) (bool, error) { + p, err := marshal(msg) + if err != nil { + return false, err + } + for done := 0; done < len(p); { + n, err := s.socket.Write(p[done:]) + if err != nil { + return (err == syscall.EPIPE), err + } + done += n + } + return false, nil +} + +// Close implements Emitter.Emit. +func (s *socketEmitter) Close() error { + return s.socket.Close() +} + +// debugEmitter wraps an emitter to emit stringified event messages. This is +// useful for debugging -- when the messages are intended for humans. +type debugEmitter struct { + inner Emitter +} + +// DebugEmitterFrom creates a new event channel emitter by wraping an existing +// raw emitter. 
+func DebugEmitterFrom(inner Emitter) Emitter { + return &debugEmitter{ + inner: inner, + } +} + +func (d *debugEmitter) Emit(msg proto.Message) (bool, error) { + ev := &pb.DebugEvent{ + Name: proto.MessageName(msg), + Text: proto.MarshalTextString(msg), + } + return d.inner.Emit(ev) +} + +func (d *debugEmitter) Close() error { + return d.inner.Close() +} diff --git a/pkg/eventchannel/event.proto b/pkg/eventchannel/event.proto new file mode 100644 index 000000000..455f03658 --- /dev/null +++ b/pkg/eventchannel/event.proto @@ -0,0 +1,27 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package gvisor; + +// A debug event encapsulates any other event protobuf in text format. This is +// useful because clients reading events emitted this way do not need to link +// the event protobufs to display them in a human-readable format. +message DebugEvent { + // Name of the inner message. + string name = 1; + // Text representation of the inner message content. 
+ string text = 2; +} diff --git a/pkg/fd/BUILD b/pkg/fd/BUILD new file mode 100644 index 000000000..e69d83d06 --- /dev/null +++ b/pkg/fd/BUILD @@ -0,0 +1,17 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "fd", + srcs = ["fd.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/fd", + visibility = ["//visibility:public"], +) + +go_test( + name = "fd_test", + size = "small", + srcs = ["fd_test.go"], + embed = [":fd"], +) diff --git a/pkg/fd/fd.go b/pkg/fd/fd.go new file mode 100644 index 000000000..32d24c41b --- /dev/null +++ b/pkg/fd/fd.go @@ -0,0 +1,213 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fd provides types for working with file descriptors. +package fd + +import ( + "fmt" + "io" + "os" + "runtime" + "sync/atomic" + "syscall" +) + +// ReadWriter implements io.ReadWriter, io.ReaderAt, and io.WriterAt for fd. It +// does not take ownership of fd. +type ReadWriter struct { + // fd is accessed atomically so FD.Close/Release can swap it. + fd int64 +} + +var _ io.ReadWriter = (*ReadWriter)(nil) +var _ io.ReaderAt = (*ReadWriter)(nil) +var _ io.WriterAt = (*ReadWriter)(nil) + +// NewReadWriter creates a ReadWriter for fd. 
+func NewReadWriter(fd int) *ReadWriter { + return &ReadWriter{int64(fd)} +} + +func fixCount(n int, err error) (int, error) { + if n < 0 { + n = 0 + } + return n, err +} + +// Read implements io.Reader. +func (r *ReadWriter) Read(b []byte) (int, error) { + c, err := fixCount(syscall.Read(int(atomic.LoadInt64(&r.fd)), b)) + if c == 0 && len(b) > 0 && err == nil { + return 0, io.EOF + } + return c, err +} + +// ReadAt implements io.ReaderAt. +// +// ReadAt always returns a non-nil error when c < len(b). +func (r *ReadWriter) ReadAt(b []byte, off int64) (c int, err error) { + for len(b) > 0 { + var m int + m, err = fixCount(syscall.Pread(int(atomic.LoadInt64(&r.fd)), b, off)) + if m == 0 && err == nil { + return c, io.EOF + } + if err != nil { + return c, err + } + c += m + b = b[m:] + off += int64(m) + } + return +} + +// Write implements io.Writer. +func (r *ReadWriter) Write(b []byte) (int, error) { + var err error + var n, remaining int + for remaining = len(b); remaining > 0; { + woff := len(b) - remaining + n, err = syscall.Write(int(atomic.LoadInt64(&r.fd)), b[woff:]) + + if n > 0 { + // syscall.Write wrote some bytes. This is the common case. + remaining -= n + } else { + if err == nil { + // syscall.Write did not write anything nor did it return an error. + // + // There is no way to guarantee that a subsequent syscall.Write will + // make forward progress so just panic. + panic(fmt.Sprintf("syscall.Write returned %d with no error", n)) + } + + if err != syscall.EINTR { + // If the write failed for anything other than a signal, bail out. + break + } + } + } + + return len(b) - remaining, err +} + +// WriteAt implements io.WriterAt. +func (r *ReadWriter) WriteAt(b []byte, off int64) (c int, err error) { + for len(b) > 0 { + var m int + m, err = fixCount(syscall.Pwrite(int(atomic.LoadInt64(&r.fd)), b, off)) + if err != nil { + break + } + c += m + b = b[m:] + off += int64(m) + } + return +} + +// FD owns a host file descriptor. 
+// +// It is similar to os.File, with a few important distinctions: +// +// FD provies a Release() method which relinquishes ownership. Like os.File, +// FD adds a finalizer to close the backing FD. However, the finalizer cannot +// be removed from os.File, forever pinning the lifetime of an FD to its +// os.File. +// +// FD supports both blocking and non-blocking operation. os.File only +// supports blocking operation. +type FD struct { + ReadWriter +} + +// New creates a new FD. +// +// New takes ownership of fd. +func New(fd int) *FD { + if fd < 0 { + return &FD{ReadWriter{-1}} + } + f := &FD{ReadWriter{int64(fd)}} + runtime.SetFinalizer(f, (*FD).Close) + return f +} + +// NewFromFile creates a new FD from an os.File. +// +// NewFromFile does not transfer ownership of the file descriptor (it will be +// duplicated, so both the os.File and FD will eventually need to be closed +// and some (but not all) changes made to the FD will be applied to the +// os.File as well). +// +// The returned FD is always blocking (Go 1.9+). +func NewFromFile(file *os.File) (*FD, error) { + fd, err := syscall.Dup(int(file.Fd())) + if err != nil { + return &FD{ReadWriter{-1}}, err + } + return New(fd), nil +} + +// Close closes the file descriptor contained in the FD. +// +// Close is safe to call multiple times, but will return an error after the +// first call. +// +// Concurrently calling Close and any other method is undefined. +func (f *FD) Close() error { + runtime.SetFinalizer(f, nil) + return syscall.Close(int(atomic.SwapInt64(&f.fd, -1))) +} + +// Release relinquishes ownership of the contained file descriptor. +// +// Concurrently calling Release and any other method is undefined. +func (f *FD) Release() int { + runtime.SetFinalizer(f, nil) + return int(atomic.SwapInt64(&f.fd, -1)) +} + +// FD returns the file descriptor owned by FD. FD retains ownership. +func (f *FD) FD() int { + return int(atomic.LoadInt64(&f.fd)) +} + +// File converts the FD to an os.File. 
+// +// FD does not transfer ownership of the file descriptor (it will be +// duplicated, so both the FD and os.File will eventually need to be closed +// and some (but not all) changes made to the os.File will be applied to the +// FD as well). +// +// This operation is somewhat expensive, so care should be taken to minimize +// its use. +func (f *FD) File() (*os.File, error) { + fd, err := syscall.Dup(int(atomic.LoadInt64(&f.fd))) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), ""), nil +} + +// ReleaseToFile returns an os.File that takes ownership of the FD. +// +// name is passed to os.NewFile. +func (f *FD) ReleaseToFile(name string) *os.File { + return os.NewFile(uintptr(f.Release()), name) +} diff --git a/pkg/fd/fd_test.go b/pkg/fd/fd_test.go new file mode 100644 index 000000000..94b3eb7cc --- /dev/null +++ b/pkg/fd/fd_test.go @@ -0,0 +1,136 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fd + +import ( + "math" + "os" + "syscall" + "testing" +) + +func TestSetNegOne(t *testing.T) { + type entry struct { + name string + file *FD + fn func() error + } + var tests []entry + + fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("syscall.Socket:", err) + } + f1 := New(fd) + tests = append(tests, entry{ + "Release", + f1, + func() error { + return syscall.Close(f1.Release()) + }, + }) + + fd, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("syscall.Socket:", err) + } + f2 := New(fd) + tests = append(tests, entry{ + "Close", + f2, + f2.Close, + }) + + for _, test := range tests { + if err := test.fn(); err != nil { + t.Errorf("%s: %v", test.name, err) + continue + } + if fd := test.file.FD(); fd != -1 { + t.Errorf("%s: got FD() = %d, want = -1", test.name, fd) + } + } +} + +func TestStartsNegOne(t *testing.T) { + type entry struct { + name string + file *FD + } + + tests := []entry{ + {"-1", New(-1)}, + {"-2", New(-2)}, + {"MinInt32", New(math.MinInt32)}, + {"MinInt64", New(math.MinInt64)}, + } + + for _, test := range tests { + if fd := test.file.FD(); fd != -1 { + t.Errorf("%s: got FD() = %d, want = -1", test.name, fd) + } + } +} + +func TestFileDotFile(t *testing.T) { + fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("syscall.Socket:", err) + } + + f := New(fd) + of, err := f.File() + if err != nil { + t.Fatalf("File got err %v want nil", err) + } + + if ofd, nfd := int(of.Fd()), f.FD(); ofd == nfd || ofd == -1 { + // Try not to double close the FD. 
+ f.Release() + + t.Fatalf("got %#v.File().Fd() = %d, want new FD", f, ofd) + } + + f.Close() + of.Close() +} + +func TestFileDotFileError(t *testing.T) { + f := &FD{ReadWriter{-2}} + + if of, err := f.File(); err == nil { + t.Errorf("File %v got nil err want non-nil", of) + of.Close() + } +} + +func TestNewFromFile(t *testing.T) { + f, err := NewFromFile(os.Stdin) + if err != nil { + t.Fatalf("NewFromFile got err %v want nil", err) + } + if nfd, ofd := f.FD(), int(os.Stdin.Fd()); nfd == -1 || nfd == ofd { + t.Errorf("got FD() = %d, want = new FD (old FD was %d)", nfd, ofd) + } + f.Close() +} + +func TestNewFromFileError(t *testing.T) { + f, err := NewFromFile(nil) + if err == nil { + t.Errorf("NewFromFile got %v with nil err want non-nil", f) + f.Close() + } +} diff --git a/pkg/gate/BUILD b/pkg/gate/BUILD new file mode 100644 index 000000000..381474d9e --- /dev/null +++ b/pkg/gate/BUILD @@ -0,0 +1,22 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "gate", + srcs = [ + "gate.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/gate", + visibility = ["//visibility:public"], +) + +go_test( + name = "gate_test", + srcs = [ + "gate_test.go", + ], + deps = [ + ":gate", + ], +) diff --git a/pkg/gate/gate.go b/pkg/gate/gate.go new file mode 100644 index 000000000..47651c761 --- /dev/null +++ b/pkg/gate/gate.go @@ -0,0 +1,120 @@ +// Package gate provides a usage Gate synchronization primitive. +package gate + +import ( + "sync/atomic" +) + +const ( + // gateClosed is the bit set in the gate's user count to indicate that + // it has been closed. It is the MSB of the 32-bit field; the other 31 + // bits carry the actual count. + gateClosed = 0x80000000 +) + +// Gate is a synchronization primitive that allows concurrent goroutines to +// "enter" it as long as it hasn't been closed yet. 
Once it's been closed, +// goroutines cannot enter it anymore, but are allowed to leave, and the closer +// will be informed when all goroutines have left. +// +// Many goroutines are allowed to enter the gate concurrently, but only one is +// allowed to close it. +// +// This is similar to a r/w critical section, except that goroutines "entering" +// never block: they either enter immediately or fail to enter. The closer will +// block waiting for all goroutines currently inside the gate to leave. +// +// This function is implemented efficiently. On x86, only one interlocked +// operation is performed on enter, and one on leave. +// +// This is useful, for example, in cases when a goroutine is trying to clean up +// an object for which multiple goroutines have pointers. In such a case, users +// would be required to enter and leave the gates, and the cleaner would wait +// until all users are gone (and no new ones are allowed) before proceeding. +// +// Users: +// +// if !g.Enter() { +// // Gate is closed, we can't use the object. +// return +// } +// +// // Do something with object. +// [...] +// +// g.Leave() +// +// Closer: +// +// // Prevent new users from using the object, and wait for the existing +// // ones to complete. +// g.Close() +// +// // Clean up the object. +// [...] +// +type Gate struct { + userCount uint32 + done chan struct{} +} + +// Enter tries to enter the gate. It will succeed if it hasn't been closed yet, +// in which case the caller must eventually call Leave(). +// +// This function is thread-safe. +func (g *Gate) Enter() bool { + if g == nil { + return false + } + + for { + v := atomic.LoadUint32(&g.userCount) + if v&gateClosed != 0 { + return false + } + + if atomic.CompareAndSwapUint32(&g.userCount, v, v+1) { + return true + } + } +} + +// Leave leaves the gate. This must only be called after a successful call to +// Enter(). 
If the gate has been closed and this is the last one inside the +// gate, it will notify the closer that the gate is done. +// +// This function is thread-safe. +func (g *Gate) Leave() { + for { + v := atomic.LoadUint32(&g.userCount) + if v&^gateClosed == 0 { + panic("leaving a gate with zero usage count") + } + + if atomic.CompareAndSwapUint32(&g.userCount, v, v-1) { + if v == gateClosed+1 { + close(g.done) + } + return + } + } +} + +// Close closes the gate for entering, and waits until all goroutines [that are +// currently inside the gate] leave before returning. +// +// Only one goroutine can call this function. +func (g *Gate) Close() { + for { + v := atomic.LoadUint32(&g.userCount) + if v&^gateClosed != 0 && g.done == nil { + g.done = make(chan struct{}) + } + if atomic.CompareAndSwapUint32(&g.userCount, v, v|gateClosed) { + if v&^gateClosed != 0 { + <-g.done + } + return + } + } +} diff --git a/pkg/gate/gate_test.go b/pkg/gate/gate_test.go new file mode 100644 index 000000000..b3b101a0c --- /dev/null +++ b/pkg/gate/gate_test.go @@ -0,0 +1,175 @@ +package gate_test + +import ( + "sync" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/gate" +) + +func TestBasicEnter(t *testing.T) { + var g gate.Gate + + if !g.Enter() { + t.Fatalf("Failed to enter when it should be allowed") + } + + g.Leave() + + g.Close() + + if g.Enter() { + t.Fatalf("Allowed to enter when it should fail") + } +} + +func enterFunc(t *testing.T, g *gate.Gate, enter, leave, reenter chan struct{}, done1, done2, done3 *sync.WaitGroup) { + // Wait until instructed to enter. + <-enter + if !g.Enter() { + t.Errorf("Failed to enter when it should be allowed") + } + + done1.Done() + + // Wait until instructed to leave. + <-leave + g.Leave() + + done2.Done() + + // Wait until instructed to reenter. 
+ <-reenter + if g.Enter() { + t.Errorf("Allowed to enter when it should fail") + } + done3.Done() +} + +func TestConcurrentEnter(t *testing.T) { + var g gate.Gate + var done1, done2, done3 sync.WaitGroup + + // Create 1000 worker goroutines. + enter := make(chan struct{}) + leave := make(chan struct{}) + reenter := make(chan struct{}) + done1.Add(1000) + done2.Add(1000) + done3.Add(1000) + for i := 0; i < 1000; i++ { + go enterFunc(t, &g, enter, leave, reenter, &done1, &done2, &done3) + } + + // Tell them all to enter, then leave. + close(enter) + done1.Wait() + + close(leave) + done2.Wait() + + // Close the gate, then have the workers try to enter again. + g.Close() + close(reenter) + done3.Wait() +} + +func closeFunc(g *gate.Gate, done chan struct{}) { + g.Close() + close(done) +} + +func TestCloseWaits(t *testing.T) { + var g gate.Gate + + // Enter 10 times. + for i := 0; i < 10; i++ { + if !g.Enter() { + t.Fatalf("Failed to enter when it should be allowed") + } + } + + // Launch closer. Check that it doesn't complete. + done := make(chan struct{}) + go closeFunc(&g, done) + + for i := 0; i < 10; i++ { + select { + case <-done: + t.Fatalf("Close function completed too soon") + case <-time.After(100 * time.Millisecond): + } + + g.Leave() + } + + // Now the closer must complete. + <-done +} + +func TestMultipleSerialCloses(t *testing.T) { + var g gate.Gate + + // Enter 10 times. + for i := 0; i < 10; i++ { + if !g.Enter() { + t.Fatalf("Failed to enter when it should be allowed") + } + } + + // Launch closer. Check that it doesn't complete. + done := make(chan struct{}) + go closeFunc(&g, done) + + for i := 0; i < 10; i++ { + select { + case <-done: + t.Fatalf("Close function completed too soon") + case <-time.After(100 * time.Millisecond): + } + + g.Leave() + } + + // Now the closer must complete. + <-done + + // Close again should not block. 
+ done = make(chan struct{}) + go closeFunc(&g, done) + + select { + case <-done: + case <-time.After(2 * time.Second): + t.Fatalf("Second Close is blocking") + } +} + +func worker(g *gate.Gate, done *sync.WaitGroup) { + for { + if !g.Enter() { + break + } + g.Leave() + } + done.Done() +} + +func TestConcurrentAll(t *testing.T) { + var g gate.Gate + var done sync.WaitGroup + + // Launch 1000 goroutines to concurrently enter/leave. + done.Add(1000) + for i := 0; i < 1000; i++ { + go worker(&g, &done) + } + + // Wait for the goroutines to do some work, then close the gate. + time.Sleep(2 * time.Second) + g.Close() + + // Wait for all of them to complete. + done.Wait() +} diff --git a/pkg/hashio/BUILD b/pkg/hashio/BUILD new file mode 100644 index 000000000..aaa58b58f --- /dev/null +++ b/pkg/hashio/BUILD @@ -0,0 +1,19 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "hashio", + srcs = [ + "hashio.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/hashio", + visibility = ["//:sandbox"], +) + +go_test( + name = "hashio_test", + size = "small", + srcs = ["hashio_test.go"], + embed = [":hashio"], +) diff --git a/pkg/hashio/hashio.go b/pkg/hashio/hashio.go new file mode 100644 index 000000000..d97948850 --- /dev/null +++ b/pkg/hashio/hashio.go @@ -0,0 +1,295 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/* +Package hashio provides hash-verified I/O streams. + +The I/O stream format is defined as follows. + +/-----------------------------------------\ +| payload | ++-----------------------------------------+ +| hash | ++-----------------------------------------+ +| payload | ++-----------------------------------------+ +| hash | ++-----------------------------------------+ +| ...... | +\-----------------------------------------/ + +Payload bytes written to / read from the stream are automatically split +into segments, each followed by a hash. All data read out must have already +passed hash verification. Hence the client code can safely do any kind of +(stream) processing of these data. +*/ +package hashio + +import ( + "crypto/hmac" + "errors" + "hash" + "io" + "sync" +) + +// SegmentSize is the unit we split payload data and insert hash at. +const SegmentSize = 8 * 1024 + +// ErrHashMismatch is returned if the ErrHashMismatch does not match. +var ErrHashMismatch = errors.New("hash mismatch") + +// writer computes hashs during writes. +type writer struct { + mu sync.Mutex + w io.Writer + h hash.Hash + written int + closed bool + hashv []byte +} + +// NewWriter creates a hash-verified IO stream writer. +func NewWriter(w io.Writer, h hash.Hash) io.WriteCloser { + return &writer{ + w: w, + h: h, + hashv: make([]byte, h.Size()), + } +} + +// Write writes the given data. +func (w *writer) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + // Did we already close? + if w.closed { + return 0, io.ErrUnexpectedEOF + } + + for done := 0; done < len(p); { + // Slice the data at segment boundary. + left := SegmentSize - w.written + if left > len(p[done:]) { + left = len(p[done:]) + } + + // Write the rest of the segment and write to hash writer the + // same number of bytes. Hash.Write may never return an error. 
+ n, err := w.w.Write(p[done : done+left]) + w.h.Write(p[done : done+left]) + w.written += n + done += n + + // And only check the actual write errors here. + if n == 0 && err != nil { + return done, err + } + + // Write hash if starting a new segment. + if w.written == SegmentSize { + if err := w.closeSegment(); err != nil { + return done, err + } + } + } + + return len(p), nil +} + +// closeSegment closes the current segment and writes out its hash. +func (w *writer) closeSegment() error { + // Serialize and write the current segment's hash. + hashv := w.h.Sum(w.hashv[:0]) + for done := 0; done < len(hashv); { + n, err := w.w.Write(hashv[done:]) + done += n + if n == 0 && err != nil { + return err + } + } + w.written = 0 // reset counter. + return nil +} + +// Close writes the final hash to the stream and closes the underlying Writer. +func (w *writer) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + // Did we already close? + if w.closed { + return io.ErrUnexpectedEOF + } + + // Always mark as closed, regardless of errors. + w.closed = true + + // Write the final segment. + if err := w.closeSegment(); err != nil { + return err + } + + // Call the underlying closer. + if c, ok := w.w.(io.Closer); ok { + return c.Close() + } + return nil +} + +// reader computes and verifies hashs during reads. +type reader struct { + mu sync.Mutex + r io.Reader + h hash.Hash + + // data is remaining verified but unused payload data. This is + // populated on short reads and may be consumed without any + // verification. + data [SegmentSize]byte + + // index is the index into data above. + index int + + // available is the amount of valid data above. + available int + + // hashv is the read hash for the current segment. + hashv []byte + + // computev is the computed hash for the current segment. + computev []byte +} + +// NewReader creates a hash-verified IO stream reader. 
+func NewReader(r io.Reader, h hash.Hash) io.Reader { + return &reader{ + r: r, + h: h, + hashv: make([]byte, h.Size()), + computev: make([]byte, h.Size()), + } +} + +// readSegment reads a segment and hash vector. +// +// Precondition: datav must have length SegmentSize. +func (r *reader) readSegment(datav []byte) (data []byte, err error) { + // Make two reads: the first is the segment, the second is the hash + // which needs verification. We may need to adjust the resulting slices + // in the case of short reads. + for done := 0; done < SegmentSize; { + n, err := r.r.Read(datav[done:]) + done += n + if n == 0 && err == io.EOF { + if done == 0 { + // No data at all. + return nil, io.EOF + } else if done < len(r.hashv) { + // Not enough for a hash. + return nil, ErrHashMismatch + } + // Truncate the data and copy to the hash. + copy(r.hashv, datav[done-len(r.hashv):]) + datav = datav[:done-len(r.hashv)] + return datav, nil + } else if n == 0 && err != nil { + return nil, err + } + } + for done := 0; done < len(r.hashv); { + n, err := r.r.Read(r.hashv[done:]) + done += n + if n == 0 && err == io.EOF { + // Copy over from the data. + missing := len(r.hashv) - done + copy(r.hashv[missing:], r.hashv[:done]) + copy(r.hashv[:missing], datav[len(datav)-missing:]) + datav = datav[:len(datav)-missing] + return datav, nil + } else if n == 0 && err != nil { + return nil, err + } + } + return datav, nil +} + +// verifyHash verifies the given hash. +// +// The passed hash will be returned to the pool. +func (r *reader) verifyHash(datav []byte) error { + for done := 0; done < len(datav); { + n, _ := r.h.Write(datav[done:]) + done += n + } + computev := r.h.Sum(r.computev[:0]) + if !hmac.Equal(r.hashv, computev) { + return ErrHashMismatch + } + return nil +} + +// Read reads the data. +func (r *reader) Read(p []byte) (int, error) { + r.mu.Lock() + defer r.mu.Unlock() + + for done := 0; done < len(p); { + // Check for pending data. 
+ if r.index < r.available { + n := copy(p[done:], r.data[r.index:r.available]) + done += n + r.index += n + continue + } + + // Prepare the next read. + var ( + datav []byte + inline bool + ) + + // We need to read a new segment. Can we read directly? + if len(p[done:]) >= SegmentSize { + datav = p[done : done+SegmentSize] + inline = true + } else { + datav = r.data[:] + inline = false + } + + // Read the next segments. + datav, err := r.readSegment(datav) + if err != nil && err != io.EOF { + return 0, err + } else if err == io.EOF { + return done, io.EOF + } + if err := r.verifyHash(datav); err != nil { + return done, err + } + + if inline { + // Move the cursor. + done += len(datav) + } else { + // Reset index & available. + r.index = 0 + r.available = len(datav) + } + } + + return len(p), nil +} diff --git a/pkg/hashio/hashio_test.go b/pkg/hashio/hashio_test.go new file mode 100644 index 000000000..41dbdf860 --- /dev/null +++ b/pkg/hashio/hashio_test.go @@ -0,0 +1,142 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hashio + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "fmt" + "io" + "math/rand" + "testing" +) + +var testKey = []byte("01234567890123456789012345678901") + +func runTest(c []byte, fn func(enc *bytes.Buffer), iters int) error { + // Encoding happens via a buffer. 
+ var ( + enc bytes.Buffer + dec bytes.Buffer + ) + + for i := 0; i < iters; i++ { + enc.Reset() + w := NewWriter(&enc, hmac.New(sha256.New, testKey)) + if _, err := io.Copy(w, bytes.NewBuffer(c)); err != nil { + return err + } + if err := w.Close(); err != nil { + return err + } + } + + fn(&enc) + + for i := 0; i < iters; i++ { + dec.Reset() + r := NewReader(bytes.NewReader(enc.Bytes()), hmac.New(sha256.New, testKey)) + if _, err := io.Copy(&dec, r); err != nil { + return err + } + } + + // Check that the data matches; this should never fail. + if !bytes.Equal(c, dec.Bytes()) { + panic(fmt.Sprintf("data didn't match: got %v, expected %v", dec.Bytes(), c)) + } + + return nil +} + +func TestTable(t *testing.T) { + cases := [][]byte{ + // Various data sizes. + nil, + []byte(""), + []byte("_"), + []byte("0"), + []byte("01"), + []byte("012"), + []byte("0123"), + []byte("01234"), + []byte("012356"), + []byte("0123567"), + []byte("01235678"), + + // Make sure we have one longer than the hash length. + []byte("012356asdjflkasjlk3jlk23j4lkjaso0d789f0aujw3lkjlkxsdf78asdful2kj3ljka78"), + + // Make sure we have one longer than the segment size. + make([]byte, 3*SegmentSize), + make([]byte, 3*SegmentSize-1), + make([]byte, 3*SegmentSize+1), + make([]byte, 3*SegmentSize-32), + make([]byte, 3*SegmentSize+32), + make([]byte, 30*SegmentSize), + } + + for _, c := range cases { + for _, flip := range []bool{false, true} { + if len(c) == 0 && flip == true { + continue + } + + // Log the case. + t.Logf("case: len=%d flip=%v", len(c), flip) + + if err := runTest(c, func(enc *bytes.Buffer) { + if flip { + corrupted := rand.Intn(enc.Len()) + enc.Bytes()[corrupted]++ + } + }, 1); err != nil { + if !flip || err != ErrHashMismatch { + t.Errorf("error during read: got %v, expected nil", err) + } + continue + } else if flip { + t.Errorf("failed to detect ErrHashMismatch on corrupted data!") + continue + } + } + } +} + +const benchBytes = 10 * 1024 * 1024 // 10 MB. 
+ +func BenchmarkWrite(b *testing.B) { + b.StopTimer() + x := make([]byte, benchBytes) + b.SetBytes(benchBytes) + b.StartTimer() + if err := runTest(x, func(enc *bytes.Buffer) { + b.StopTimer() + }, b.N); err != nil { + b.Errorf("benchmark failed: %v", err) + } +} + +func BenchmarkRead(b *testing.B) { + b.StopTimer() + x := make([]byte, benchBytes) + b.SetBytes(benchBytes) + if err := runTest(x, func(enc *bytes.Buffer) { + b.StartTimer() + }, b.N); err != nil { + b.Errorf("benchmark failed: %v", err) + } +} diff --git a/pkg/ilist/BUILD b/pkg/ilist/BUILD new file mode 100644 index 000000000..937ac3876 --- /dev/null +++ b/pkg/ilist/BUILD @@ -0,0 +1,66 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "list_state", + srcs = [ + "interface_list.go", + ], + out = "interface_list_state.go", + package = "ilist", +) + +go_library( + name = "ilist", + srcs = [ + "interface_list.go", + "interface_list_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/ilist", + visibility = ["//visibility:public"], + deps = [ + "//pkg/state", + ], +) + +go_template_instance( + name = "interface_list", + out = "interface_list.go", + package = "ilist", + template = ":generic_list", + types = {}, +) + +# This list is used for benchmarking. 
+go_template_instance( + name = "test_list", + out = "test_list.go", + package = "ilist", + prefix = "direct", + template = ":generic_list", + types = { + "Linker": "*direct", + }, +) + +go_test( + name = "list_test", + size = "small", + srcs = [ + "list_test.go", + "test_list.go", + ], + embed = [":ilist"], +) + +go_template( + name = "generic_list", + srcs = [ + "list.go", + ], + opt_types = ["Linker"], + visibility = ["//visibility:public"], +) diff --git a/pkg/ilist/list.go b/pkg/ilist/list.go new file mode 100644 index 000000000..739575f17 --- /dev/null +++ b/pkg/ilist/list.go @@ -0,0 +1,171 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ilist provides the implementation of intrusive linked lists. +package ilist + +// Linker is the interface that objects must implement if they want to be added +// to and/or removed from List objects. +// +// N.B. When substituted in a template instantiation, Linker doesn't need to +// be an interface, and in most cases won't be. +type Linker interface { + Next() Linker + Prev() Linker + SetNext(Linker) + SetPrev(Linker) +} + +// List is an intrusive list. Entries can be added to or removed from the list +// in O(1) time and with no additional memory allocations. +// +// The zero value for List is an empty list ready to use. +// +// To iterate over a list (where l is a List): +// for e := l.Front(); e != nil; e = e.Next() { +// // do something with e. +// } +type List struct { + head Linker + tail Linker +} + +// Reset resets list l to the empty state. +func (l *List) Reset() { + l.head = nil + l.tail = nil +} + +// Empty returns true iff the list is empty. +func (l *List) Empty() bool { + return l.head == nil +} + +// Front returns the first element of list l or nil. +func (l *List) Front() Linker { + return l.head +} + +// Back returns the last element of list l or nil. 
+func (l *List) Back() Linker {
+	return l.tail
+}
+
+// PushFront inserts the element e at the front of list l.
+func (l *List) PushFront(e Linker) {
+	e.SetNext(l.head)
+	e.SetPrev(nil)
+
+	if l.head != nil {
+		l.head.SetPrev(e)
+	} else {
+		// List was empty: e is also the new tail.
+		l.tail = e
+	}
+
+	l.head = e
+}
+
+// PushBack inserts the element e at the back of list l.
+func (l *List) PushBack(e Linker) {
+	e.SetNext(nil)
+	e.SetPrev(l.tail)
+
+	if l.tail != nil {
+		l.tail.SetNext(e)
+	} else {
+		// List was empty: e is also the new head.
+		l.head = e
+	}
+
+	l.tail = e
+}
+
+// PushBackList inserts list m at the end of list l, emptying m.
+func (l *List) PushBackList(m *List) {
+	if l.head == nil {
+		// l is empty: adopt m's elements wholesale.
+		l.head = m.head
+		l.tail = m.tail
+	} else if m.head != nil {
+		// Both non-empty: splice m's chain after l's tail.
+		l.tail.SetNext(m.head)
+		m.head.SetPrev(l.tail)
+
+		l.tail = m.tail
+	}
+
+	// m is always left empty, even if it contributed nothing.
+	m.head = nil
+	m.tail = nil
+}
+
+// InsertAfter inserts e after b.
+//
+// Precondition: b must be an element of l (the head/tail updates below
+// assume it).
+func (l *List) InsertAfter(b, e Linker) {
+	a := b.Next()
+	e.SetNext(a)
+	e.SetPrev(b)
+	b.SetNext(e)
+
+	if a != nil {
+		a.SetPrev(e)
+	} else {
+		// b was the tail; e becomes the new tail.
+		l.tail = e
+	}
+}
+
+// InsertBefore inserts e before a.
+//
+// Precondition: a must be an element of l (the head/tail updates below
+// assume it).
+func (l *List) InsertBefore(a, e Linker) {
+	b := a.Prev()
+	e.SetNext(a)
+	e.SetPrev(b)
+	a.SetPrev(e)
+
+	if b != nil {
+		b.SetNext(e)
+	} else {
+		// a was the head; e becomes the new head.
+		l.head = e
+	}
+}
+
+// Remove removes e from l.
+//
+// Note that e's own next/prev links are left unchanged, so a removed element
+// still points into the list; callers must not rely on e's links after
+// removal.
+func (l *List) Remove(e Linker) {
+	prev := e.Prev()
+	next := e.Next()
+
+	if prev != nil {
+		prev.SetNext(next)
+	} else {
+		l.head = next
+	}
+
+	if next != nil {
+		next.SetPrev(prev)
+	} else {
+		l.tail = prev
+	}
+}
+
+// Entry is a default implementation of Linker. Users can add anonymous fields
+// of this type to their structs to make them automatically implement the
+// methods needed by List.
+type Entry struct {
+	next Linker
+	prev Linker
+}
+
+// Next returns the entry that follows e in the list.
+func (e *Entry) Next() Linker {
+	return e.next
+}
+
+// Prev returns the entry that precedes e in the list.
+func (e *Entry) Prev() Linker {
+	return e.prev
+}
+
+// SetNext assigns 'entry' as the entry that follows e in the list.
+func (e *Entry) SetNext(entry Linker) { + e.next = entry +} + +// SetPrev assigns 'entry' as the entry that precedes e in the list. +func (e *Entry) SetPrev(entry Linker) { + e.prev = entry +} diff --git a/pkg/ilist/list_test.go b/pkg/ilist/list_test.go new file mode 100644 index 000000000..7f2b90e0c --- /dev/null +++ b/pkg/ilist/list_test.go @@ -0,0 +1,233 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ilist + +import ( + "runtime" + "testing" +) + +type testEntry struct { + Entry + value int +} + +type direct struct { + directEntry + value int +} + +func verifyEquality(t *testing.T, entries []testEntry, l *List) { + i := 0 + for it := l.Front(); it != nil; it = it.Next() { + e := it.(*testEntry) + if e != &entries[i] { + _, file, line, _ := runtime.Caller(1) + t.Errorf("Wrong entry at index (%s:%d): %v", file, line, i) + return + } + i++ + } + + if i != len(entries) { + _, file, line, _ := runtime.Caller(1) + t.Errorf("Wrong number of entries (%s:%d)", file, line) + return + } + + i = 0 + for it := l.Back(); it != nil; it = it.Prev() { + e := it.(*testEntry) + if e != &entries[len(entries)-1-i] { + _, file, line, _ := runtime.Caller(1) + t.Errorf("Wrong entry at index (%s:%d): %v", file, line, i) + return + } + i++ + } + + if i != len(entries) { + _, file, line, _ := runtime.Caller(1) + t.Errorf("Wrong number of entries (%s:%d)", file, line) + return + } +} + +func TestZeroEmpty(t *testing.T) { + var l List + if l.Front() != nil { + t.Error("Front is non-nil") + } + if l.Back() != nil { + t.Error("Back is non-nil") + } +} + +func TestPushBack(t *testing.T) { + var l List + + // Test single entry insertion. + var entry testEntry + l.PushBack(&entry) + + e := l.Front().(*testEntry) + if e != &entry { + t.Error("Wrong entry returned") + } + + // Test inserting 100 entries. 
+ l.Reset() + var entries [100]testEntry + for i := range entries { + l.PushBack(&entries[i]) + } + + verifyEquality(t, entries[:], &l) +} + +func TestPushFront(t *testing.T) { + var l List + + // Test single entry insertion. + var entry testEntry + l.PushFront(&entry) + + e := l.Front().(*testEntry) + if e != &entry { + t.Error("Wrong entry returned") + } + + // Test inserting 100 entries. + l.Reset() + var entries [100]testEntry + for i := range entries { + l.PushFront(&entries[len(entries)-1-i]) + } + + verifyEquality(t, entries[:], &l) +} + +func TestRemove(t *testing.T) { + // Remove entry from single-element list. + var l List + var entry testEntry + l.PushBack(&entry) + l.Remove(&entry) + if l.Front() != nil { + t.Error("List is empty") + } + + var entries [100]testEntry + + // Remove single element from lists of lengths 2 to 101. + for n := 1; n <= len(entries); n++ { + for extra := 0; extra <= n; extra++ { + l.Reset() + for i := 0; i < n; i++ { + if extra == i { + l.PushBack(&entry) + } + l.PushBack(&entries[i]) + } + if extra == n { + l.PushBack(&entry) + } + + l.Remove(&entry) + verifyEquality(t, entries[:n], &l) + } + } +} + +func TestReset(t *testing.T) { + var l List + + // Resetting list of one element. + l.PushBack(&testEntry{}) + if l.Front() == nil { + t.Error("List is empty") + } + + l.Reset() + if l.Front() != nil { + t.Error("List is not empty") + } + + // Resetting list of 10 elements. + for i := 0; i < 10; i++ { + l.PushBack(&testEntry{}) + } + + if l.Front() == nil { + t.Error("List is empty") + } + + l.Reset() + if l.Front() != nil { + t.Error("List is not empty") + } + + // Resetting empty list. 
+ l.Reset() + if l.Front() != nil { + t.Error("List is not empty") + } +} + +func BenchmarkIterateForward(b *testing.B) { + var l List + for i := 0; i < 1000000; i++ { + l.PushBack(&testEntry{value: i}) + } + + for i := b.N; i > 0; i-- { + tmp := 0 + for e := l.Front(); e != nil; e = e.Next() { + tmp += e.(*testEntry).value + } + } +} + +func BenchmarkIterateBackward(b *testing.B) { + var l List + for i := 0; i < 1000000; i++ { + l.PushBack(&testEntry{value: i}) + } + + for i := b.N; i > 0; i-- { + tmp := 0 + for e := l.Back(); e != nil; e = e.Prev() { + tmp += e.(*testEntry).value + } + } +} + +func BenchmarkDirectIterateForward(b *testing.B) { + var l directList + for i := 0; i < 1000000; i++ { + l.PushBack(&direct{value: i}) + } + + for i := b.N; i > 0; i-- { + tmp := 0 + for e := l.Front(); e != nil; e = e.Next() { + tmp += e.value + } + } +} + +func BenchmarkDirectIterateBackward(b *testing.B) { + var l directList + for i := 0; i < 1000000; i++ { + l.PushBack(&direct{value: i}) + } + + for i := b.N; i > 0; i-- { + tmp := 0 + for e := l.Back(); e != nil; e = e.Prev() { + tmp += e.value + } + } +} diff --git a/pkg/linewriter/BUILD b/pkg/linewriter/BUILD new file mode 100644 index 000000000..4a96c6f1d --- /dev/null +++ b/pkg/linewriter/BUILD @@ -0,0 +1,16 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "linewriter", + srcs = ["linewriter.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/linewriter", + visibility = ["//visibility:public"], +) + +go_test( + name = "linewriter_test", + srcs = ["linewriter_test.go"], + embed = [":linewriter"], +) diff --git a/pkg/linewriter/linewriter.go b/pkg/linewriter/linewriter.go new file mode 100644 index 000000000..98f974410 --- /dev/null +++ b/pkg/linewriter/linewriter.go @@ -0,0 +1,78 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package linewriter provides an io.Writer which calls an emitter on each line. +package linewriter + +import ( + "bytes" + "sync" +) + +// Writer is an io.Writer which buffers input, flushing +// individual lines through an emitter function. +type Writer struct { + // the mutex locks buf. + sync.Mutex + + // buf holds the data we haven't emitted yet. + buf bytes.Buffer + + // emit is used to flush individual lines. + emit func(p []byte) +} + +// NewWriter creates a Writer which emits using emitter. +// The emitter must not retain p. It may change after emitter returns. +func NewWriter(emitter func(p []byte)) *Writer { + return &Writer{emit: emitter} +} + +// Write implements io.Writer.Write. +// It calls emit on each line of input, not including the newline. +// Write may be called concurrently. +func (w *Writer) Write(p []byte) (int, error) { + w.Lock() + defer w.Unlock() + + total := 0 + for len(p) > 0 { + emit := true + i := bytes.IndexByte(p, '\n') + if i < 0 { + // No newline, we will buffer everything. + i = len(p) + emit = false + } + + n, err := w.buf.Write(p[:i]) + if err != nil { + return total, err + } + total += n + + p = p[i:] + + if emit { + // Skip the newline, but still count it. 
+ p = p[1:] + total++ + + w.emit(w.buf.Bytes()) + w.buf.Reset() + } + } + + return total, nil +} diff --git a/pkg/linewriter/linewriter_test.go b/pkg/linewriter/linewriter_test.go new file mode 100644 index 000000000..ce97cca05 --- /dev/null +++ b/pkg/linewriter/linewriter_test.go @@ -0,0 +1,81 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linewriter + +import ( + "bytes" + "testing" +) + +func TestWriter(t *testing.T) { + testCases := []struct { + input []string + want []string + }{ + { + input: []string{"1\n", "2\n"}, + want: []string{"1", "2"}, + }, + { + input: []string{"1\n", "\n", "2\n"}, + want: []string{"1", "", "2"}, + }, + { + input: []string{"1\n2\n", "3\n"}, + want: []string{"1", "2", "3"}, + }, + { + input: []string{"1", "2\n"}, + want: []string{"12"}, + }, + { + // Data with no newline yet is omitted. + input: []string{"1\n", "2\n", "3"}, + want: []string{"1", "2"}, + }, + } + + for _, c := range testCases { + var lines [][]byte + + w := NewWriter(func(p []byte) { + // We must not retain p, so we must make a copy. 
+ b := make([]byte, len(p)) + copy(b, p) + + lines = append(lines, b) + }) + + for _, in := range c.input { + n, err := w.Write([]byte(in)) + if err != nil { + t.Errorf("Write(%q) err got %v want nil (case %+v)", in, err, c) + } + if n != len(in) { + t.Errorf("Write(%q) b got %d want %d (case %+v)", in, n, len(in), c) + } + } + + if len(lines) != len(c.want) { + t.Errorf("len(lines) got %d want %d (case %+v)", len(lines), len(c.want), c) + } + + for i := range lines { + if !bytes.Equal(lines[i], []byte(c.want[i])) { + t.Errorf("item %d got %q want %q (case %+v)", i, lines[i], c.want[i], c) + } + } + } +} diff --git a/pkg/log/BUILD b/pkg/log/BUILD new file mode 100644 index 000000000..2530cfd18 --- /dev/null +++ b/pkg/log/BUILD @@ -0,0 +1,28 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "log", + srcs = [ + "glog.go", + "glog_unsafe.go", + "json.go", + "log.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/log", + visibility = [ + "//visibility:public", + ], + deps = ["//pkg/linewriter"], +) + +go_test( + name = "log_test", + size = "small", + srcs = [ + "json_test.go", + "log_test.go", + ], + embed = [":log"], +) diff --git a/pkg/log/glog.go b/pkg/log/glog.go new file mode 100644 index 000000000..58b4052e6 --- /dev/null +++ b/pkg/log/glog.go @@ -0,0 +1,163 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package log + +import ( + "os" + "time" +) + +// GoogleEmitter is a wrapper that emits logs in a format compatible with +// package github.com/golang/glog. +type GoogleEmitter struct { + // Emitter is the underlying emitter. + Emitter +} + +// buffer is a simple inline buffer to avoid churn. The data slice is generally +// kept to the local byte array, and we avoid having to allocate it on the heap. +type buffer struct { + local [256]byte + data []byte +} + +func (b *buffer) start() { + b.data = b.local[:0] +} + +func (b *buffer) String() string { + return unsafeString(b.data) +} + +func (b *buffer) write(c byte) { + b.data = append(b.data, c) +} + +func (b *buffer) writeAll(d []byte) { + b.data = append(b.data, d...) +} + +func (b *buffer) writeOneDigit(d byte) { + b.write('0' + d) +} + +func (b *buffer) writeTwoDigits(v int) { + v = v % 100 + b.writeOneDigit(byte(v / 10)) + b.writeOneDigit(byte(v % 10)) +} + +func (b *buffer) writeSixDigits(v int) { + v = v % 1000000 + b.writeOneDigit(byte(v / 100000)) + b.writeOneDigit(byte((v % 100000) / 10000)) + b.writeOneDigit(byte((v % 10000) / 1000)) + b.writeOneDigit(byte((v % 1000) / 100)) + b.writeOneDigit(byte((v % 100) / 10)) + b.writeOneDigit(byte(v % 10)) +} + +func calculateBytes(v int, pad int) []byte { + var d []byte + r := 1 + + for n := 10; v >= r; n = n * 10 { + d = append(d, '0'+byte((v%n)/r)) + r = n + } + + for i := len(d); i < pad; i++ { + d = append(d, ' ') + } + + for i := 0; i < len(d)/2; i++ { + d[i], d[len(d)-(i+1)] = d[len(d)-(i+1)], d[i] + } + return d +} + +// pid is used for the threadid component of the header. +// +// The glog package logger uses 7 spaces of padding. See +// glob.loggingT.formatHeader. +var pid = calculateBytes(os.Getpid(), 7) + +// caller is faked out as the caller. See FIXME below. +var caller = []byte("x:0") + +// Emit emits the message, google-style. 
+func (g GoogleEmitter) Emit(level Level, timestamp time.Time, format string, args ...interface{}) { + var b buffer + b.start() + + // Log lines have this form: + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... + // + // where the fields are defined as follows: + // L A single character, representing the log level (eg 'I' for INFO) + // mm The month (zero padded; ie May is '05') + // dd The day (zero padded) + // hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + // threadid The space-padded thread ID as returned by GetTID() + // file The file name + // line The line number + // msg The user-supplied message + + // Log level. + switch level { + case Debug: + b.write('D') + case Info: + b.write('I') + case Warning: + b.write('W') + } + + // Timestamp. + _, month, day := timestamp.Date() + hour, minute, second := timestamp.Clock() + b.writeTwoDigits(int(month)) + b.writeTwoDigits(int(day)) + b.write(' ') + b.writeTwoDigits(int(hour)) + b.write(':') + b.writeTwoDigits(int(minute)) + b.write(':') + b.writeTwoDigits(int(second)) + b.write('.') + b.writeSixDigits(int(timestamp.Nanosecond() / 1000)) + b.write(' ') + + // The pid. + b.writeAll(pid) + b.write(' ') + + // FIXME: The caller, fabricated. This really sucks, but it + // is unacceptable to put runtime.Callers() in the hot path. + b.writeAll(caller) + b.write(']') + b.write(' ') + + // User-provided format string, copied. + for i := 0; i < len(format); i++ { + b.write(format[i]) + } + + // End with a newline. + b.write('\n') + + // Pass to the underlying routine. + g.Emitter.Emit(level, timestamp, b.String(), args...) +} diff --git a/pkg/log/glog_unsafe.go b/pkg/log/glog_unsafe.go new file mode 100644 index 000000000..c320190b8 --- /dev/null +++ b/pkg/log/glog_unsafe.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// unsafeString returns a string that points to the given byte array.
+// The byte array must be preserved until the string is disposed.
+//
+// The returned string aliases data: mutating data afterwards changes the
+// string's contents, breaking Go's assumption that strings are immutable.
+// Callers must treat data as frozen for the lifetime of the string.
+func unsafeString(data []byte) (s string) {
+	if len(data) == 0 {
+		// A zero-length string needs no backing pointer; return "".
+		return
+	}
+
+	(*reflect.StringHeader)(unsafe.Pointer(&s)).Data = uintptr(unsafe.Pointer(&data[0]))
+	(*reflect.StringHeader)(unsafe.Pointer(&s)).Len = len(data)
+	return
+}
diff --git a/pkg/log/json.go b/pkg/log/json.go
new file mode 100644
index 000000000..3887f1cd5
--- /dev/null
+++ b/pkg/log/json.go
@@ -0,0 +1,76 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// jsonLog is the wire format emitted by JSONEmitter: one JSON object per log
+// statement carrying the formatted message, the level, and the timestamp.
+type jsonLog struct {
+	Msg   string    `json:"msg"`
+	Level Level     `json:"level"`
+	Time  time.Time `json:"time"`
+}
+
+// MarshalJSON implements json.Marshaler.MarshalJSON.
+func (lv Level) MarshalJSON() ([]byte, error) { + switch lv { + case Warning: + return []byte(`"warning"`), nil + case Info: + return []byte(`"info"`), nil + case Debug: + return []byte(`"debug"`), nil + default: + return nil, fmt.Errorf("unknown level %v", lv) + } +} + +// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON. It can unmarshal +// from both string names and integers. +func (lv *Level) UnmarshalJSON(b []byte) error { + switch s := string(b); s { + case "0", `"warning"`: + *lv = Warning + case "1", `"info"`: + *lv = Info + case "2", `"debug"`: + *lv = Debug + default: + return fmt.Errorf("unknown level %q", s) + } + return nil +} + +// JSONEmitter logs messages in json format. +type JSONEmitter struct { + Writer +} + +// Emit implements Emitter.Emit. +func (e JSONEmitter) Emit(level Level, timestamp time.Time, format string, v ...interface{}) { + j := jsonLog{ + Msg: fmt.Sprintf(format, v...), + Level: level, + Time: timestamp, + } + b, err := json.Marshal(j) + if err != nil { + panic(err) + } + e.Writer.Write(b) +} diff --git a/pkg/log/json_test.go b/pkg/log/json_test.go new file mode 100644 index 000000000..3b167dab0 --- /dev/null +++ b/pkg/log/json_test.go @@ -0,0 +1,64 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "encoding/json" + "testing" +) + +// Tests that Level can marshal/unmarshal properly. 
+func TestLevelMarshal(t *testing.T) { + lvs := []Level{Warning, Info, Debug} + for _, lv := range lvs { + bs, err := lv.MarshalJSON() + if err != nil { + t.Errorf("error marshaling %v: %v", lv, err) + } + var lv2 Level + if err := lv2.UnmarshalJSON(bs); err != nil { + t.Errorf("error unmarshaling %v: %v", bs, err) + } + if lv != lv2 { + t.Errorf("marshal/unmarshal level got %v wanted %v", lv2, lv) + } + } +} + +// Test that integers can be properly unmarshaled. +func TestUnmarshalFromInt(t *testing.T) { + tcs := []struct { + i int + want Level + }{ + {0, Warning}, + {1, Info}, + {2, Debug}, + } + + for _, tc := range tcs { + j, err := json.Marshal(tc.i) + if err != nil { + t.Errorf("error marshaling %v: %v", tc.i, err) + } + var lv Level + if err := lv.UnmarshalJSON(j); err != nil { + t.Errorf("error unmarshaling %v: %v", j, err) + } + if lv != tc.want { + t.Errorf("marshal/unmarshal %v got %v want %v", tc.i, lv, tc.want) + } + } +} diff --git a/pkg/log/log.go b/pkg/log/log.go new file mode 100644 index 000000000..110e0e196 --- /dev/null +++ b/pkg/log/log.go @@ -0,0 +1,323 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package log implements a library for logging. +// +// This is separate from the standard logging package because logging may be a +// high-impact activity, and therefore we wanted to provide as much flexibility +// as possible in the underlying implementation. 
+package log + +import ( + "fmt" + "io" + stdlog "log" + "os" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" + + "gvisor.googlesource.com/gvisor/pkg/linewriter" +) + +// Level is the log level. +type Level uint32 + +// The following levels are fixed, and can never be changed. Since some control +// RPCs allow for changing the level as an integer, it is only possible to add +// additional levels, and the existing one cannot be removed. +const ( + // Warning indicates that output should always be emitted. + Warning Level = iota + + // Info indicates that output should normally be emitted. + Info + + // Debug indicates that output should not normally be emitted. + Debug +) + +// Emitter is the final destination for logs. +type Emitter interface { + // Emit emits the given log statement. This allows for control over the + // timestamp used for logging. + Emit(level Level, timestamp time.Time, format string, v ...interface{}) +} + +// Writer writes the output to the given writer. +type Writer struct { + // Next is where output is written. + Next io.Writer + + // mu protects fields below. + mu sync.Mutex + + // errors counts failures to write log messages so it can be reported + // when writer start to work again. Needs to be accessed using atomics + // to make race detector happy because it's read outside the mutex. + errors int32 +} + +// Write writes out the given bytes, handling non-blocking sockets. +func (l *Writer) Write(data []byte) (int, error) { + n := 0 + + for n < len(data) { + w, err := l.Next.Write(data[n:]) + n += w + + // Is it a non-blocking socket? + if pathErr, ok := err.(*os.PathError); ok && pathErr.Err == syscall.EAGAIN { + runtime.Gosched() + continue + } + + // Some other error? + if err != nil { + l.mu.Lock() + atomic.AddInt32(&l.errors, 1) + l.mu.Unlock() + return n, err + } + } + + // Do we need to end with a '\n'? 
+	if len(data) == 0 || data[len(data)-1] != '\n' {
+		// Recursive call: the newline goes through the same
+		// EAGAIN/error handling as the payload above.
+		l.Write([]byte{'\n'})
+	}
+
+	// Dirty read in case there were errors (rare).
+	if atomic.LoadInt32(&l.errors) > 0 {
+		l.mu.Lock()
+		defer l.mu.Unlock()
+
+		// Recheck condition under lock.
+		if e := atomic.LoadInt32(&l.errors); e > 0 {
+			msg := fmt.Sprintf("\n*** Dropped %d log messages ***\n", e)
+			if _, err := l.Next.Write([]byte(msg)); err == nil {
+				// Only clear the counter once the notice has
+				// actually been written out.
+				atomic.StoreInt32(&l.errors, 0)
+			}
+		}
+	}
+
+	return n, nil
+}
+
+// Emit emits the message.
+func (l *Writer) Emit(level Level, timestamp time.Time, format string, args ...interface{}) {
+	fmt.Fprintf(l, format, args...)
+}
+
+// MultiEmitter is an emitter that emits to multiple Emitters.
+type MultiEmitter []Emitter
+
+// Emit emits to all emitters.
+func (m MultiEmitter) Emit(level Level, timestamp time.Time, format string, v ...interface{}) {
+	for _, e := range m {
+		e.Emit(level, timestamp, format, v...)
+	}
+}
+
+// TestLogger is implemented by testing.T and testing.B.
+type TestLogger interface {
+	Logf(format string, v ...interface{})
+}
+
+// TestEmitter may be used for wrapping tests.
+type TestEmitter struct {
+	TestLogger
+}
+
+// Emit emits to the TestLogger.
+func (t TestEmitter) Emit(level Level, timestamp time.Time, format string, v ...interface{}) {
+	t.Logf(format, v...)
+}
+
+// Logger is a high-level logging interface. It is, in fact, not used within
+// the log package. Rather it is provided for others to provide contextual
+// loggers that may append some additional information to log statements.
+// BasicLogger satisfies this interface, and may be passed around as a Logger.
+type Logger interface {
+	// Debugf logs a debug statement.
+	Debugf(format string, v ...interface{})
+
+	// Infof logs at an info level.
+	Infof(format string, v ...interface{})
+
+	// Warningf logs at a warning level.
+	Warningf(format string, v ...interface{})
+
+	// IsLogging returns true iff this level is being logged. This may be
+	// used to short-circuit expensive operations for debugging calls.
+	IsLogging(level Level) bool
+}
+
+// BasicLogger is the default implementation of Logger.
+type BasicLogger struct {
+	Level
+	Emitter
+}
+
+// Debugf implements logger.Debugf.
+func (l *BasicLogger) Debugf(format string, v ...interface{}) {
+	if l.IsLogging(Debug) {
+		l.Emit(Debug, time.Now(), format, v...)
+	}
+}
+
+// Infof implements logger.Infof.
+func (l *BasicLogger) Infof(format string, v ...interface{}) {
+	if l.IsLogging(Info) {
+		l.Emit(Info, time.Now(), format, v...)
+	}
+}
+
+// Warningf implements logger.Warningf.
+func (l *BasicLogger) Warningf(format string, v ...interface{}) {
+	if l.IsLogging(Warning) {
+		l.Emit(Warning, time.Now(), format, v...)
+	}
+}
+
+// IsLogging implements logger.IsLogging.
+//
+// Levels are ordered Warning < Info < Debug, so a level "is being logged"
+// when the configured level is at least as verbose as the requested one.
+func (l *BasicLogger) IsLogging(level Level) bool {
+	return atomic.LoadUint32((*uint32)(&l.Level)) >= uint32(level)
+}
+
+// SetLevel sets the logging level.
+func (l *BasicLogger) SetLevel(level Level) {
+	atomic.StoreUint32((*uint32)(&l.Level), uint32(level))
+}
+
+// logMu protects Log below. We use atomic operations to read the value, but
+// updates require logMu to ensure consistency.
+var logMu sync.Mutex
+
+// log is the default logger.
+var log atomic.Value
+
+// Log retrieves the global logger.
+func Log() *BasicLogger {
+	return log.Load().(*BasicLogger)
+}
+
+// SetTarget sets the log target.
+//
+// This is not thread safe and shouldn't be called concurrently with any
+// logging calls.
+func SetTarget(target Emitter) {
+	logMu.Lock()
+	defer logMu.Unlock()
+	// Preserve the current level; only the emitter changes.
+	oldLog := Log()
+	log.Store(&BasicLogger{Level: oldLog.Level, Emitter: target})
+}
+
+// SetLevel sets the log level.
+func SetLevel(newLevel Level) {
+	Log().SetLevel(newLevel)
+}
+
+// Debugf logs to the global logger.
+func Debugf(format string, v ...interface{}) {
+	Log().Debugf(format, v...)
+}
+
+// Infof logs to the global logger.
+func Infof(format string, v ...interface{}) { + Log().Infof(format, v...) +} + +// Warningf logs to the global logger. +func Warningf(format string, v ...interface{}) { + Log().Warningf(format, v...) +} + +// defaultStackSize is the default buffer size to allocate for stack traces. +const defaultStackSize = 1 << 16 // 64KB + +// maxStackSize is the maximum buffer size to allocate for stack traces. +const maxStackSize = 1 << 26 // 64MB + +// stacks returns goroutine stacks, like panic. +func stacks(all bool) []byte { + var trace []byte + for s := defaultStackSize; s <= maxStackSize; s *= 4 { + trace = make([]byte, s) + nbytes := runtime.Stack(trace, all) + if nbytes == s { + continue + } + return trace[:nbytes] + } + trace = append(trace, []byte("\n\n...<too large, truncated>")...) + return trace +} + +// Traceback logs the given message and dumps a stacktrace of the current +// goroutine. +// +// This will be print a traceback, tb, as Warningf(format+":\n%s", v..., tb). +func Traceback(format string, v ...interface{}) { + v = append(v, stacks(false)) + Warningf(format+":\n%s", v...) +} + +// TracebackAll logs the given message and dumps a stacktrace of all goroutines. +// +// This will be print a traceback, tb, as Warningf(format+":\n%s", v..., tb). +func TracebackAll(format string, v ...interface{}) { + v = append(v, stacks(true)) + Warningf(format+":\n%s", v...) +} + +// IsLogging returns whether the global logger is logging. +func IsLogging(level Level) bool { + return Log().IsLogging(level) +} + +// CopyStandardLogTo redirects the stdlib log package global output to the global +// logger for the specified level. 
+func CopyStandardLogTo(l Level) error { + var f func(string, ...interface{}) + + switch l { + case Debug: + f = Debugf + case Info: + f = Infof + case Warning: + f = Warningf + default: + return fmt.Errorf("Unknown log level %v", l) + } + + stdlog.SetOutput(linewriter.NewWriter(func(p []byte) { + // We must not retain p, but log formatting is not required to + // be synchronous (though the in-package implementations are), + // so we must make a copy. + b := make([]byte, len(p)) + copy(b, p) + + f("%s", b) + })) + + return nil +} + +func init() { + // Store the initial value for the log. + log.Store(&BasicLogger{Level: Info, Emitter: GoogleEmitter{&Writer{Next: os.Stderr}}}) +} diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go new file mode 100644 index 000000000..d93e989dc --- /dev/null +++ b/pkg/log/log_test.go @@ -0,0 +1,70 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package log
+
+import (
+	"fmt"
+	"testing"
+)
+
+// testWriter records written lines, or fails every Write when fail is set.
+type testWriter struct {
+	lines []string
+	fail  bool
+}
+
+func (w *testWriter) Write(bytes []byte) (int, error) {
+	if w.fail {
+		return 0, fmt.Errorf("simulated failure")
+	}
+	w.lines = append(w.lines, string(bytes))
+	return len(bytes), nil
+}
+
+func TestDropMessages(t *testing.T) {
+	tw := &testWriter{}
+	w := Writer{Next: tw}
+	if _, err := w.Write([]byte("line 1\n")); err != nil {
+		t.Fatalf("Write failed, err: %v", err)
+	}
+
+	tw.fail = true
+	if _, err := w.Write([]byte("error\n")); err == nil {
+		t.Fatalf("Write should have failed")
+	}
+	if _, err := w.Write([]byte("error\n")); err == nil {
+		t.Fatalf("Write should have failed")
+	}
+
+	t.Logf("writer: %+v", w)
+
+	tw.fail = false
+	if _, err := w.Write([]byte("line 2\n")); err != nil {
+		t.Fatalf("Write failed, err: %v", err)
+	}
+
+	expected := []string{
+		"line 1\n", // matches the payload actually written above
+		"line 2\n", // the recovering write emits its payload first...
+		fmt.Sprintf("\n*** Dropped %d log messages ***\n", 2), // ...then the notice for the 2 failed writes
+	}
+	if len(tw.lines) != len(expected) {
+		t.Fatalf("Writer should have logged %d lines, got: %v, expected: %v", len(expected), tw.lines, expected)
+	}
+	for i, l := range tw.lines {
+		if l != expected[i] {
+			t.Fatalf("line %d doesn't match, got: %v, expected: %v", i, l, expected[i])
+		}
+	}
+}
diff --git a/pkg/metric/BUILD b/pkg/metric/BUILD
new file mode 100644
index 000000000..e3f50d528
--- /dev/null
+++ b/pkg/metric/BUILD
@@ -0,0 +1,40 @@
+package(licenses = ["notice"]) # Apache 2.0
+
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "metric",
+    srcs = ["metric.go"],
+    importpath = "gvisor.googlesource.com/gvisor/pkg/metric",
+    visibility = ["//:sandbox"],
+    deps = [
+        ":metric_go_proto",
+        "//pkg/eventchannel",
+        "//pkg/log",
+    ],
+)
+
+proto_library(
+    name = "metric_proto",
+    srcs = ["metric.proto"],
+    visibility = ["//:sandbox"],
+)
+
+go_proto_library(
+    name = "metric_go_proto",
+    importpath = 
"gvisor.googlesource.com/gvisor/pkg/metric/metric_go_proto", + proto = ":metric_proto", + visibility = ["//:sandbox"], +) + +go_test( + name = "metric_test", + srcs = ["metric_test.go"], + embed = [":metric"], + deps = [ + ":metric_go_proto", + "//pkg/eventchannel", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) diff --git a/pkg/metric/metric.go b/pkg/metric/metric.go new file mode 100644 index 000000000..0743612f0 --- /dev/null +++ b/pkg/metric/metric.go @@ -0,0 +1,226 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metric provides primitives for collecting metrics. +package metric + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/eventchannel" + "gvisor.googlesource.com/gvisor/pkg/log" + pb "gvisor.googlesource.com/gvisor/pkg/metric/metric_go_proto" +) + +var ( + // ErrNameInUse indicates that another metric is already defined for + // the given name. + ErrNameInUse = errors.New("metric name already in use") + + // ErrInitializationDone indicates that the caller tried to create a + // new metric after initialization. + ErrInitializationDone = errors.New("metric cannot be created after initialization is complete") +) + +// Uint64Metric encapsulates a uint64 that represents some kind of metric to be +// monitored. +// +// All metrics must be cumulative, meaning that their values will only increase +// over time. 
+// +// Metrics are not saved across save/restore and thus reset to zero on restore. +// +// TODO: Support non-cumulative metrics. +// TODO: Support metric fields. +// +type Uint64Metric struct { + // metadata describes the metric. It is immutable. + metadata *pb.MetricMetadata + + // value is the actual value of the metric. It must be accessed + // atomically. + value uint64 +} + +var ( + // initialized indicates that all metrics are registered. allMetrics is + // immutable once initialized is true. + initialized bool + + // allMetrics are the registered metrics. + allMetrics = makeMetricSet() +) + +// Initialize sends a metric registration event over the event channel. +// +// Precondition: +// * All metrics are registered. +// * Initialize/Disable has not been called. +func Initialize() { + if initialized { + panic("Initialize/Disable called more than once") + } + initialized = true + + m := pb.MetricRegistration{} + for _, v := range allMetrics.m { + m.Metrics = append(m.Metrics, v.metadata) + } + eventchannel.Emit(&m) +} + +// Disable sends an empty metric registration event over the event channel, +// disabling metric collection. +// +// Precondition: +// * All metrics are registered. +// * Initialize/Disable has not been called. +func Disable() { + if initialized { + panic("Initialize/Disable called more than once") + } + initialized = true + + m := pb.MetricRegistration{} + if err := eventchannel.Emit(&m); err != nil { + panic("unable to emit metric disable event: " + err.Error()) + } +} + +// NewUint64Metric creates a new metric with the given name. +// +// Metrics must be statically defined (i.e., at startup). NewUint64Metric will +// return an error if called after Initialized. +// +// Preconditions: +// * name must be globally unique. +// * Initialize/Disable have not been called. 
+func NewUint64Metric(name string, sync bool, description string) (*Uint64Metric, error) { + if initialized { + return nil, ErrInitializationDone + } + + if _, ok := allMetrics.m[name]; ok { + return nil, ErrNameInUse + } + + m := &Uint64Metric{ + metadata: &pb.MetricMetadata{ + Name: name, + Description: description, + Cumulative: true, + Sync: sync, + Type: pb.MetricMetadata_UINT64, + }, + } + allMetrics.m[name] = m + return m, nil +} + +// MustCreateNewUint64Metric calls NewUint64Metric and panics if it returns an +// error. +func MustCreateNewUint64Metric(name string, sync bool, description string) *Uint64Metric { + m, err := NewUint64Metric(name, sync, description) + if err != nil { + panic(fmt.Sprintf("Unable to create metric %q: %v", name, err)) + } + return m +} + +// Value returns the current value of the metric. +func (m *Uint64Metric) Value() uint64 { + return atomic.LoadUint64(&m.value) +} + +// Increment increments the metric by 1. +func (m *Uint64Metric) Increment() { + atomic.AddUint64(&m.value, 1) +} + +// IncrementBy increments the metric by v. +func (m *Uint64Metric) IncrementBy(v uint64) { + atomic.AddUint64(&m.value, v) +} + +// metricSet holds named metrics. +type metricSet struct { + m map[string]*Uint64Metric +} + +// makeMetricSet returns a new metricSet. +func makeMetricSet() metricSet { + return metricSet{ + m: make(map[string]*Uint64Metric), + } +} + +// Values returns a snapshot of all values in m. +func (m *metricSet) Values() metricValues { + vals := make(metricValues) + for k, v := range m.m { + vals[k] = v.Value() + } + return vals +} + +// metricValues contains a copy of the values of all metrics. +type metricValues map[string]uint64 + +var ( + // emitMu protects metricsAtLastEmit and ensures that all emitted + // metrics are strongly ordered (older metrics are never emitted after + // newer metrics). + emitMu sync.Mutex + + // metricsAtLastEmit contains the state of the metrics at the last emit event. 
+ metricsAtLastEmit metricValues +) + +// EmitMetricUpdate emits a MetricUpdate over the event channel. +// +// Only metrics that have changed since the last call are emitted. +// +// EmitMetricUpdate is thread-safe. +// +// Preconditions: +// * Initialize has been called. +func EmitMetricUpdate() { + emitMu.Lock() + defer emitMu.Unlock() + + snapshot := allMetrics.Values() + + m := pb.MetricUpdate{} + for k, v := range snapshot { + // On the first call metricsAtLastEmit will be empty. Include + // all metrics then. + if prev, ok := metricsAtLastEmit[k]; !ok || prev != v { + m.Metrics = append(m.Metrics, &pb.MetricValue{ + Name: k, + Value: &pb.MetricValue_Uint64Value{v}, + }) + } + } + + metricsAtLastEmit = snapshot + if len(m.Metrics) == 0 { + return + } + + log.Debugf("Emitting metrics: %v", m) + eventchannel.Emit(&m) +} diff --git a/pkg/metric/metric.proto b/pkg/metric/metric.proto new file mode 100644 index 000000000..6108cb7c0 --- /dev/null +++ b/pkg/metric/metric.proto @@ -0,0 +1,68 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package gvisor; + +// MetricMetadata contains all of the metadata describing a single metric. +message MetricMetadata { + // name is the unique name of the metric, usually in a "directory" format + // (e.g., /foo/count). + string name = 1; + + // description is a human-readable description of the metric. 
+ string description = 2; + + // cumulative indicates that this metric is never decremented. + bool cumulative = 3; + + // sync indicates that values from the final metric event should be + // synchronized to the backing monitoring system at exit. + // + // If sync is false, values are only sent to the monitoring system + // periodically. There is no guarantee that values will ever be received by + // the monitoring system. + bool sync = 4; + + enum Type { UINT64 = 0; } + + // type is the type of the metric value. + Type type = 5; +} + +// MetricRegistration contains the metadata for all metrics that will be in +// future MetricUpdates. +message MetricRegistration { + repeated MetricMetadata metrics = 1; +} + +// MetricValue the value of a metric at a single point in time. +message MetricValue { + // name is the unique name of the metric, as in MetricMetadata. + string name = 1; + + // value is the value of the metric at a single point in time. The field set + // depends on the type of the metric. + oneof value { + uint64 uint64_value = 2; + } +} + +// MetricUpdate contains new values for multiple distinct metrics. +// +// Metrics whose values have not changed are not included. +message MetricUpdate { + repeated MetricValue metrics = 1; +} diff --git a/pkg/metric/metric_test.go b/pkg/metric/metric_test.go new file mode 100644 index 000000000..7d156e4a5 --- /dev/null +++ b/pkg/metric/metric_test.go @@ -0,0 +1,252 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "testing" + + "github.com/golang/protobuf/proto" + "gvisor.googlesource.com/gvisor/pkg/eventchannel" + pb "gvisor.googlesource.com/gvisor/pkg/metric/metric_go_proto" +) + +// sliceEmitter implements eventchannel.Emitter by appending all messages to a +// slice. +type sliceEmitter []proto.Message + +// Emit implements eventchannel.Emitter.Emit. +func (s *sliceEmitter) Emit(msg proto.Message) (bool, error) { + *s = append(*s, msg) + return false, nil +} + +// Emit implements eventchannel.Emitter.Close. +func (s *sliceEmitter) Close() error { + return nil +} + +// Reset clears all events in s. +func (s *sliceEmitter) Reset() { + *s = nil +} + +// emitter is the eventchannel.Emitter used for all tests. Package eventchannel +// doesn't allow removing Emitters, so we must use one global emitter for all +// test cases. +var emitter sliceEmitter + +func init() { + eventchannel.AddEmitter(&emitter) +} + +// reset clears all global state in the metric package. +func reset() { + initialized = false + allMetrics = makeMetricSet() + emitter.Reset() +} + +const ( + fooDescription = "Foo!" 
+	barDescription = "Bar Baz"
+)
+
+func TestInitialize(t *testing.T) {
+	defer reset()
+
+	_, err := NewUint64Metric("/foo", false, fooDescription)
+	if err != nil {
+		t.Fatalf("NewUint64Metric got err %v want nil", err)
+	}
+
+	_, err = NewUint64Metric("/bar", true, barDescription)
+	if err != nil {
+		t.Fatalf("NewUint64Metric got err %v want nil", err)
+	}
+
+	Initialize()
+
+	if len(emitter) != 1 {
+		t.Fatalf("Initialize emitted %d events want 1", len(emitter))
+	}
+
+	mr, ok := emitter[0].(*pb.MetricRegistration)
+	if !ok {
+		t.Fatalf("emitter %v got %T want pb.MetricRegistration", emitter[0], emitter[0])
+	}
+
+	if len(mr.Metrics) != 2 {
+		t.Errorf("MetricRegistration got %d metrics want 2", len(mr.Metrics))
+	}
+
+	foundFoo := false
+	foundBar := false
+	for _, m := range mr.Metrics {
+		if m.Type != pb.MetricMetadata_UINT64 {
+			t.Errorf("Metadata %+v Type got %v want %v", m, m.Type, pb.MetricMetadata_UINT64)
+		}
+		if !m.Cumulative {
+			t.Errorf("Metadata %+v Cumulative got false want true", m)
+		}
+
+		switch m.Name {
+		case "/foo":
+			foundFoo = true
+			if m.Description != fooDescription {
+				t.Errorf("/foo %+v Description got %q want %q", m, m.Description, fooDescription)
+			}
+			if m.Sync {
+				t.Errorf("/foo %+v Sync got true want false", m)
+			}
+		case "/bar":
+			foundBar = true
+			if m.Description != barDescription {
+				t.Errorf("/bar %+v Description got %q want %q", m, m.Description, barDescription)
+			}
+			if !m.Sync {
+				t.Errorf("/bar %+v Sync got false want true", m)
+			}
+		}
+	}
+
+	if !foundFoo {
+		t.Errorf("/foo not found: %+v", emitter)
+	}
+	if !foundBar {
+		t.Errorf("/bar not found: %+v", emitter)
+	}
+}
+
+func TestDisable(t *testing.T) {
+	defer reset()
+
+	_, err := NewUint64Metric("/foo", false, fooDescription)
+	if err != nil {
+		t.Fatalf("NewUint64Metric got err %v want nil", err)
+	}
+
+	_, err = NewUint64Metric("/bar", true, barDescription)
+	if err != nil {
+		t.Fatalf("NewUint64Metric got err %v want nil", err)
+	}
+
+	Disable()
+
+	if len(emitter) 
!= 1 { + t.Fatalf("Initialize emitted %d events want 1", len(emitter)) + } + + mr, ok := emitter[0].(*pb.MetricRegistration) + if !ok { + t.Fatalf("emitter %v got %T want pb.MetricRegistration", emitter[0], emitter[0]) + } + + if len(mr.Metrics) != 0 { + t.Errorf("MetricRegistration got %d metrics want 0", len(mr.Metrics)) + } +} + +func TestEmitMetricUpdate(t *testing.T) { + defer reset() + + foo, err := NewUint64Metric("/foo", false, fooDescription) + if err != nil { + t.Fatalf("NewUint64Metric got err %v want nil", err) + } + + _, err = NewUint64Metric("/bar", true, barDescription) + if err != nil { + t.Fatalf("NewUint64Metric got err %v want nil", err) + } + + Initialize() + + // Don't care about the registration metrics. + emitter.Reset() + EmitMetricUpdate() + + if len(emitter) != 1 { + t.Fatalf("EmitMetricUpdate emitted %d events want 1", len(emitter)) + } + + update, ok := emitter[0].(*pb.MetricUpdate) + if !ok { + t.Fatalf("emitter %v got %T want pb.MetricUpdate", emitter[0], emitter[0]) + } + + if len(update.Metrics) != 2 { + t.Errorf("MetricUpdate got %d metrics want 2", len(update.Metrics)) + } + + // Both are included for their initial values. + foundFoo := false + foundBar := false + for _, m := range update.Metrics { + switch m.Name { + case "/foo": + foundFoo = true + case "/bar": + foundBar = true + } + uv, ok := m.Value.(*pb.MetricValue_Uint64Value) + if !ok { + t.Errorf("%+v: value %v got %T want pb.MetricValue_Uint64Value", m, m.Value, m.Value) + continue + } + if uv.Uint64Value != 0 { + t.Errorf("%v: Value got %v want 0", m, uv.Uint64Value) + } + } + + if !foundFoo { + t.Errorf("/foo not found: %+v", emitter) + } + if !foundBar { + t.Errorf("/bar not found: %+v", emitter) + } + + // Increment foo. Only it is included in the next update. 
+ foo.Increment() + + emitter.Reset() + EmitMetricUpdate() + + if len(emitter) != 1 { + t.Fatalf("EmitMetricUpdate emitted %d events want 1", len(emitter)) + } + + update, ok = emitter[0].(*pb.MetricUpdate) + if !ok { + t.Fatalf("emitter %v got %T want pb.MetricUpdate", emitter[0], emitter[0]) + } + + if len(update.Metrics) != 1 { + t.Errorf("MetricUpdate got %d metrics want 1", len(update.Metrics)) + } + + m := update.Metrics[0] + + if m.Name != "/foo" { + t.Errorf("Metric %+v name got %q want '/foo'", m, m.Name) + } + + uv, ok := m.Value.(*pb.MetricValue_Uint64Value) + if !ok { + t.Errorf("%+v: value %v got %T want pb.MetricValue_Uint64Value", m, m.Value, m.Value) + } + if uv.Uint64Value != 1 { + t.Errorf("%v: Value got %v want 1", m, uv.Uint64Value) + } +} diff --git a/pkg/p9/BUILD b/pkg/p9/BUILD new file mode 100644 index 000000000..f348ff2e9 --- /dev/null +++ b/pkg/p9/BUILD @@ -0,0 +1,47 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "p9", + srcs = [ + "buffer.go", + "client.go", + "client_file.go", + "file.go", + "handlers.go", + "messages.go", + "p9.go", + "pool.go", + "server.go", + "transport.go", + "version.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/p9", + deps = [ + "//pkg/fd", + "//pkg/log", + "//pkg/unet", + ], +) + +go_test( + name = "p9_test", + size = "small", + srcs = [ + "client_test.go", + "messages_test.go", + "p9_test.go", + "pool_test.go", + "transport_test.go", + "version_test.go", + ], + embed = [":p9"], + deps = [ + "//pkg/fd", + "//pkg/unet", + ], +) diff --git a/pkg/p9/buffer.go b/pkg/p9/buffer.go new file mode 100644 index 000000000..fc65d2c5f --- /dev/null +++ b/pkg/p9/buffer.go @@ -0,0 +1,262 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "encoding/binary" +) + +// encoder is used for messages and 9P primitives. +type encoder interface { + // Decode decodes from the given buffer. + // + // This may not fail, exhaustion will be recorded in the buffer. + Decode(b *buffer) + + // Encode encodes to the given buffer. + // + // This may not fail. + Encode(b *buffer) +} + +// order is the byte order used for encoding. +var order = binary.LittleEndian + +// buffer is a slice that is consumed. +// +// This is passed to the encoder methods. +type buffer struct { + // data is the underlying data. This may grow during Encode. + data []byte + + // overflow indicates whether an overflow has occurred. + overflow bool +} + +// append appends n bytes to the buffer and returns a slice pointing to the +// newly appended bytes. +func (b *buffer) append(n int) []byte { + b.data = append(b.data, make([]byte, n)...) + return b.data[len(b.data)-n:] +} + +// consume consumes n bytes from the buffer. +func (b *buffer) consume(n int) ([]byte, bool) { + if !b.has(n) { + b.markOverrun() + return nil, false + } + rval := b.data[:n] + b.data = b.data[n:] + return rval, true +} + +// has returns true if n bytes are available. +func (b *buffer) has(n int) bool { + return len(b.data) >= n +} + +// markOverrun immediately marks this buffer as overrun. +// +// This is used by ReadString, since some invalid data implies the rest of the +// buffer is no longer valid either. 
+func (b *buffer) markOverrun() {
+	b.overflow = true
+}
+
+// isOverrun returns true if this buffer has run past the end.
+func (b *buffer) isOverrun() bool {
+	return b.overflow
+}
+
+// Read8 reads a byte from the buffer.
+func (b *buffer) Read8() uint8 {
+	v, ok := b.consume(1)
+	if !ok {
+		return 0
+	}
+	return uint8(v[0])
+}
+
+// Read16 reads a 16-bit value from the buffer.
+func (b *buffer) Read16() uint16 {
+	v, ok := b.consume(2)
+	if !ok {
+		return 0
+	}
+	return order.Uint16(v)
+}
+
+// Read32 reads a 32-bit value from the buffer.
+func (b *buffer) Read32() uint32 {
+	v, ok := b.consume(4)
+	if !ok {
+		return 0
+	}
+	return order.Uint32(v)
+}
+
+// Read64 reads a 64-bit value from the buffer.
+func (b *buffer) Read64() uint64 {
+	v, ok := b.consume(8)
+	if !ok {
+		return 0
+	}
+	return order.Uint64(v)
+}
+
+// ReadQIDType reads a QIDType value.
+func (b *buffer) ReadQIDType() QIDType {
+	return QIDType(b.Read8())
+}
+
+// ReadTag reads a Tag value.
+func (b *buffer) ReadTag() Tag {
+	return Tag(b.Read16())
+}
+
+// ReadFID reads a FID value.
+func (b *buffer) ReadFID() FID {
+	return FID(b.Read32())
+}
+
+// ReadUID reads a UID value.
+func (b *buffer) ReadUID() UID {
+	return UID(b.Read32())
+}
+
+// ReadGID reads a GID value.
+func (b *buffer) ReadGID() GID {
+	return GID(b.Read32())
+}
+
+// ReadPermissions reads a file mode value and applies the mask for permissions.
+func (b *buffer) ReadPermissions() FileMode {
+	return b.ReadFileMode() & PermissionsMask
+}
+
+// ReadFileMode reads a file mode value.
+func (b *buffer) ReadFileMode() FileMode {
+	return FileMode(b.Read32())
+}
+
+// ReadOpenFlags reads an OpenFlags.
+func (b *buffer) ReadOpenFlags() OpenFlags {
+	return OpenFlags(b.Read32())
+}
+
+// ReadConnectFlags reads a ConnectFlags.
+func (b *buffer) ReadConnectFlags() ConnectFlags {
+	return ConnectFlags(b.Read32())
+}
+
+// ReadMsgType reads a MsgType.
+func (b *buffer) ReadMsgType() MsgType { + return MsgType(b.Read8()) +} + +// ReadString deserializes a string. +func (b *buffer) ReadString() string { + l := b.Read16() + if !b.has(int(l)) { + // Mark the buffer as corrupted. + b.markOverrun() + return "" + } + + bs := make([]byte, l) + for i := 0; i < int(l); i++ { + bs[i] = byte(b.Read8()) + } + return string(bs) +} + +// Write8 writes a byte to the buffer. +func (b *buffer) Write8(v uint8) { + b.append(1)[0] = byte(v) +} + +// Write16 writes a 16-bit value to the buffer. +func (b *buffer) Write16(v uint16) { + order.PutUint16(b.append(2), v) +} + +// Write32 writes a 32-bit value to the buffer. +func (b *buffer) Write32(v uint32) { + order.PutUint32(b.append(4), v) +} + +// Write64 writes a 64-bit value to the buffer. +func (b *buffer) Write64(v uint64) { + order.PutUint64(b.append(8), v) +} + +// WriteQIDType writes a QIDType value. +func (b *buffer) WriteQIDType(qidType QIDType) { + b.Write8(uint8(qidType)) +} + +// WriteTag writes a Tag value. +func (b *buffer) WriteTag(tag Tag) { + b.Write16(uint16(tag)) +} + +// WriteFID writes a FID value. +func (b *buffer) WriteFID(fid FID) { + b.Write32(uint32(fid)) +} + +// WriteUID writes a UID value. +func (b *buffer) WriteUID(uid UID) { + b.Write32(uint32(uid)) +} + +// WriteGID writes a GID value. +func (b *buffer) WriteGID(gid GID) { + b.Write32(uint32(gid)) +} + +// WritePermissions applies a permissions mask and writes the FileMode. +func (b *buffer) WritePermissions(perm FileMode) { + b.WriteFileMode(perm & PermissionsMask) +} + +// WriteFileMode writes a FileMode. +func (b *buffer) WriteFileMode(mode FileMode) { + b.Write32(uint32(mode)) +} + +// WriteOpenFlags writes an OpenFlags. +func (b *buffer) WriteOpenFlags(flags OpenFlags) { + b.Write32(uint32(flags)) +} + +// WriteConnectFlags writes a ConnectFlags. +func (b *buffer) WriteConnectFlags(flags ConnectFlags) { + b.Write32(uint32(flags)) +} + +// WriteMsgType writes a MsgType. 
+func (b *buffer) WriteMsgType(t MsgType) { + b.Write8(uint8(t)) +} + +// WriteString serializes the given string. +func (b *buffer) WriteString(s string) { + b.Write16(uint16(len(s))) + for i := 0; i < len(s); i++ { + b.Write8(byte(s[i])) + } +} diff --git a/pkg/p9/client.go b/pkg/p9/client.go new file mode 100644 index 000000000..5fa231bc5 --- /dev/null +++ b/pkg/p9/client.go @@ -0,0 +1,301 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "errors" + "fmt" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +// ErrOutOfTags indicates no tags are available. +var ErrOutOfTags = errors.New("out of tags -- messages lost?") + +// ErrOutOfFIDs indicates no more FIDs are available. +var ErrOutOfFIDs = errors.New("out of FIDs -- messages lost?") + +// ErrUnexpectedTag indicates a response with an unexpected tag was received. +var ErrUnexpectedTag = errors.New("unexpected tag in response") + +// ErrVersionsExhausted indicates that all versions to negotiate have been exhausted. +var ErrVersionsExhausted = errors.New("exhausted all versions to negotiate") + +// ErrBadVersionString indicates that the version string is malformed or unsupported. +var ErrBadVersionString = errors.New("bad version string") + +// ErrBadResponse indicates the response didn't match the request. 
// ErrBadResponse indicates that the server replied to an outstanding tag
// with a message of an unexpected type.
type ErrBadResponse struct {
	Got  MsgType
	Want MsgType
}

// Error returns a highly descriptive error.
func (e *ErrBadResponse) Error() string {
	return fmt.Sprintf("unexpected message type: got %v, want %v", e.Got, e.Want)
}

// response is the asynchronous return from recv.
//
// This is used in the pending map below.
type response struct {
	r    message
	done chan error
}

// responsePool recycles response objects. Note that done is buffered with
// capacity 1 so that completing a response never blocks the receiver.
var responsePool = sync.Pool{
	New: func() interface{} {
		return &response{
			done: make(chan error, 1),
		}
	},
}

// Client is at least a 9P2000.L client.
type Client struct {
	// socket is the connected socket.
	socket *unet.Socket

	// tagPool is the collection of available tags.
	tagPool pool

	// fidPool is the collection of available fids.
	fidPool pool

	// pending is the set of pending messages.
	//
	// pendingMu protects pending.
	pending   map[Tag]*response
	pendingMu sync.Mutex

	// sendMu is the lock for sending a request.
	sendMu sync.Mutex

	// recvr is essentially a mutex for calling recv.
	//
	// Whoever writes to this channel is permitted to call recv. When
	// finished calling recv, this channel should be emptied.
	recvr chan bool

	// messageSize is the maximum total size of a message.
	messageSize uint32

	// payloadSize is the maximum payload size of a read or write
	// request. For large reads and writes this means that the
	// read or write is broken up into buffer-size/payloadSize
	// requests.
	payloadSize uint32

	// version is the agreed upon version X of 9P2000.L.Google.X.
	// version 0 implies 9P2000.L.
	version uint32
}

// NewClient creates a new client. It performs a Tversion exchange with
// the server to assert that messageSize is ok to use.
//
// You should not use the same socket for multiple clients.
func NewClient(socket *unet.Socket, messageSize uint32, version string) (*Client, error) {
	// Need at least one byte of payload.
	if messageSize <= largestFixedSize {
		return nil, &ErrMessageTooLarge{
			size:  messageSize,
			msize: largestFixedSize,
		}
	}

	// Compute a payload size and round to 512 (normal block size)
	// if it's larger than a single block.
	payloadSize := messageSize - largestFixedSize
	if payloadSize > 512 && payloadSize%512 != 0 {
		payloadSize -= (payloadSize % 512)
	}
	c := &Client{
		socket:      socket,
		tagPool:     pool{start: 1, limit: uint64(NoTag)},
		fidPool:     pool{start: 1, limit: uint64(NoFID)},
		pending:     make(map[Tag]*response),
		recvr:       make(chan bool, 1),
		messageSize: messageSize,
		payloadSize: payloadSize,
	}
	// Agree upon a version.
	requested, ok := parseVersion(version)
	if !ok {
		return nil, ErrBadVersionString
	}
	// Negotiate downward from the requested version until the server
	// accepts one, or the lowest supported version is rejected.
	for {
		rversion := Rversion{}
		err := c.sendRecv(&Tversion{Version: versionString(requested), MSize: messageSize}, &rversion)

		// The server told us to try again with a lower version.
		if err == syscall.EAGAIN {
			if requested == lowestSupportedVersion {
				return nil, ErrVersionsExhausted
			}
			requested--
			continue
		}

		// We requested an impossible version or our other parameters were bogus.
		if err != nil {
			return nil, err
		}

		// Parse the version.
		version, ok := parseVersion(rversion.Version)
		if !ok {
			// The server gave us a bad version. We return a generically worrisome error.
			log.Warningf("server returned bad version string %q", rversion.Version)
			return nil, ErrBadVersionString
		}
		c.version = version
		break
	}
	return c, nil
}

// handleOne handles a single incoming message.
//
// This should only be called with the token from recvr. Note that the received
// tag will automatically be cleared from pending.
func (c *Client) handleOne() {
	tag, r, err := recv(c.socket, c.messageSize, func(tag Tag, t MsgType) (message, error) {
		c.pendingMu.Lock()
		resp := c.pending[tag]
		c.pendingMu.Unlock()

		// Not expecting this message?
		if resp == nil {
			log.Warningf("client received unexpected tag %v, ignoring", tag)
			return nil, ErrUnexpectedTag
		}

		// Is it an error? We specifically allow this to
		// go through, and then we deserialize below.
		if t == MsgRlerror {
			return &Rlerror{}, nil
		}

		// Does it match expectations?
		if t != resp.r.Type() {
			return nil, &ErrBadResponse{Got: t, Want: resp.r.Type()}
		}

		// Return the response.
		return resp.r, nil
	})

	if err != nil {
		// No tag was extracted (probably a socket error).
		//
		// Likely catastrophic. Notify all waiters and clear pending.
		c.pendingMu.Lock()
		for _, resp := range c.pending {
			resp.done <- err
		}
		c.pending = make(map[Tag]*response)
		c.pendingMu.Unlock()
	} else {
		// Process the tag.
		//
		// We know that it is contained in the map because our lookup function
		// above must have succeeded (found the tag) to return nil err.
		c.pendingMu.Lock()
		resp := c.pending[tag]
		delete(c.pending, tag)
		c.pendingMu.Unlock()
		resp.r = r
		resp.done <- err
	}
}

// waitAndRecv co-ordinates with other receivers to handle responses.
func (c *Client) waitAndRecv(done chan error) error {
	for {
		select {
		case err := <-done:
			return err
		case c.recvr <- true:
			select {
			case err := <-done:
				// It's possible that we got the token, despite
				// done also being available. Check for that.
				<-c.recvr
				return err
			default:
				// Handle receiving one tag.
				c.handleOne()

				// Return the token.
				<-c.recvr
			}
		}
	}
}

// sendRecv performs a roundtrip message exchange.
//
// This is called by internal functions.
func (c *Client) sendRecv(t message, r message) error {
	tag, ok := c.tagPool.Get()
	if !ok {
		return ErrOutOfTags
	}
	defer c.tagPool.Put(tag)

	// Indicate we're expecting a response.
	//
	// Note that the tag will be cleared from pending
	// automatically (see handleOne for details).
	resp := responsePool.Get().(*response)
	defer responsePool.Put(resp)
	resp.r = r
	c.pendingMu.Lock()
	c.pending[Tag(tag)] = resp
	c.pendingMu.Unlock()

	// Send the request over the wire.
	c.sendMu.Lock()
	err := send(c.socket, Tag(tag), t)
	c.sendMu.Unlock()
	if err != nil {
		return err
	}

	// Co-ordinate with other receivers.
	if err := c.waitAndRecv(resp.done); err != nil {
		return err
	}

	// Is it an error message?
	//
	// For convenience, we transform these directly
	// into errors. Handlers need not handle this case.
	if rlerr, ok := resp.r.(*Rlerror); ok {
		return syscall.Errno(rlerr.Error)
	}

	// At this point, we know it matches.
	//
	// Per recv call above, we will only allow a type
	// match (and give our r) or an instance of Rlerror.
	return nil
}

// Version returns the negotiated 9P2000.L.Google version number.
func (c *Client) Version() uint32 {
	return c.version
}
diff --git a/pkg/p9/client_file.go b/pkg/p9/client_file.go
new file mode 100644
index 000000000..0f02980a0
--- /dev/null
+++ b/pkg/p9/client_file.go
@@ -0,0 +1,513 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package p9

import (
	"fmt"
	"io"
	"runtime"
	"sync/atomic"
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/fd"
	"gvisor.googlesource.com/gvisor/pkg/log"
)

// Attach attaches to a server.
//
// Note that authentication is not currently supported.
func (c *Client) Attach(name string) (File, error) {
	fid, ok := c.fidPool.Get()
	if !ok {
		return nil, ErrOutOfFIDs
	}

	rattach := Rattach{}
	if err := c.sendRecv(&Tattach{FID: FID(fid), Auth: Tauth{AttachName: name, AuthenticationFID: NoFID, UID: NoUID}}, &rattach); err != nil {
		// The attach did not take effect; return the FID to the pool.
		c.fidPool.Put(fid)
		return nil, err
	}

	return c.newFile(FID(fid)), nil
}

// newFile returns a new client file.
func (c *Client) newFile(fid FID) *clientFile {
	cf := &clientFile{
		client: c,
		fid:    fid,
	}

	// Make sure the file is closed.
	runtime.SetFinalizer(cf, (*clientFile).Close)

	return cf
}

// clientFile is provided to clients.
//
// This proxies all of the interfaces found in file.go.
type clientFile struct {
	// client is the originating client.
	client *Client

	// fid is the FID for this file.
	fid FID

	// closed indicates whether this file has been closed.
	//
	// Accessed atomically; see Close.
	closed uint32
}

// Walk implements File.Walk.
func (c *clientFile) Walk(names []string) ([]QID, File, error) {
	fid, ok := c.client.fidPool.Get()
	if !ok {
		return nil, nil, ErrOutOfFIDs
	}

	rwalk := Rwalk{}
	if err := c.client.sendRecv(&Twalk{FID: c.fid, NewFID: FID(fid), Names: names}, &rwalk); err != nil {
		// The walk failed; the new FID was never associated server-side.
		c.client.fidPool.Put(fid)
		return nil, nil, err
	}

	// Return a new client file.
	return rwalk.QIDs, c.client.newFile(FID(fid)), nil
}

// WalkGetAttr implements File.WalkGetAttr.
func (c *clientFile) WalkGetAttr(components []string) ([]QID, File, AttrMask, Attr, error) {
	// Older servers do not support the combined Twalkgetattr; emulate it
	// with a Walk followed by a GetAttr on the new file.
	if !versionSupportsTwalkgetattr(c.client.version) {
		qids, file, err := c.Walk(components)
		if err != nil {
			return nil, nil, AttrMask{}, Attr{}, err
		}
		_, valid, attr, err := file.GetAttr(AttrMaskAll())
		if err != nil {
			// Don't leak the walked file on a failed GetAttr.
			file.Close()
			return nil, nil, AttrMask{}, Attr{}, err
		}
		return qids, file, valid, attr, nil
	}

	fid, ok := c.client.fidPool.Get()
	if !ok {
		return nil, nil, AttrMask{}, Attr{}, ErrOutOfFIDs
	}

	rwalkgetattr := Rwalkgetattr{}
	if err := c.client.sendRecv(&Twalkgetattr{FID: c.fid, NewFID: FID(fid), Names: components}, &rwalkgetattr); err != nil {
		c.client.fidPool.Put(fid)
		return nil, nil, AttrMask{}, Attr{}, err
	}

	// Return a new client file.
	return rwalkgetattr.QIDs, c.client.newFile(FID(fid)), rwalkgetattr.Valid, rwalkgetattr.Attr, nil
}

// StatFS implements File.StatFS.
func (c *clientFile) StatFS() (FSStat, error) {
	rstatfs := Rstatfs{}
	if err := c.client.sendRecv(&Tstatfs{FID: c.fid}, &rstatfs); err != nil {
		return FSStat{}, err
	}

	return rstatfs.FSStat, nil
}

// FSync implements File.FSync.
func (c *clientFile) FSync() error {
	return c.client.sendRecv(&Tfsync{FID: c.fid}, &Rfsync{})
}

// GetAttr implements File.GetAttr.
func (c *clientFile) GetAttr(req AttrMask) (QID, AttrMask, Attr, error) {
	rgetattr := Rgetattr{}
	if err := c.client.sendRecv(&Tgetattr{FID: c.fid, AttrMask: req}, &rgetattr); err != nil {
		return QID{}, AttrMask{}, Attr{}, err
	}

	return rgetattr.QID, rgetattr.Valid, rgetattr.Attr, nil
}

// SetAttr implements File.SetAttr.
func (c *clientFile) SetAttr(valid SetAttrMask, attr SetAttr) error {
	return c.client.sendRecv(&Tsetattr{FID: c.fid, Valid: valid, SetAttr: attr}, &Rsetattr{})
}

// Remove implements File.Remove.
func (c *clientFile) Remove() error {
	if err := c.client.sendRecv(&Tremove{FID: c.fid}, &Rremove{}); err != nil {
		// On failure the server-side state of the FID is unknown, so the
		// FID is deliberately not returned to the pool.
		log.Warningf("Tremove failed, losing FID %v: %v", c.fid, err)
		return err
	}

	// "It is correct to consider remove to be a clunk with the side effect
	// of removing the file if permissions allow."
	// https://swtch.com/plan9port/man/man9/remove.html
	// Return the FID to the pool.
	c.client.fidPool.Put(uint64(c.fid))
	return nil
}

// Close implements File.Close.
func (c *clientFile) Close() error {
	// Avoid double close.
	//
	// Close may also be invoked by the finalizer set in newFile, so it
	// must be idempotent.
	if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) {
		return nil
	}

	// Send the close message.
	if err := c.client.sendRecv(&Tclunk{FID: c.fid}, &Rclunk{}); err != nil {
		// If an error occurred, we toss away the FID. This isn't ideal,
		// but I'm not sure what else makes sense in this context.
		log.Warningf("Tclunk failed, losing FID %v: %v", c.fid, err)
		return err
	}

	// Return the FID to the pool.
	c.client.fidPool.Put(uint64(c.fid))
	return nil
}

// Open implements File.Open.
func (c *clientFile) Open(flags OpenFlags) (*fd.FD, QID, uint32, error) {
	rlopen := Rlopen{}
	if err := c.client.sendRecv(&Tlopen{FID: c.fid, Flags: flags}, &rlopen); err != nil {
		return nil, QID{}, 0, err
	}

	return rlopen.File, rlopen.QID, rlopen.IoUnit, nil
}

// Connect implements File.Connect.
func (c *clientFile) Connect(flags ConnectFlags) (*fd.FD, error) {
	if !VersionSupportsConnect(c.client.version) {
		return nil, syscall.ECONNREFUSED
	}
	rlconnect := Rlconnect{}
	if err := c.client.sendRecv(&Tlconnect{FID: c.fid, Flags: flags}, &rlconnect); err != nil {
		return nil, err
	}

	return rlconnect.File, nil
}

// chunk applies fn to p in chunkSize-sized chunks until fn returns a partial result, p is
// exhausted, or an error is encountered (which may be io.EOF).
+func chunk(chunkSize uint32, fn func([]byte, uint64) (int, error), p []byte, offset uint64) (int, error) { + // Some p9.Clients depend on executing fn on zero-byte buffers. Handle this + // as a special case (normally it is fine to short-circuit and return (0, nil)). + if len(p) == 0 { + return fn(p, offset) + } + + // total is the cumulative bytes processed. + var total int + for { + var n int + var err error + + // We're done, don't bother trying to do anything more. + if total == len(p) { + return total, nil + } + + // Apply fn to a chunkSize-sized (or less) chunk of p. + if len(p) < total+int(chunkSize) { + n, err = fn(p[total:], offset) + } else { + n, err = fn(p[total:total+int(chunkSize)], offset) + } + total += n + offset += uint64(n) + + // Return whatever we have processed if we encounter an error. This error + // could be io.EOF. + if err != nil { + return total, err + } + + // Did we get a partial result? If so, return it immediately. + if n < int(chunkSize) { + return total, nil + } + + // If we received more bytes than we ever requested, this is a problem. + if total > len(p) { + panic(fmt.Sprintf("bytes completed (%d)) > requested (%d)", total, len(p))) + } + } +} + +// ReadAt proxies File.ReadAt. +func (c *clientFile) ReadAt(p []byte, offset uint64) (int, error) { + return chunk(c.client.payloadSize, c.readAt, p, offset) +} + +func (c *clientFile) readAt(p []byte, offset uint64) (int, error) { + rread := Rread{Data: p} + if err := c.client.sendRecv(&Tread{FID: c.fid, Offset: offset, Count: uint32(len(p))}, &rread); err != nil { + return 0, err + } + + // The message may have been truncated, or for some reason a new buffer + // allocated. This isn't the common path, but we make sure that if the + // payload has changed we copy it. See transport.go for more information. + if len(p) > 0 && len(rread.Data) > 0 && &rread.Data[0] != &p[0] { + copy(p, rread.Data) + } + + // io.EOF is not an error that a p9 server can return. 
Use POSIX semantics to + // return io.EOF manually: zero bytes were returned and a non-zero buffer was used. + if len(rread.Data) == 0 && len(p) > 0 { + return 0, io.EOF + } + + return len(rread.Data), nil +} + +// WriteAt proxies File.WriteAt. +func (c *clientFile) WriteAt(p []byte, offset uint64) (int, error) { + return chunk(c.client.payloadSize, c.writeAt, p, offset) +} + +func (c *clientFile) writeAt(p []byte, offset uint64) (int, error) { + rwrite := Rwrite{} + if err := c.client.sendRecv(&Twrite{FID: c.fid, Offset: offset, Data: p}, &rwrite); err != nil { + return 0, err + } + + return int(rwrite.Count), nil +} + +// ReadWriterFile wraps a File and implements io.ReadWriter, io.ReaderAt, and io.WriterAt. +type ReadWriterFile struct { + File File + Offset uint64 +} + +// Read implements part of the io.ReadWriter interface. +func (r *ReadWriterFile) Read(p []byte) (int, error) { + n, err := r.File.ReadAt(p, r.Offset) + r.Offset += uint64(n) + if err != nil { + return n, err + } + if n == 0 && len(p) > 0 { + return n, io.EOF + } + return n, nil +} + +// ReadAt implements the io.ReaderAt interface. +func (r *ReadWriterFile) ReadAt(p []byte, offset int64) (int, error) { + n, err := r.File.ReadAt(p, uint64(offset)) + if err != nil { + return 0, err + } + if n == 0 && len(p) > 0 { + return n, io.EOF + } + return n, nil +} + +// Write implements part of the io.ReadWriter interface. +func (r *ReadWriterFile) Write(p []byte) (int, error) { + n, err := r.File.WriteAt(p, r.Offset) + r.Offset += uint64(n) + if err != nil { + return n, err + } + if n < len(p) { + return n, io.ErrShortWrite + } + return n, nil +} + +// WriteAt implements the io.WriteAt interface. +func (r *ReadWriterFile) WriteAt(p []byte, offset int64) (int, error) { + n, err := r.File.WriteAt(p, uint64(offset)) + if err != nil { + return n, err + } + if n < len(p) { + return n, io.ErrShortWrite + } + return n, nil +} + +// Rename implements File.Rename. 
func (c *clientFile) Rename(dir File, name string) error {
	// The target directory must be a file from this same client.
	clientDir, ok := dir.(*clientFile)
	if !ok {
		return syscall.EBADF
	}
	return c.client.sendRecv(&Trename{FID: c.fid, Directory: clientDir.fid, Name: name}, &Rrename{})
}

// Create implements File.Create.
//
// On success this file becomes the newly created (and opened) file; the
// returned File is the receiver itself.
func (c *clientFile) Create(name string, openFlags OpenFlags, permissions FileMode, uid UID, gid GID) (*fd.FD, File, QID, uint32, error) {
	msg := Tlcreate{
		FID:         c.fid,
		Name:        name,
		OpenFlags:   openFlags,
		Permissions: permissions,
		GID:         NoGID,
	}

	// Newer servers accept an explicit UID/GID via Tucreate.
	if versionSupportsTucreation(c.client.version) {
		msg.GID = gid
		rucreate := Rucreate{}
		if err := c.client.sendRecv(&Tucreate{Tlcreate: msg, UID: uid}, &rucreate); err != nil {
			return nil, nil, QID{}, 0, err
		}
		return rucreate.File, c, rucreate.QID, rucreate.IoUnit, nil
	}

	rlcreate := Rlcreate{}
	if err := c.client.sendRecv(&msg, &rlcreate); err != nil {
		return nil, nil, QID{}, 0, err
	}
	return rlcreate.File, c, rlcreate.QID, rlcreate.IoUnit, nil
}

// Mkdir implements File.Mkdir.
func (c *clientFile) Mkdir(name string, permissions FileMode, uid UID, gid GID) (QID, error) {
	msg := Tmkdir{
		Directory:   c.fid,
		Name:        name,
		Permissions: permissions,
		GID:         NoGID,
	}

	if versionSupportsTucreation(c.client.version) {
		msg.GID = gid
		rumkdir := Rumkdir{}
		if err := c.client.sendRecv(&Tumkdir{Tmkdir: msg, UID: uid}, &rumkdir); err != nil {
			return QID{}, err
		}
		return rumkdir.QID, nil
	}

	rmkdir := Rmkdir{}
	if err := c.client.sendRecv(&msg, &rmkdir); err != nil {
		return QID{}, err
	}
	return rmkdir.QID, nil
}

// Symlink implements File.Symlink.
func (c *clientFile) Symlink(oldname string, newname string, uid UID, gid GID) (QID, error) {
	msg := Tsymlink{
		Directory: c.fid,
		Name:      newname,
		Target:    oldname,
		GID:       NoGID,
	}

	// Newer servers accept an explicit UID/GID via Tusymlink.
	if versionSupportsTucreation(c.client.version) {
		msg.GID = gid
		rusymlink := Rusymlink{}
		if err := c.client.sendRecv(&Tusymlink{Tsymlink: msg, UID: uid}, &rusymlink); err != nil {
			return QID{}, err
		}
		return rusymlink.QID, nil
	}

	rsymlink := Rsymlink{}
	if err := c.client.sendRecv(&msg, &rsymlink); err != nil {
		return QID{}, err
	}
	return rsymlink.QID, nil
}

// Link implements File.Link.
func (c *clientFile) Link(target File, newname string) error {
	// The link target must be a file from this same client.
	targetFile, ok := target.(*clientFile)
	if !ok {
		return syscall.EBADF
	}
	return c.client.sendRecv(&Tlink{Directory: c.fid, Name: newname, Target: targetFile.fid}, &Rlink{})
}

// Mknod implements File.Mknod.
func (c *clientFile) Mknod(name string, permissions FileMode, major uint32, minor uint32, uid UID, gid GID) (QID, error) {
	msg := Tmknod{
		Directory:   c.fid,
		Name:        name,
		Permissions: permissions,
		Major:       major,
		Minor:       minor,
		GID:         NoGID,
	}

	if versionSupportsTucreation(c.client.version) {
		msg.GID = gid
		rumknod := Rumknod{}
		if err := c.client.sendRecv(&Tumknod{Tmknod: msg, UID: uid}, &rumknod); err != nil {
			return QID{}, err
		}
		return rumknod.QID, nil
	}

	rmknod := Rmknod{}
	if err := c.client.sendRecv(&msg, &rmknod); err != nil {
		return QID{}, err
	}
	return rmknod.QID, nil
}

// RenameAt implements File.RenameAt.
func (c *clientFile) RenameAt(oldname string, newdir File, newname string) error {
	// The destination directory must be a file from this same client.
	clientNewDir, ok := newdir.(*clientFile)
	if !ok {
		return syscall.EBADF
	}
	return c.client.sendRecv(&Trenameat{OldDirectory: c.fid, OldName: oldname, NewDirectory: clientNewDir.fid, NewName: newname}, &Rrenameat{})
}

// UnlinkAt implements File.UnlinkAt.
func (c *clientFile) UnlinkAt(name string, flags uint32) error {
	return c.client.sendRecv(&Tunlinkat{Directory: c.fid, Name: name, Flags: flags}, &Runlinkat{})
}

// Readdir implements File.Readdir.
func (c *clientFile) Readdir(offset uint64, count uint32) ([]Dirent, error) {
	rreaddir := Rreaddir{}
	if err := c.client.sendRecv(&Treaddir{Directory: c.fid, Offset: offset, Count: count}, &rreaddir); err != nil {
		return nil, err
	}

	return rreaddir.Entries, nil
}

// Readlink implements File.Readlink.
func (c *clientFile) Readlink() (string, error) {
	rreadlink := Rreadlink{}
	if err := c.client.sendRecv(&Treadlink{FID: c.fid}, &rreadlink); err != nil {
		return "", err
	}

	return rreadlink.Target, nil
}

// Flush implements File.Flush.
func (c *clientFile) Flush() error {
	// Flush is a protocol extension; silently succeed on servers that
	// predate it.
	if !VersionSupportsTflushf(c.client.version) {
		return nil
	}
	return c.client.sendRecv(&Tflushf{FID: c.fid}, &Rflushf{})
}
diff --git a/pkg/p9/client_test.go b/pkg/p9/client_test.go
new file mode 100644
index 000000000..06302a76a
--- /dev/null
+++ b/pkg/p9/client_test.go
@@ -0,0 +1,62 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package p9

import (
	"syscall"
	"testing"

	"gvisor.googlesource.com/gvisor/pkg/unet"
)

// TestVersion tests the version negotiation.
func TestVersion(t *testing.T) {
	// First, create a new server and connection.
	serverSocket, clientSocket, err := unet.SocketPair(false)
	if err != nil {
		t.Fatalf("socketpair got err %v expected nil", err)
	}
	defer clientSocket.Close()

	// Create a new server and client.
	s := NewServer(nil)
	go s.Handle(serverSocket)

	// NewClient does a Tversion exchange, so this is our test for success.
	c, err := NewClient(clientSocket, 1024*1024 /* 1M message size */, HighestVersionString())
	if err != nil {
		t.Fatalf("got %v, expected nil", err)
	}

	// Check a bogus version string.
	if err := c.sendRecv(&Tversion{Version: "notokay", MSize: 1024 * 1024}, &Rversion{}); err != syscall.EINVAL {
		t.Errorf("got %v expected %v", err, syscall.EINVAL)
	}

	// Check a bogus version number.
	if err := c.sendRecv(&Tversion{Version: "9P1000.L", MSize: 1024 * 1024}, &Rversion{}); err != syscall.EINVAL {
		t.Errorf("got %v expected %v", err, syscall.EINVAL)
	}

	// Check a too high version number.
	if err := c.sendRecv(&Tversion{Version: versionString(highestSupportedVersion + 1), MSize: 1024 * 1024}, &Rversion{}); err != syscall.EAGAIN {
		t.Errorf("got %v expected %v", err, syscall.EAGAIN)
	}

	// Check an invalid MSize.
	if err := c.sendRecv(&Tversion{Version: versionString(highestSupportedVersion), MSize: 0}, &Rversion{}); err != syscall.EINVAL {
		t.Errorf("got %v expected %v", err, syscall.EINVAL)
	}
}
diff --git a/pkg/p9/file.go b/pkg/p9/file.go
new file mode 100644
index 000000000..ae726f0b9
--- /dev/null
+++ b/pkg/p9/file.go
@@ -0,0 +1,180 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package p9

import (
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/fd"
)

// Attacher is provided by the user.
type Attacher interface {
	// Attach returns a new File.
	Attach(attachName string) (File, error)
}

// File is a set of operations corresponding to a single node.
//
// Functions below MUST return syscall.Errno values.
// TODO: Enforce that with the type.
//
// These must be implemented in all circumstances.
type File interface {
	// Walk walks to the path components given in names.
	//
	// Walk returns QIDs in the same order that the names were passed in.
	//
	// An empty list of arguments should return a copy of the current file.
	Walk(names []string) ([]QID, File, error)

	// StatFS returns information about the file system associated with
	// this file.
	StatFS() (FSStat, error)

	// GetAttr returns attributes of this node.
	GetAttr(req AttrMask) (QID, AttrMask, Attr, error)

	// SetAttr sets attributes on this node.
	SetAttr(valid SetAttrMask, attr SetAttr) error

	// Remove removes the file.
	//
	// This is deprecated in favor of UnlinkAt below.
	Remove() error

	// Rename renames the file.
	Rename(directory File, name string) error

	// Close is called when all references are dropped on the server side,
	// and Close should be called by the client to drop all references.
	//
	// For server-side implementations of Close, the error is ignored.
	//
	// Close must be called even when Open has not been called.
	Close() error

	// Open is called prior to using read/write.
	//
	// The *fd.FD may be nil. If an *fd.FD is provided, ownership now
	// belongs to the caller and the FD must be non-blocking.
	//
	// If Open returns a non-nil *fd.FD, it should do so for all possible
	// OpenFlags. If Open returns a nil *fd.FD, it should similarly return
	// a nil *fd.FD for all possible OpenFlags.
	//
	// This can be assumed to be one-shot only.
	Open(mode OpenFlags) (*fd.FD, QID, uint32, error)

	// Read reads from this file.
	//
	// This may return io.EOF in addition to syscall.Errno values.
	//
	// Preconditions: Open has been called and returned success.
	ReadAt(p []byte, offset uint64) (int, error)

	// Write writes to this file.
	//
	// This may return io.EOF in addition to syscall.Errno values.
	//
	// Preconditions: Open has been called and returned success.
	WriteAt(p []byte, offset uint64) (int, error)

	// FSync syncs this node.
	//
	// Preconditions: Open has been called and returned success.
	FSync() error

	// Create creates a new regular file and opens it according to the
	// flags given.
	//
	// See p9.File.Open for a description of *fd.FD.
	Create(name string, flags OpenFlags, permissions FileMode, uid UID, gid GID) (*fd.FD, File, QID, uint32, error)

	// Mkdir creates a subdirectory.
	Mkdir(name string, permissions FileMode, uid UID, gid GID) (QID, error)

	// Symlink makes a new symbolic link.
	Symlink(oldname string, newname string, uid UID, gid GID) (QID, error)

	// Link makes a new hard link.
	Link(target File, newname string) error

	// Mknod makes a new device node.
	Mknod(name string, permissions FileMode, major uint32, minor uint32, uid UID, gid GID) (QID, error)

	// RenameAt renames a given file to a new name in a potentially new
	// directory.
	//
	// oldname must be a name relative to this file, which must be a
	// directory. newname is a name relative to newdir.
	//
	// This is deprecated in favor of Rename.
	RenameAt(oldname string, newdir File, newname string) error

	// UnlinkAt the given named file.
	//
	// name must be a file relative to this directory.
	//
	// Flags are implementation-specific (e.g. O_DIRECTORY), but are
	// generally Linux unlinkat(2) flags.
	UnlinkAt(name string, flags uint32) error

	// Readdir reads directory entries.
	//
	// This may return io.EOF in addition to syscall.Errno values.
	//
	// Preconditions: Open has been called and returned success.
	Readdir(offset uint64, count uint32) ([]Dirent, error)

	// Readlink reads the link target.
	Readlink() (string, error)

	// Flush is called prior to Close.
	//
	// Whereas Close drops all references to the file, Flush cleans up the
	// file state. Behavior is implementation-specific.
	//
	// Flush is not related to flush(9p). Flush is an extension to 9P2000.L,
	// see version.go.
	Flush() error

	// WalkGetAttr walks to the next file and returns its maximal set of
	// attributes.
	//
	// Server-side p9.Files may return syscall.ENOSYS to indicate that Walk
	// and GetAttr should be used separately to satisfy this request.
	WalkGetAttr([]string) ([]QID, File, AttrMask, Attr, error)

	// Connect establishes a new host-socket backed connection with a
	// socket. A File does not need to be opened before it can be connected
	// and it can be connected to multiple times resulting in a unique
	// *fd.FD each time. In addition, the lifetime of the *fd.FD is
	// independent from the lifetime of the p9.File and must be managed by
	// the caller.
	//
	// The returned FD must be non-blocking.
	//
	// flags indicates the requested type of socket.
	Connect(flags ConnectFlags) (*fd.FD, error)
}

// DefaultWalkGetAttr implements File.WalkGetAttr to return ENOSYS for server-side Files.
type DefaultWalkGetAttr struct{}

// WalkGetAttr implements File.WalkGetAttr.
func (DefaultWalkGetAttr) WalkGetAttr([]string) ([]QID, File, AttrMask, Attr, error) {
	return nil, nil, AttrMask{}, Attr{}, syscall.ENOSYS
}
diff --git a/pkg/p9/handlers.go b/pkg/p9/handlers.go
new file mode 100644
index 000000000..83a66d3ae
--- /dev/null
+++ b/pkg/p9/handlers.go
@@ -0,0 +1,705 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package p9

import (
	"io"
	"os"
	"sync/atomic"
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/log"
)

// newErr returns a new error message from an error.
//
// os.PathError and os.SyscallError are unwrapped to their underlying errno;
// anything else maps to EIO.
func newErr(err error) *Rlerror {
	switch e := err.(type) {
	case syscall.Errno:
		return &Rlerror{Error: uint32(e)}
	case *os.PathError:
		return newErr(e.Err)
	case *os.SyscallError:
		return newErr(e.Err)
	default:
		log.Warningf("unknown error: %v", err)
		return &Rlerror{Error: uint32(syscall.EIO)}
	}
}

// handler is implemented for server-handled messages.
//
// See server.go for call information.
type handler interface {
	// Handle handles the given message.
	//
	// This may modify the server state. The handle function must return a
	// message which will be sent back to the client. It may be useful to
	// use newErr to automatically extract an error message.
	handle(cs *connState) message
}

// handle implements handler.handle.
func (t *Tversion) handle(cs *connState) message {
	if t.MSize == 0 {
		return newErr(syscall.EINVAL)
	}
	if t.MSize > maximumLength {
		return newErr(syscall.EINVAL)
	}
	atomic.StoreUint32(&cs.messageSize, t.MSize)
	requested, ok := parseVersion(t.Version)
	if !ok {
		return newErr(syscall.EINVAL)
	}
	// The server cannot support newer versions that it doesn't know about. In this
	// case we return EAGAIN to tell the client to try again with a lower version.
	if requested > highestSupportedVersion {
		return newErr(syscall.EAGAIN)
	}
	// From Tversion(9P): "The server may respond with the client’s version
	// string, or a version string identifying an earlier defined protocol version".
	atomic.StoreUint32(&cs.version, requested)
	return &Rversion{
		MSize:   t.MSize,
		Version: t.Version,
	}
}

// handle implements handler.handle.
func (t *Tflush) handle(cs *connState) message {
	// Wait for the flushed operation to complete before replying.
	cs.WaitTag(t.OldTag)
	return &Rflush{}
}

// handle implements handler.handle.
func (t *Twalk) handle(cs *connState) message {
	// Lookup the FID.
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(syscall.EBADF)
	}
	defer ref.DecRef()

	// Has it been opened already?
	if _, opened := ref.OpenFlags(); opened {
		return newErr(syscall.EBUSY)
	}

	// Do the walk.
	qids, sf, err := ref.file.Walk(t.Names)
	if err != nil {
		return newErr(err)
	}

	// Install the new FID.
	cs.InsertFID(t.NewFID, &fidRef{file: sf})

	return &Rwalk{QIDs: qids}
}

// handle implements handler.handle.
func (t *Tclunk) handle(cs *connState) message {
	if !cs.DeleteFID(t.FID) {
		return newErr(syscall.EBADF)
	}
	return &Rclunk{}
}

// handle implements handler.handle.
func (t *Tremove) handle(cs *connState) message {
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(syscall.EBADF)
	}
	defer ref.DecRef()

	// "The remove request asks the file server both to remove the file
	// represented by fid and to clunk the fid, even if the remove fails."
	//
	// "It is correct to consider remove to be a clunk with the side effect
	// of removing the file if permissions allow."
	// https://swtch.com/plan9port/man/man9/remove.html
	err := ref.file.Remove()

	// Clunk the FID regardless of Remove error.
	if !cs.DeleteFID(t.FID) {
		return newErr(syscall.EBADF)
	}

	if err != nil {
		return newErr(err)
	}
	return &Rremove{}
}

// handle implements handler.handle.
//
// We don't support authentication, so this just returns ENOSYS.
func (t *Tauth) handle(cs *connState) message {
	return newErr(syscall.ENOSYS)
}

// handle implements handler.handle.
func (t *Tattach) handle(cs *connState) message {
	// Ensure no authentication FID is provided.
	if t.Auth.AuthenticationFID != NoFID {
		return newErr(syscall.EINVAL)
	}

	// Do the attach.
	sf, err := cs.server.attacher.Attach(t.Auth.AttachName)
	if err != nil {
		return newErr(err)
	}
	cs.InsertFID(t.FID, &fidRef{file: sf})

	// Return an empty QID.
	return &Rattach{}
}

// handle implements handler.handle.
func (t *Tlopen) handle(cs *connState) message {
	// Lookup the FID.
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return newErr(syscall.EBADF)
	}
	defer ref.DecRef()

	ref.openedMu.Lock()
	defer ref.openedMu.Unlock()

	// Has it been opened already?
	if ref.opened {
		return newErr(syscall.EINVAL)
	}

	// Do the open.
	osFile, qid, ioUnit, err := ref.file.Open(t.Flags)
	if err != nil {
		return newErr(err)
	}

	// Mark file as opened and set open mode.
	ref.opened = true
	ref.openFlags = t.Flags

	return &Rlopen{QID: qid, IoUnit: ioUnit, File: osFile}
}

// do performs the actual create using the given creator UID; shared by the
// Tlcreate and Tucreate handlers.
func (t *Tlcreate) do(cs *connState, uid UID) (*Rlcreate, error) {
	// Lookup the FID.
	ref, ok := cs.LookupFID(t.FID)
	if !ok {
		return nil, syscall.EBADF
	}
	defer ref.DecRef()

	// Do the create.
	osFile, nsf, qid, ioUnit, err := ref.file.Create(t.Name, t.OpenFlags, t.Permissions, uid, t.GID)
	if err != nil {
		return nil, err
	}

	// Replace the FID reference.
	//
	// The new file will be opened already.
	cs.InsertFID(t.FID, &fidRef{file: nsf, opened: true, openFlags: t.OpenFlags})

	return &Rlcreate{Rlopen: Rlopen{QID: qid, IoUnit: ioUnit, File: osFile}}, nil
}

// handle implements handler.handle.
func (t *Tlcreate) handle(cs *connState) message {
	rlcreate, err := t.do(cs, NoUID)
	if err != nil {
		return newErr(err)
	}
	return rlcreate
}

// handle implements handler.handle.
func (t *Tsymlink) handle(cs *connState) message {
	rsymlink, err := t.do(cs, NoUID)
	if err != nil {
		return newErr(err)
	}
	return rsymlink
}

// do performs the actual symlink using the given creator UID; shared by the
// Tsymlink and Tusymlink handlers.
func (t *Tsymlink) do(cs *connState, uid UID) (*Rsymlink, error) {
	// Lookup the FID.
	ref, ok := cs.LookupFID(t.Directory)
	if !ok {
		return nil, syscall.EBADF
	}
	defer ref.DecRef()

	// Do the symlink.
	qid, err := ref.file.Symlink(t.Target, t.Name, uid, t.GID)
	if err != nil {
		return nil, err
	}

	return &Rsymlink{QID: qid}, nil
}

// handle implements handler.handle.
func (t *Tlink) handle(cs *connState) message {
	// Lookup the FID.
	ref, ok := cs.LookupFID(t.Directory)
	if !ok {
		return newErr(syscall.EBADF)
	}
	defer ref.DecRef()

	// Lookup the other FID.
	refTarget, ok := cs.LookupFID(t.Target)
	if !ok {
		return newErr(syscall.EBADF)
	}
	defer refTarget.DecRef()

	// Do the link.
	if err := ref.file.Link(refTarget.file, t.Name); err != nil {
		return newErr(err)
	}

	return &Rlink{}
}

// handle implements handler.handle.
+func (t *Trenameat) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.OldDirectory) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Lookup the other FID. + refTarget, ok := cs.LookupFID(t.NewDirectory) + if !ok { + return newErr(syscall.EBADF) + } + defer refTarget.DecRef() + + // Do the rename. + if err := ref.file.RenameAt(t.OldName, refTarget.file, t.NewName); err != nil { + return newErr(err) + } + + return &Rrenameat{} +} + +// handle implements handler.handle. +func (t *Tunlinkat) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.Directory) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Do the unlink. + if err := ref.file.UnlinkAt(t.Name, t.Flags); err != nil { + return newErr(err) + } + + return &Runlinkat{} +} + +// handle implements handler.handle. +func (t *Trename) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Lookup the target. + refTarget, ok := cs.LookupFID(t.Directory) + if !ok { + return newErr(syscall.EBADF) + } + defer refTarget.DecRef() + + // Call the rename method. + if err := ref.file.Rename(refTarget.file, t.Name); err != nil { + return newErr(err) + } + + return &Rrename{} +} + +// handle implements handler.handle. +func (t *Treadlink) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Do the read. + target, err := ref.file.Readlink() + if err != nil { + return newErr(err) + } + + return &Rreadlink{target} +} + +// handle implements handler.handle. +func (t *Tread) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Has it been opened already? 
+ openFlags, opened := ref.OpenFlags() + if !opened { + return newErr(syscall.EINVAL) + } + + // Can it be read? Check permissions. + if openFlags&OpenFlagsModeMask == WriteOnly { + return newErr(syscall.EPERM) + } + + // Constrain the size of the read buffer. + if int(t.Count) > int(maximumLength) { + return newErr(syscall.ENOBUFS) + } + + // Do the read. + data := make([]byte, t.Count) + n, err := ref.file.ReadAt(data, t.Offset) + if err != nil && err != io.EOF { + return newErr(err) + } + + return &Rread{Data: data[:n]} +} + +// handle implements handler.handle. +func (t *Twrite) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Has it been opened already? + openFlags, opened := ref.OpenFlags() + if !opened { + return newErr(syscall.EINVAL) + } + + // Can it be write? Check permissions. + if openFlags&OpenFlagsModeMask == ReadOnly { + return newErr(syscall.EPERM) + } + + // Do the write. + n, err := ref.file.WriteAt(t.Data, t.Offset) + if err != nil { + return newErr(err) + } + + return &Rwrite{Count: uint32(n)} +} + +// handle implements handler.handle. +func (t *Tmknod) handle(cs *connState) message { + rmknod, err := t.do(cs, NoUID) + if err != nil { + return newErr(err) + } + return rmknod +} + +func (t *Tmknod) do(cs *connState, uid UID) (*Rmknod, error) { + // Lookup the FID. + ref, ok := cs.LookupFID(t.Directory) + if !ok { + return nil, syscall.EBADF + } + defer ref.DecRef() + + // Do the mknod. + qid, err := ref.file.Mknod(t.Name, t.Permissions, t.Major, t.Minor, uid, t.GID) + if err != nil { + return nil, err + } + + return &Rmknod{QID: qid}, nil +} + +// handle implements handler.handle. +func (t *Tmkdir) handle(cs *connState) message { + rmkdir, err := t.do(cs, NoUID) + if err != nil { + return newErr(err) + } + return rmkdir +} + +func (t *Tmkdir) do(cs *connState, uid UID) (*Rmkdir, error) { + // Lookup the FID. 
+ ref, ok := cs.LookupFID(t.Directory) + if !ok { + return nil, syscall.EBADF + } + defer ref.DecRef() + + // Do the mkdir. + qid, err := ref.file.Mkdir(t.Name, t.Permissions, uid, t.GID) + if err != nil { + return nil, err + } + + return &Rmkdir{QID: qid}, nil +} + +// handle implements handler.handle. +func (t *Tgetattr) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Get attributes. + qid, valid, attr, err := ref.file.GetAttr(t.AttrMask) + if err != nil { + return newErr(err) + } + + return &Rgetattr{QID: qid, Valid: valid, Attr: attr} +} + +// handle implements handler.handle. +func (t *Tsetattr) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Set attributes. + if err := ref.file.SetAttr(t.Valid, t.SetAttr); err != nil { + return newErr(err) + } + + return &Rsetattr{} +} + +// handle implements handler.handle. +func (t *Txattrwalk) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // We don't support extended attributes. + return newErr(syscall.ENODATA) +} + +// handle implements handler.handle. +func (t *Txattrcreate) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // We don't support extended attributes. + return newErr(syscall.ENOSYS) +} + +// handle implements handler.handle. +func (t *Treaddir) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.Directory) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Has it been opened already? + if _, opened := ref.OpenFlags(); !opened { + return newErr(syscall.EINVAL) + } + + // Read the entries. 
+ entries, err := ref.file.Readdir(t.Offset, t.Count) + if err != nil && err != io.EOF { + return newErr(err) + } + + return &Rreaddir{Count: t.Count, Entries: entries} +} + +// handle implements handler.handle. +func (t *Tfsync) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Has it been opened already? + if _, opened := ref.OpenFlags(); !opened { + return newErr(syscall.EINVAL) + } + + err := ref.file.FSync() + if err != nil { + return newErr(err) + } + + return &Rfsync{} +} + +// handle implements handler.handle. +func (t *Tstatfs) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + st, err := ref.file.StatFS() + if err != nil { + return newErr(err) + } + + return &Rstatfs{st} +} + +// handle implements handler.handle. +func (t *Tflushf) handle(cs *connState) message { + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + if err := ref.file.Flush(); err != nil { + return newErr(err) + } + + return &Rflushf{} +} + +// handle implements handler.handle. +func (t *Twalkgetattr) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Has it been opened already? + if _, opened := ref.OpenFlags(); opened { + return newErr(syscall.EBUSY) + } + + // Do the walk. + qids, sf, valid, attr, err := ref.file.WalkGetAttr(t.Names) + if err == syscall.ENOSYS { + qids, sf, err = ref.file.Walk(t.Names) + if err != nil { + return newErr(err) + } + _, valid, attr, err = sf.GetAttr(AttrMaskAll()) + } + if err != nil { + return newErr(err) + } + + // Install the new FID. + cs.InsertFID(t.NewFID, &fidRef{file: sf}) + + return &Rwalkgetattr{QIDs: qids, Valid: valid, Attr: attr} +} + +// handle implements handler.handle. 
+func (t *Tucreate) handle(cs *connState) message { + rlcreate, err := t.Tlcreate.do(cs, t.UID) + if err != nil { + return newErr(err) + } + return &Rucreate{*rlcreate} +} + +// handle implements handler.handle. +func (t *Tumkdir) handle(cs *connState) message { + rmkdir, err := t.Tmkdir.do(cs, t.UID) + if err != nil { + return newErr(err) + } + return &Rumkdir{*rmkdir} +} + +// handle implements handler.handle. +func (t *Tusymlink) handle(cs *connState) message { + rsymlink, err := t.Tsymlink.do(cs, t.UID) + if err != nil { + return newErr(err) + } + return &Rusymlink{*rsymlink} +} + +// handle implements handler.handle. +func (t *Tumknod) handle(cs *connState) message { + rmknod, err := t.Tmknod.do(cs, t.UID) + if err != nil { + return newErr(err) + } + return &Rumknod{*rmknod} +} + +// handle implements handler.handle. +func (t *Tlconnect) handle(cs *connState) message { + // Lookup the FID. + ref, ok := cs.LookupFID(t.FID) + if !ok { + return newErr(syscall.EBADF) + } + defer ref.DecRef() + + // Do the connect. + osFile, err := ref.file.Connect(t.Flags) + if err != nil { + return newErr(err) + } + + return &Rlconnect{File: osFile} +} diff --git a/pkg/p9/local_server/BUILD b/pkg/p9/local_server/BUILD new file mode 100644 index 000000000..8229e6308 --- /dev/null +++ b/pkg/p9/local_server/BUILD @@ -0,0 +1,14 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_binary") + +go_binary( + name = "local_server", + srcs = ["local_server.go"], + deps = [ + "//pkg/fd", + "//pkg/log", + "//pkg/p9", + "//pkg/unet", + ], +) diff --git a/pkg/p9/local_server/local_server.go b/pkg/p9/local_server/local_server.go new file mode 100644 index 000000000..5b1e97711 --- /dev/null +++ b/pkg/p9/local_server/local_server.go @@ -0,0 +1,347 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Binary local_server provides a local 9P2000.L server for the p9 package. +// +// To use, first start the server: +// local_server /tmp/my_bind_addr +// +// Then, connect using the Linux 9P filesystem: +// mount -t 9p -o trans=unix,version=9P2000.L /tmp/my_bind_addr /mnt +// +// This package also serves as an examplar. +package main + +import ( + "os" + "path" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +// local wraps a local file. +type local struct { + p9.DefaultWalkGetAttr + + path string + file *os.File +} + +// info constructs a QID for this file. +func (l *local) info() (p9.QID, os.FileInfo, error) { + var ( + qid p9.QID + fi os.FileInfo + err error + ) + + // Stat the file. + if l.file != nil { + fi, err = l.file.Stat() + } else { + fi, err = os.Lstat(l.path) + } + if err != nil { + log.Warningf("error stating %#v: %v", l, err) + return qid, nil, err + } + + // Construct the QID type. + qid.Type = p9.ModeFromOS(fi.Mode()).QIDType() + + // Save the path from the Ino. + qid.Path = fi.Sys().(*syscall.Stat_t).Ino + return qid, fi, nil +} + +// Attach implements p9.Attacher.Attach. +func (l *local) Attach(name string) (p9.File, error) { + return &local{path: path.Clean(name)}, nil +} + +// Walk implements p9.File.Walk. 
+func (l *local) Walk(names []string) ([]p9.QID, p9.File, error) { + var qids []p9.QID + last := &local{path: l.path} + for _, name := range names { + c := &local{path: path.Join(last.path, name)} + qid, _, err := c.info() + if err != nil { + return nil, nil, err + } + qids = append(qids, qid) + last = c + } + return qids, last, nil +} + +// StatFS implements p9.File.StatFS. +// +// Not implemented. +func (l *local) StatFS() (p9.FSStat, error) { + return p9.FSStat{}, syscall.ENOSYS +} + +// FSync implements p9.File.FSync. +func (l *local) FSync() error { + return l.file.Sync() +} + +// GetAttr implements p9.File.GetAttr. +// +// Not fully implemented. +func (l *local) GetAttr(req p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) { + qid, fi, err := l.info() + if err != nil { + return qid, p9.AttrMask{}, p9.Attr{}, err + } + + stat := fi.Sys().(*syscall.Stat_t) + attr := p9.Attr{ + Mode: p9.FileMode(stat.Mode), + UID: p9.UID(stat.Uid), + GID: p9.GID(stat.Gid), + NLink: stat.Nlink, + RDev: stat.Rdev, + Size: uint64(stat.Size), + BlockSize: uint64(stat.Blksize), + Blocks: uint64(stat.Blocks), + ATimeSeconds: uint64(stat.Atim.Sec), + ATimeNanoSeconds: uint64(stat.Atim.Nsec), + MTimeSeconds: uint64(stat.Mtim.Sec), + MTimeNanoSeconds: uint64(stat.Mtim.Nsec), + CTimeSeconds: uint64(stat.Ctim.Sec), + CTimeNanoSeconds: uint64(stat.Ctim.Nsec), + } + valid := p9.AttrMask{ + Mode: true, + UID: true, + GID: true, + NLink: true, + RDev: true, + Size: true, + Blocks: true, + ATime: true, + MTime: true, + CTime: true, + } + + return qid, valid, attr, nil +} + +// SetAttr implements p9.File.SetAttr. +// +// Not implemented. +func (l *local) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error { + return syscall.ENOSYS +} + +// Remove implements p9.File.Remove. +// +// Not implemented. +func (l *local) Remove() error { + return syscall.ENOSYS +} + +// Rename implements p9.File.Rename. +// +// Not implemented. 
+func (l *local) Rename(directory p9.File, name string) error { + return syscall.ENOSYS +} + +// Close implements p9.File.Close. +func (l *local) Close() error { + if l.file != nil { + return l.file.Close() + } + return nil +} + +// Open implements p9.File.Open. +func (l *local) Open(mode p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) { + qid, _, err := l.info() + if err != nil { + return nil, qid, 0, err + } + + // Do the actual open. + f, err := os.OpenFile(l.path, int(mode), 0) + if err != nil { + return nil, qid, 0, err + } + l.file = f + + // Note: we don't send the local file for this server. + return nil, qid, 4096, nil +} + +// Read implements p9.File.Read. +func (l *local) ReadAt(p []byte, offset uint64) (int, error) { + return l.file.ReadAt(p, int64(offset)) +} + +// Write implements p9.File.Write. +func (l *local) WriteAt(p []byte, offset uint64) (int, error) { + return l.file.WriteAt(p, int64(offset)) +} + +// Create implements p9.File.Create. +func (l *local) Create(name string, mode p9.OpenFlags, permissions p9.FileMode, _ p9.UID, _ p9.GID) (*fd.FD, p9.File, p9.QID, uint32, error) { + f, err := os.OpenFile(l.path, int(mode)|syscall.O_CREAT|syscall.O_EXCL, os.FileMode(permissions)) + if err != nil { + return nil, nil, p9.QID{}, 0, err + } + + l2 := &local{path: path.Join(l.path, name), file: f} + qid, _, err := l2.info() + if err != nil { + l2.Close() + return nil, nil, p9.QID{}, 0, err + } + + return nil, l2, qid, 4096, nil +} + +// Mkdir implements p9.File.Mkdir. +// +// Not properly implemented. +func (l *local) Mkdir(name string, permissions p9.FileMode, _ p9.UID, _ p9.GID) (p9.QID, error) { + if err := os.Mkdir(path.Join(l.path, name), os.FileMode(permissions)); err != nil { + return p9.QID{}, err + } + + // Blank QID. + return p9.QID{}, nil +} + +// Symlink implements p9.File.Symlink. +// +// Not properly implemented. 
+func (l *local) Symlink(oldname string, newname string, _ p9.UID, _ p9.GID) (p9.QID, error) { + if err := os.Symlink(oldname, path.Join(l.path, newname)); err != nil { + return p9.QID{}, err + } + + // Blank QID. + return p9.QID{}, nil +} + +// Link implements p9.File.Link. +// +// Not properly implemented. +func (l *local) Link(target p9.File, newname string) error { + if err := os.Link(target.(*local).path, path.Join(l.path, newname)); err != nil { + return err + } + + return nil +} + +// Mknod implements p9.File.Mknod. +// +// Not implemented. +func (l *local) Mknod(name string, permissions p9.FileMode, major uint32, minor uint32, _ p9.UID, _ p9.GID) (p9.QID, error) { + return p9.QID{}, syscall.ENOSYS +} + +// RenameAt implements p9.File.RenameAt. +// +// Not implemented. +func (l *local) RenameAt(oldname string, newdir p9.File, newname string) error { + return syscall.ENOSYS +} + +// UnlinkAt implements p9.File.UnlinkAt. +// +// Not implemented. +func (l *local) UnlinkAt(name string, flags uint32) error { + return syscall.ENOSYS +} + +// Readdir implements p9.File.Readdir. +func (l *local) Readdir(offset uint64, count uint32) ([]p9.Dirent, error) { + // We only do *all* dirents in single shot. + const maxDirentBuffer = 1024 * 1024 + buf := make([]byte, maxDirentBuffer) + n, err := syscall.ReadDirent(int(l.file.Fd()), buf) + if err != nil { + // Return zero entries. + return nil, nil + } + + // Parse the entries; note that we read up to offset+count here. + _, newCount, newNames := syscall.ParseDirent(buf[:n], int(offset)+int(count), nil) + var dirents []p9.Dirent + for i := int(offset); i >= 0 && i < newCount; i++ { + entry := local{path: path.Join(l.path, newNames[i])} + qid, _, err := entry.info() + if err != nil { + continue + } + dirents = append(dirents, p9.Dirent{ + QID: qid, + Type: qid.Type, + Name: newNames[i], + Offset: uint64(i + 1), + }) + } + + return dirents, nil +} + +// Readlink implements p9.File.Readlink. +// +// Not properly implemented. 
+func (l *local) Readlink() (string, error) { + return os.Readlink(l.path) +} + +// Flush implements p9.File.Flush. +func (l *local) Flush() error { + return nil +} + +// Connect implements p9.File.Connect. +func (l *local) Connect(p9.ConnectFlags) (*fd.FD, error) { + return nil, syscall.ECONNREFUSED +} + +func main() { + log.SetLevel(log.Debug) + + if len(os.Args) != 2 { + log.Warningf("usage: %s <bind-addr>", os.Args[0]) + os.Exit(1) + } + + // Bind and listen on the socket. + serverSocket, err := unet.BindAndListen(os.Args[1], false) + if err != nil { + log.Warningf("err binding: %v", err) + os.Exit(1) + } + + // Run the server. + s := p9.NewServer(&local{}) + s.Serve(serverSocket) +} + +var ( + _ p9.File = &local{} +) diff --git a/pkg/p9/messages.go b/pkg/p9/messages.go new file mode 100644 index 000000000..b3d76801b --- /dev/null +++ b/pkg/p9/messages.go @@ -0,0 +1,2271 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "fmt" + "reflect" + + "gvisor.googlesource.com/gvisor/pkg/fd" +) + +// ErrInvalidMsgType is returned when an unsupported message type is found. +type ErrInvalidMsgType struct { + MsgType +} + +// Error returns a useful string. +func (e *ErrInvalidMsgType) Error() string { + return fmt.Sprintf("invalid message type: %d", e.MsgType) +} + +// message is a generic 9P message. +type message interface { + encoder + fmt.Stringer + + // Type returns the message type number. 
+ Type() MsgType +} + +// payloader is a special message which may include an inline payload. +type payloader interface { + // FixedSize returns the size of the fixed portion of this message. + FixedSize() uint32 + + // Payload returns the payload for sending. + Payload() []byte + + // SetPayload returns the decoded message. + // + // This is going to be total message size - FixedSize. But this should + // be validated during Decode, which will be called after SetPayload. + SetPayload([]byte) +} + +// filer is a message capable of passing a file. +type filer interface { + // FilePayload returns the file payload. + FilePayload() *fd.FD + + // SetFilePayload sets the file payload. + SetFilePayload(*fd.FD) +} + +// Tversion is a version request. +type Tversion struct { + // MSize is the message size to use. + MSize uint32 + + // Version is the version string. + // + // For this implementation, this must be 9P2000.L. + Version string +} + +// Decode implements encoder.Decode. +func (t *Tversion) Decode(b *buffer) { + t.MSize = b.Read32() + t.Version = b.ReadString() +} + +// Encode implements encoder.Encode. +func (t *Tversion) Encode(b *buffer) { + b.Write32(t.MSize) + b.WriteString(t.Version) +} + +// Type implements message.Type. +func (*Tversion) Type() MsgType { + return MsgTversion +} + +// String implements fmt.Stringer. +func (t *Tversion) String() string { + return fmt.Sprintf("Tversion{MSize: %d, Version: %s}", t.MSize, t.Version) +} + +// Rversion is a version response. +type Rversion struct { + // MSize is the negotiated size. + MSize uint32 + + // Version is the negotiated version. + Version string +} + +// Decode implements encoder.Decode. +func (r *Rversion) Decode(b *buffer) { + r.MSize = b.Read32() + r.Version = b.ReadString() +} + +// Encode implements encoder.Encode. +func (r *Rversion) Encode(b *buffer) { + b.Write32(r.MSize) + b.WriteString(r.Version) +} + +// Type implements message.Type. 
+func (*Rversion) Type() MsgType { + return MsgRversion +} + +// String implements fmt.Stringer. +func (r *Rversion) String() string { + return fmt.Sprintf("Rversion{MSize: %d, Version: %s}", r.MSize, r.Version) +} + +// Tflush is a flush request. +type Tflush struct { + // OldTag is the tag to wait on. + OldTag Tag +} + +// Decode implements encoder.Decode. +func (t *Tflush) Decode(b *buffer) { + t.OldTag = b.ReadTag() +} + +// Encode implements encoder.Encode. +func (t *Tflush) Encode(b *buffer) { + b.WriteTag(t.OldTag) +} + +// Type implements message.Type. +func (*Tflush) Type() MsgType { + return MsgTflush +} + +// String implements fmt.Stringer. +func (t *Tflush) String() string { + return fmt.Sprintf("Tflush{OldTag: %d}", t.OldTag) +} + +// Rflush is a flush response. +type Rflush struct { +} + +// Decode implements encoder.Decode. +func (*Rflush) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Rflush) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Rflush) Type() MsgType { + return MsgRflush +} + +// String implements fmt.Stringer. +func (r *Rflush) String() string { + return fmt.Sprintf("RFlush{}") +} + +// Twalk is a walk request. +type Twalk struct { + // FID is the FID to be walked. + FID FID + + // NewFID is the resulting FID. + NewFID FID + + // Names are the set of names to be walked. + Names []string +} + +// Decode implements encoder.Decode. +func (t *Twalk) Decode(b *buffer) { + t.FID = b.ReadFID() + t.NewFID = b.ReadFID() + n := b.Read16() + for i := 0; i < int(n); i++ { + t.Names = append(t.Names, b.ReadString()) + } +} + +// Encode implements encoder.Encode. +func (t *Twalk) Encode(b *buffer) { + b.WriteFID(t.FID) + b.WriteFID(t.NewFID) + b.Write16(uint16(len(t.Names))) + for _, name := range t.Names { + b.WriteString(name) + } +} + +// Type implements message.Type. +func (*Twalk) Type() MsgType { + return MsgTwalk +} + +// String implements fmt.Stringer. 
+func (t *Twalk) String() string { + return fmt.Sprintf("Twalk{FID: %d, NewFID: %d, Names: %v}", t.FID, t.NewFID, t.Names) +} + +// Rwalk is a walk response. +type Rwalk struct { + // QIDs are the set of QIDs returned. + QIDs []QID +} + +// Decode implements encoder.Decode. +func (r *Rwalk) Decode(b *buffer) { + n := b.Read16() + for i := 0; i < int(n); i++ { + var q QID + q.Decode(b) + r.QIDs = append(r.QIDs, q) + } +} + +// Encode implements encoder.Encode. +func (r *Rwalk) Encode(b *buffer) { + b.Write16(uint16(len(r.QIDs))) + for _, q := range r.QIDs { + q.Encode(b) + } +} + +// Type implements message.Type. +func (*Rwalk) Type() MsgType { + return MsgRwalk +} + +// String implements fmt.Stringer. +func (r *Rwalk) String() string { + return fmt.Sprintf("Rwalk{QIDs: %v}", r.QIDs) +} + +// Tclunk is a close request. +type Tclunk struct { + // FID is the FID to be closed. + FID FID +} + +// Decode implements encoder.Decode. +func (t *Tclunk) Decode(b *buffer) { + t.FID = b.ReadFID() +} + +// Encode implements encoder.Encode. +func (t *Tclunk) Encode(b *buffer) { + b.WriteFID(t.FID) +} + +// Type implements message.Type. +func (*Tclunk) Type() MsgType { + return MsgTclunk +} + +// String implements fmt.Stringer. +func (t *Tclunk) String() string { + return fmt.Sprintf("Tclunk{FID: %d}", t.FID) +} + +// Rclunk is a close response. +type Rclunk struct { +} + +// Decode implements encoder.Decode. +func (*Rclunk) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Rclunk) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Rclunk) Type() MsgType { + return MsgRclunk +} + +// String implements fmt.Stringer. +func (r *Rclunk) String() string { + return fmt.Sprintf("Rclunk{}") +} + +// Tremove is a remove request. +// +// This will eventually be replaced by Tunlinkat. +type Tremove struct { + // FID is the FID to be removed. + FID FID +} + +// Decode implements encoder.Decode. 
+func (t *Tremove) Decode(b *buffer) { + t.FID = b.ReadFID() +} + +// Encode implements encoder.Encode. +func (t *Tremove) Encode(b *buffer) { + b.WriteFID(t.FID) +} + +// Type implements message.Type. +func (*Tremove) Type() MsgType { + return MsgTremove +} + +// String implements fmt.Stringer. +func (t *Tremove) String() string { + return fmt.Sprintf("Tremove{FID: %d}", t.FID) +} + +// Rremove is a remove response. +type Rremove struct { +} + +// Decode implements encoder.Decode. +func (*Rremove) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Rremove) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Rremove) Type() MsgType { + return MsgRremove +} + +// String implements fmt.Stringer. +func (r *Rremove) String() string { + return fmt.Sprintf("Rremove{}") +} + +// Rlerror is an error response. +// +// Note that this replaces the error code used in 9p. +type Rlerror struct { + Error uint32 +} + +// Decode implements encoder.Decode. +func (r *Rlerror) Decode(b *buffer) { + r.Error = b.Read32() +} + +// Encode implements encoder.Encode. +func (r *Rlerror) Encode(b *buffer) { + b.Write32(r.Error) +} + +// Type implements message.Type. +func (*Rlerror) Type() MsgType { + return MsgRlerror +} + +// String implements fmt.Stringer. +func (r *Rlerror) String() string { + return fmt.Sprintf("Rlerror{Error: %d}", r.Error) +} + +// Tauth is an authentication request. +type Tauth struct { + // AuthenticationFID is the FID to attach the authentication result. + AuthenticationFID FID + + // UserName is the user to attach. + UserName string + + // AttachName is the attach name. + AttachName string + + // UserID is the numeric identifier for UserName. + UID UID +} + +// Decode implements encoder.Decode. +func (t *Tauth) Decode(b *buffer) { + t.AuthenticationFID = b.ReadFID() + t.UserName = b.ReadString() + t.AttachName = b.ReadString() + t.UID = b.ReadUID() +} + +// Encode implements encoder.Encode. 
+func (t *Tauth) Encode(b *buffer) { + b.WriteFID(t.AuthenticationFID) + b.WriteString(t.UserName) + b.WriteString(t.AttachName) + b.WriteUID(t.UID) +} + +// Type implements message.Type. +func (*Tauth) Type() MsgType { + return MsgTauth +} + +// String implements fmt.Stringer. +func (t *Tauth) String() string { + return fmt.Sprintf("Tauth{AuthFID: %d, UserName: %s, AttachName: %s, UID: %d", t.AuthenticationFID, t.UserName, t.AttachName, t.UID) +} + +// Rauth is an authentication response. +// +// Encode, Decode and Length are inherited directly from QID. +type Rauth struct { + QID +} + +// Type implements message.Type. +func (*Rauth) Type() MsgType { + return MsgRauth +} + +// String implements fmt.Stringer. +func (r *Rauth) String() string { + return fmt.Sprintf("Rauth{QID: %s}", r.QID) +} + +// Tattach is an attach request. +type Tattach struct { + // FID is the FID to be attached. + FID FID + + // Auth is the embedded authentication request. + // + // See client.Attach for information regarding authentication. + Auth Tauth +} + +// Decode implements encoder.Decode. +func (t *Tattach) Decode(b *buffer) { + t.FID = b.ReadFID() + t.Auth.Decode(b) +} + +// Encode implements encoder.Encode. +func (t *Tattach) Encode(b *buffer) { + b.WriteFID(t.FID) + t.Auth.Encode(b) +} + +// Type implements message.Type. +func (*Tattach) Type() MsgType { + return MsgTattach +} + +// String implements fmt.Stringer. +func (t *Tattach) String() string { + return fmt.Sprintf("Tattach{FID: %d, AuthFID: %d, UserName: %s, AttachName: %s, UID: %d}", t.FID, t.Auth.AuthenticationFID, t.Auth.UserName, t.Auth.AttachName, t.Auth.UID) +} + +// Rattach is an attach response. +type Rattach struct { + QID +} + +// Type implements message.Type. +func (*Rattach) Type() MsgType { + return MsgRattach +} + +// String implements fmt.Stringer. +func (r *Rattach) String() string { + return fmt.Sprintf("Rattach{QID: %s}", r.QID) +} + +// Tlopen is an open request. 
+type Tlopen struct { + // FID is the FID to be opened. + FID FID + + // Flags are the open flags. + Flags OpenFlags +} + +// Decode implements encoder.Decode. +func (t *Tlopen) Decode(b *buffer) { + t.FID = b.ReadFID() + t.Flags = b.ReadOpenFlags() +} + +// Encode implements encoder.Encode. +func (t *Tlopen) Encode(b *buffer) { + b.WriteFID(t.FID) + b.WriteOpenFlags(t.Flags) +} + +// Type implements message.Type. +func (*Tlopen) Type() MsgType { + return MsgTlopen +} + +// String implements fmt.Stringer. +func (t *Tlopen) String() string { + return fmt.Sprintf("Tlopen{FID: %d, Flags: %v}", t.FID, t.Flags) +} + +// Rlopen is a open response. +type Rlopen struct { + // QID is the file's QID. + QID QID + + // IoUnit is the recommended I/O unit. + IoUnit uint32 + + // File may be attached via the socket. + // + // This is an extension specific to this package. + File *fd.FD +} + +// Decode implements encoder.Decode. +func (r *Rlopen) Decode(b *buffer) { + r.QID.Decode(b) + r.IoUnit = b.Read32() +} + +// Encode implements encoder.Encode. +func (r *Rlopen) Encode(b *buffer) { + r.QID.Encode(b) + b.Write32(r.IoUnit) +} + +// Type implements message.Type. +func (*Rlopen) Type() MsgType { + return MsgRlopen +} + +// FilePayload returns the file payload. +func (r *Rlopen) FilePayload() *fd.FD { + return r.File +} + +// SetFilePayload sets the received file. +func (r *Rlopen) SetFilePayload(file *fd.FD) { + r.File = file +} + +// String implements fmt.Stringer. +func (r *Rlopen) String() string { + return fmt.Sprintf("Rlopen{QID: %s, IoUnit: %d, File: %v}", r.QID, r.IoUnit, r.File) +} + +// Tlcreate is a create request. +type Tlcreate struct { + // FID is the parent FID. + // + // This becomes the new file. + FID FID + + // Name is the file name to create. + Name string + + // Mode is the open mode (O_RDWR, etc.). + // + // Note that flags like O_TRUNC are ignored, as is O_EXCL. All + // create operations are exclusive. 
+ OpenFlags OpenFlags + + // Permissions is the set of permission bits. + Permissions FileMode + + // GID is the group ID to use for creating the file. + GID GID +} + +// Decode implements encoder.Decode. +func (t *Tlcreate) Decode(b *buffer) { + t.FID = b.ReadFID() + t.Name = b.ReadString() + t.OpenFlags = b.ReadOpenFlags() + t.Permissions = b.ReadPermissions() + t.GID = b.ReadGID() +} + +// Encode implements encoder.Encode. +func (t *Tlcreate) Encode(b *buffer) { + b.WriteFID(t.FID) + b.WriteString(t.Name) + b.WriteOpenFlags(t.OpenFlags) + b.WritePermissions(t.Permissions) + b.WriteGID(t.GID) +} + +// Type implements message.Type. +func (*Tlcreate) Type() MsgType { + return MsgTlcreate +} + +// String implements fmt.Stringer. +func (t *Tlcreate) String() string { + return fmt.Sprintf("Tlcreate{FID: %d, Name: %s, OpenFlags: %s, Permissions: 0o%o, GID: %d}", t.FID, t.Name, t.OpenFlags, t.Permissions, t.GID) +} + +// Rlcreate is a create response. +// +// The Encode, Decode, etc. methods are inherited from Rlopen. +type Rlcreate struct { + Rlopen +} + +// Type implements message.Type. +func (*Rlcreate) Type() MsgType { + return MsgRlcreate +} + +// String implements fmt.Stringer. +func (r *Rlcreate) String() string { + return fmt.Sprintf("Rlcreate{QID: %s, IoUnit: %d, File: %v}", r.QID, r.IoUnit, r.File) +} + +// Tsymlink is a symlink request. +type Tsymlink struct { + // Directory is the directory FID. + Directory FID + + // Name is the new in the directory. + Name string + + // Target is the symlink target. + Target string + + // GID is the owning group. + GID GID +} + +// Decode implements encoder.Decode. +func (t *Tsymlink) Decode(b *buffer) { + t.Directory = b.ReadFID() + t.Name = b.ReadString() + t.Target = b.ReadString() + t.GID = b.ReadGID() +} + +// Encode implements encoder.Encode. +func (t *Tsymlink) Encode(b *buffer) { + b.WriteFID(t.Directory) + b.WriteString(t.Name) + b.WriteString(t.Target) + b.WriteGID(t.GID) +} + +// Type implements message.Type. 
+func (*Tsymlink) Type() MsgType { + return MsgTsymlink +} + +// String implements fmt.Stringer. +func (t *Tsymlink) String() string { + return fmt.Sprintf("Tsymlink{DirectoryFID: %d, Name: %s, Target: %s, GID: %d}", t.Directory, t.Name, t.Target, t.GID) +} + +// Rsymlink is a symlink response. +type Rsymlink struct { + // QID is the new symlink's QID. + QID QID +} + +// Decode implements encoder.Decode. +func (r *Rsymlink) Decode(b *buffer) { + r.QID.Decode(b) +} + +// Encode implements encoder.Encode. +func (r *Rsymlink) Encode(b *buffer) { + r.QID.Encode(b) +} + +// Type implements message.Type. +func (*Rsymlink) Type() MsgType { + return MsgRsymlink +} + +// String implements fmt.Stringer. +func (r *Rsymlink) String() string { + return fmt.Sprintf("Rsymlink{QID: %s}", r.QID) +} + +// Tlink is a link request. +type Tlink struct { + // Directory is the directory to contain the link. + Directory FID + + // FID is the target. + Target FID + + // Name is the new source name. + Name string +} + +// Decode implements encoder.Decode. +func (t *Tlink) Decode(b *buffer) { + t.Directory = b.ReadFID() + t.Target = b.ReadFID() + t.Name = b.ReadString() +} + +// Encode implements encoder.Encode. +func (t *Tlink) Encode(b *buffer) { + b.WriteFID(t.Directory) + b.WriteFID(t.Target) + b.WriteString(t.Name) +} + +// Type implements message.Type. +func (*Tlink) Type() MsgType { + return MsgTlink +} + +// String implements fmt.Stringer. +func (t *Tlink) String() string { + return fmt.Sprintf("Tlink{DirectoryFID: %d, TargetFID: %d, Name: %s}", t.Directory, t.Target, t.Name) +} + +// Rlink is a link response. +type Rlink struct { +} + +// Type implements message.Type. +func (*Rlink) Type() MsgType { + return MsgRlink +} + +// Decode implements encoder.Decode. +func (*Rlink) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Rlink) Encode(b *buffer) { +} + +// String implements fmt.Stringer. 
+func (r *Rlink) String() string { + return fmt.Sprintf("Rlink{}") +} + +// Trenameat is a rename request. +type Trenameat struct { + // OldDirectory is the source directory. + OldDirectory FID + + // OldName is the source file name. + OldName string + + // NewDirectory is the target directory. + NewDirectory FID + + // NewName is the new file name. + NewName string +} + +// Decode implements encoder.Decode. +func (t *Trenameat) Decode(b *buffer) { + t.OldDirectory = b.ReadFID() + t.OldName = b.ReadString() + t.NewDirectory = b.ReadFID() + t.NewName = b.ReadString() +} + +// Encode implements encoder.Encode. +func (t *Trenameat) Encode(b *buffer) { + b.WriteFID(t.OldDirectory) + b.WriteString(t.OldName) + b.WriteFID(t.NewDirectory) + b.WriteString(t.NewName) +} + +// Type implements message.Type. +func (*Trenameat) Type() MsgType { + return MsgTrenameat +} + +// String implements fmt.Stringer. +func (t *Trenameat) String() string { + return fmt.Sprintf("TrenameAt{OldDirectoryFID: %d, OldName: %s, NewDirectoryFID: %d, NewName: %s}", t.OldDirectory, t.OldName, t.NewDirectory, t.NewName) +} + +// Rrenameat is a rename response. +type Rrenameat struct { +} + +// Decode implements encoder.Decode. +func (*Rrenameat) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Rrenameat) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Rrenameat) Type() MsgType { + return MsgRrenameat +} + +// String implements fmt.Stringer. +func (r *Rrenameat) String() string { + return fmt.Sprintf("Rrenameat{}") +} + +// Tunlinkat is an unlink request. +type Tunlinkat struct { + // Directory is the originating directory. + Directory FID + + // Name is the name of the entry to unlink. + Name string + + // Flags are extra flags (e.g. O_DIRECTORY). These are not interpreted by p9. + Flags uint32 +} + +// Decode implements encoder.Decode. 
+func (t *Tunlinkat) Decode(b *buffer) { + t.Directory = b.ReadFID() + t.Name = b.ReadString() + t.Flags = b.Read32() +} + +// Encode implements encoder.Encode. +func (t *Tunlinkat) Encode(b *buffer) { + b.WriteFID(t.Directory) + b.WriteString(t.Name) + b.Write32(t.Flags) +} + +// Type implements message.Type. +func (*Tunlinkat) Type() MsgType { + return MsgTunlinkat +} + +// String implements fmt.Stringer. +func (t *Tunlinkat) String() string { + return fmt.Sprintf("Tunlinkat{DirectoryFID: %d, Name: %s, Flags: 0x%X}", t.Directory, t.Name, t.Flags) +} + +// Runlinkat is an unlink response. +type Runlinkat struct { +} + +// Decode implements encoder.Decode. +func (*Runlinkat) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Runlinkat) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Runlinkat) Type() MsgType { + return MsgRunlinkat +} + +// String implements fmt.Stringer. +func (r *Runlinkat) String() string { + return fmt.Sprintf("Runlinkat{}") +} + +// Trename is a rename request. +// +// Note that this generally isn't used anymore, and ideally all rename calls +// should Trenameat below. +type Trename struct { + // FID is the FID to rename. + FID FID + + // Directory is the target directory. + Directory FID + + // Name is the new file name. + Name string +} + +// Decode implements encoder.Decode. +func (t *Trename) Decode(b *buffer) { + t.FID = b.ReadFID() + t.Directory = b.ReadFID() + t.Name = b.ReadString() +} + +// Encode implements encoder.Encode. +func (t *Trename) Encode(b *buffer) { + b.WriteFID(t.FID) + b.WriteFID(t.Directory) + b.WriteString(t.Name) +} + +// Type implements message.Type. +func (*Trename) Type() MsgType { + return MsgTrename +} + +// String implements fmt.Stringer. +func (t *Trename) String() string { + return fmt.Sprintf("Trename{FID: %d, DirectoryFID: %d, Name: %s}", t.FID, t.Directory, t.Name) +} + +// Rrename is a rename response. +type Rrename struct { +} + +// Decode implements encoder.Decode. 
+func (*Rrename) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Rrename) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Rrename) Type() MsgType { + return MsgRrename +} + +// String implements fmt.Stringer. +func (r *Rrename) String() string { + return fmt.Sprintf("Rrename{}") +} + +// Treadlink is a readlink request. +type Treadlink struct { + // FID is the symlink. + FID FID +} + +// Decode implements encoder.Decode. +func (t *Treadlink) Decode(b *buffer) { + t.FID = b.ReadFID() +} + +// Encode implements encoder.Encode. +func (t *Treadlink) Encode(b *buffer) { + b.WriteFID(t.FID) +} + +// Type implements message.Type. +func (*Treadlink) Type() MsgType { + return MsgTreadlink +} + +// String implements fmt.Stringer. +func (t *Treadlink) String() string { + return fmt.Sprintf("Treadlink{FID: %d}", t.FID) +} + +// Rreadlink is a readlink response. +type Rreadlink struct { + // Target is the symlink target. + Target string +} + +// Decode implements encoder.Decode. +func (r *Rreadlink) Decode(b *buffer) { + r.Target = b.ReadString() +} + +// Encode implements encoder.Encode. +func (r *Rreadlink) Encode(b *buffer) { + b.WriteString(r.Target) +} + +// Type implements message.Type. +func (*Rreadlink) Type() MsgType { + return MsgRreadlink +} + +// String implements fmt.Stringer. +func (r *Rreadlink) String() string { + return fmt.Sprintf("Rreadlink{Target: %s}", r.Target) +} + +// Tread is a read request. +type Tread struct { + // FID is the FID to read. + FID FID + + // Offset indicates the file offset. + Offset uint64 + + // Count indicates the number of bytes to read. + Count uint32 +} + +// Decode implements encoder.Decode. +func (t *Tread) Decode(b *buffer) { + t.FID = b.ReadFID() + t.Offset = b.Read64() + t.Count = b.Read32() +} + +// Encode implements encoder.Encode. +func (t *Tread) Encode(b *buffer) { + b.WriteFID(t.FID) + b.Write64(t.Offset) + b.Write32(t.Count) +} + +// Type implements message.Type. 
+func (*Tread) Type() MsgType { + return MsgTread +} + +// String implements fmt.Stringer. +func (t *Tread) String() string { + return fmt.Sprintf("Tread{FID: %d, Offset: %d, Count: %d}", t.FID, t.Offset, t.Count) +} + +// Rread is the response for a Tread. +type Rread struct { + // Data is the resulting data. + Data []byte +} + +// Decode implements encoder.Decode. +// +// Data is automatically decoded via Payload. +func (r *Rread) Decode(b *buffer) { + count := b.Read32() + if count != uint32(len(r.Data)) { + b.markOverrun() + } +} + +// Encode implements encoder.Encode. +// +// Data is automatically encoded via Payload. +func (r *Rread) Encode(b *buffer) { + b.Write32(uint32(len(r.Data))) +} + +// Type implements message.Type. +func (*Rread) Type() MsgType { + return MsgRread +} + +// FixedSize implements payloader.FixedSize. +func (*Rread) FixedSize() uint32 { + return 4 +} + +// Payload implements payloader.Payload. +func (r *Rread) Payload() []byte { + return r.Data +} + +// SetPayload implements payloader.SetPayload. +func (r *Rread) SetPayload(p []byte) { + r.Data = p +} + +// String implements fmt.Stringer. +func (r *Rread) String() string { + return fmt.Sprintf("Rread{len(Data): %d}", len(r.Data)) +} + +// Twrite is a write request. +type Twrite struct { + // FID is the FID to read. + FID FID + + // Offset indicates the file offset. + Offset uint64 + + // Data is the data to be written. + Data []byte +} + +// Decode implements encoder.Decode. +func (t *Twrite) Decode(b *buffer) { + t.FID = b.ReadFID() + t.Offset = b.Read64() + count := b.Read32() + if count != uint32(len(t.Data)) { + b.markOverrun() + } +} + +// Encode implements encoder.Encode. +// +// This uses the buffer payload to avoid a copy. +func (t *Twrite) Encode(b *buffer) { + b.WriteFID(t.FID) + b.Write64(t.Offset) + b.Write32(uint32(len(t.Data))) +} + +// Type implements message.Type. +func (*Twrite) Type() MsgType { + return MsgTwrite +} + +// FixedSize implements payloader.FixedSize. 
+func (*Twrite) FixedSize() uint32 { + return 16 +} + +// Payload implements payloader.Payload. +func (t *Twrite) Payload() []byte { + return t.Data +} + +// SetPayload implements payloader.SetPayload. +func (t *Twrite) SetPayload(p []byte) { + t.Data = p +} + +// String implements fmt.Stringer. +func (t *Twrite) String() string { + return fmt.Sprintf("Twrite{FID: %v, Offset %d, len(Data): %d}", t.FID, t.Offset, len(t.Data)) +} + +// Rwrite is the response for a Twrite. +type Rwrite struct { + // Count indicates the number of bytes successfully written. + Count uint32 +} + +// Decode implements encoder.Decode. +func (r *Rwrite) Decode(b *buffer) { + r.Count = b.Read32() +} + +// Encode implements encoder.Encode. +func (r *Rwrite) Encode(b *buffer) { + b.Write32(r.Count) +} + +// Type implements message.Type. +func (*Rwrite) Type() MsgType { + return MsgRwrite +} + +// String implements fmt.Stringer. +func (r *Rwrite) String() string { + return fmt.Sprintf("Rwrite{Count: %d}", r.Count) +} + +// Tmknod is a mknod request. +type Tmknod struct { + // Directory is the parent directory. + Directory FID + + // Name is the device name. + Name string + + // Permissions are the device permissions. + Permissions FileMode + + // Major is the device major number. + Major uint32 + + // Minor is the device minor number. + Minor uint32 + + // GID is the device GID. + GID GID +} + +// Decode implements encoder.Decode. +func (t *Tmknod) Decode(b *buffer) { + t.Directory = b.ReadFID() + t.Name = b.ReadString() + t.Permissions = b.ReadPermissions() + t.Major = b.Read32() + t.Minor = b.Read32() + t.GID = b.ReadGID() +} + +// Encode implements encoder.Encode. +func (t *Tmknod) Encode(b *buffer) { + b.WriteFID(t.Directory) + b.WriteString(t.Name) + b.WritePermissions(t.Permissions) + b.Write32(t.Major) + b.Write32(t.Minor) + b.WriteGID(t.GID) +} + +// Type implements message.Type. +func (*Tmknod) Type() MsgType { + return MsgTmknod +} + +// String implements fmt.Stringer. 
+func (t *Tmknod) String() string { + return fmt.Sprintf("Tmknod{DirectoryFID: %d, Name: %s, Permissions: 0o%o, Major: %d, Minor: %d, GID: %d}", t.Directory, t.Name, t.Permissions, t.Major, t.Minor, t.GID) +} + +// Rmknod is a mknod response. +type Rmknod struct { + // QID is the resulting QID. + QID QID +} + +// Decode implements encoder.Decode. +func (r *Rmknod) Decode(b *buffer) { + r.QID.Decode(b) +} + +// Encode implements encoder.Encode. +func (r *Rmknod) Encode(b *buffer) { + r.QID.Encode(b) +} + +// Type implements message.Type. +func (*Rmknod) Type() MsgType { + return MsgRmknod +} + +// String implements fmt.Stringer. +func (r *Rmknod) String() string { + return fmt.Sprintf("Rmknod{QID: %s}", r.QID) +} + +// Tmkdir is a mkdir request. +type Tmkdir struct { + // Directory is the parent directory. + Directory FID + + // Name is the new directory name. + Name string + + // Permissions is the set of permission bits. + Permissions FileMode + + // GID is the owning group. + GID GID +} + +// Decode implements encoder.Decode. +func (t *Tmkdir) Decode(b *buffer) { + t.Directory = b.ReadFID() + t.Name = b.ReadString() + t.Permissions = b.ReadPermissions() + t.GID = b.ReadGID() +} + +// Encode implements encoder.Encode. +func (t *Tmkdir) Encode(b *buffer) { + b.WriteFID(t.Directory) + b.WriteString(t.Name) + b.WritePermissions(t.Permissions) + b.WriteGID(t.GID) +} + +// Type implements message.Type. +func (*Tmkdir) Type() MsgType { + return MsgTmkdir +} + +// String implements fmt.Stringer. +func (t *Tmkdir) String() string { + return fmt.Sprintf("Tmkdir{DirectoryFID: %d, Name: %s, Permissions: 0o%o, GID: %d}", t.Directory, t.Name, t.Permissions, t.GID) +} + +// Rmkdir is a mkdir response. +type Rmkdir struct { + // QID is the resulting QID. + QID QID +} + +// Decode implements encoder.Decode. +func (r *Rmkdir) Decode(b *buffer) { + r.QID.Decode(b) +} + +// Encode implements encoder.Encode. 
+func (r *Rmkdir) Encode(b *buffer) { + r.QID.Encode(b) +} + +// Type implements message.Type. +func (*Rmkdir) Type() MsgType { + return MsgRmkdir +} + +// String implements fmt.Stringer. +func (r *Rmkdir) String() string { + return fmt.Sprintf("Rmkdir{QID: %s}", r.QID) +} + +// Tgetattr is a getattr request. +type Tgetattr struct { + // FID is the FID to get attributes for. + FID FID + + // AttrMask is the set of attributes to get. + AttrMask AttrMask +} + +// Decode implements encoder.Decode. +func (t *Tgetattr) Decode(b *buffer) { + t.FID = b.ReadFID() + t.AttrMask.Decode(b) +} + +// Encode implements encoder.Encode. +func (t *Tgetattr) Encode(b *buffer) { + b.WriteFID(t.FID) + t.AttrMask.Encode(b) +} + +// Type implements message.Type. +func (*Tgetattr) Type() MsgType { + return MsgTgetattr +} + +// String implements fmt.Stringer. +func (t *Tgetattr) String() string { + return fmt.Sprintf("Tgetattr{FID: %d, AttrMask: %s}", t.FID, t.AttrMask) +} + +// Rgetattr is a getattr response. +type Rgetattr struct { + // Valid indicates which fields are valid. + Valid AttrMask + + // QID is the QID for this file. + QID + + // Attr is the set of attributes. + Attr Attr +} + +// Decode implements encoder.Decode. +func (r *Rgetattr) Decode(b *buffer) { + r.Valid.Decode(b) + r.QID.Decode(b) + r.Attr.Decode(b) +} + +// Encode implements encoder.Encode. +func (r *Rgetattr) Encode(b *buffer) { + r.Valid.Encode(b) + r.QID.Encode(b) + r.Attr.Encode(b) +} + +// Type implements message.Type. +func (*Rgetattr) Type() MsgType { + return MsgRgetattr +} + +// String implements fmt.Stringer. +func (r *Rgetattr) String() string { + return fmt.Sprintf("Rgetattr{Valid: %v, QID: %s, Attr: %s}", r.Valid, r.QID, r.Attr) +} + +// Tsetattr is a setattr request. +type Tsetattr struct { + // FID is the FID to change. + FID FID + + // Valid is the set of bits which will be used. + Valid SetAttrMask + + // SetAttr is the set request. + SetAttr SetAttr +} + +// Decode implements encoder.Decode. 
+func (t *Tsetattr) Decode(b *buffer) { + t.FID = b.ReadFID() + t.Valid.Decode(b) + t.SetAttr.Decode(b) +} + +// Encode implements encoder.Encode. +func (t *Tsetattr) Encode(b *buffer) { + b.WriteFID(t.FID) + t.Valid.Encode(b) + t.SetAttr.Encode(b) +} + +// Type implements message.Type. +func (*Tsetattr) Type() MsgType { + return MsgTsetattr +} + +// String implements fmt.Stringer. +func (t *Tsetattr) String() string { + return fmt.Sprintf("Tsetattr{FID: %d, Valid: %v, SetAttr: %s}", t.FID, t.Valid, t.SetAttr) +} + +// Rsetattr is a setattr response. +type Rsetattr struct { +} + +// Decode implements encoder.Decode. +func (*Rsetattr) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Rsetattr) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Rsetattr) Type() MsgType { + return MsgRsetattr +} + +// String implements fmt.Stringer. +func (r *Rsetattr) String() string { + return fmt.Sprintf("Rsetattr{}") +} + +// Txattrwalk walks extended attributes. +type Txattrwalk struct { + // FID is the FID to check for attributes. + FID FID + + // NewFID is the new FID associated with the attributes. + NewFID FID + + // Name is the attribute name. + Name string +} + +// Decode implements encoder.Decode. +func (t *Txattrwalk) Decode(b *buffer) { + t.FID = b.ReadFID() + t.NewFID = b.ReadFID() + t.Name = b.ReadString() +} + +// Encode implements encoder.Encode. +func (t *Txattrwalk) Encode(b *buffer) { + b.WriteFID(t.FID) + b.WriteFID(t.NewFID) + b.WriteString(t.Name) +} + +// Type implements message.Type. +func (*Txattrwalk) Type() MsgType { + return MsgTxattrwalk +} + +// String implements fmt.Stringer. +func (t *Txattrwalk) String() string { + return fmt.Sprintf("Txattrwalk{FID: %d, NewFID: %d, Name: %s}", t.FID, t.NewFID, t.Name) +} + +// Rxattrwalk is a xattrwalk response. +type Rxattrwalk struct { + // Size is the size of the extended attribute. + Size uint64 +} + +// Decode implements encoder.Decode. 
+func (r *Rxattrwalk) Decode(b *buffer) { + r.Size = b.Read64() +} + +// Encode implements encoder.Encode. +func (r *Rxattrwalk) Encode(b *buffer) { + b.Write64(r.Size) +} + +// Type implements message.Type. +func (*Rxattrwalk) Type() MsgType { + return MsgRxattrwalk +} + +// String implements fmt.Stringer. +func (r *Rxattrwalk) String() string { + return fmt.Sprintf("Rxattrwalk{Size: %d}", r.Size) +} + +// Txattrcreate prepare to set extended attributes. +type Txattrcreate struct { + // FID is input/output parameter, it identifies the file on which + // extended attributes will be set but after successful Rxattrcreate + // it is used to write the extended attribute value. + FID FID + + // Name is the attribute name. + Name string + + // Size of the attribute value. When the FID is clunked it has to match + // the number of bytes written to the FID. + AttrSize uint64 + + // Linux setxattr(2) flags. + Flags uint32 +} + +// Decode implements encoder.Decode. +func (t *Txattrcreate) Decode(b *buffer) { + t.FID = b.ReadFID() + t.Name = b.ReadString() + t.AttrSize = b.Read64() + t.Flags = b.Read32() +} + +// Encode implements encoder.Encode. +func (t *Txattrcreate) Encode(b *buffer) { + b.WriteFID(t.FID) + b.WriteString(t.Name) + b.Write64(t.AttrSize) + b.Write32(t.Flags) +} + +// Type implements message.Type. +func (*Txattrcreate) Type() MsgType { + return MsgTxattrcreate +} + +// String implements fmt.Stringer. +func (t *Txattrcreate) String() string { + return fmt.Sprintf("Txattrcreate{FID: %d, Name: %s, AttrSize: %d, Flags: %d}", t.FID, t.Name, t.AttrSize, t.Flags) +} + +// Rxattrcreate is a xattrcreate response. +type Rxattrcreate struct { +} + +// Decode implements encoder.Decode. +func (r *Rxattrcreate) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (r *Rxattrcreate) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Rxattrcreate) Type() MsgType { + return MsgRxattrcreate +} + +// String implements fmt.Stringer. 
+func (r *Rxattrcreate) String() string { + return fmt.Sprintf("Rxattrcreate{}") +} + +// Treaddir is a readdir request. +type Treaddir struct { + // Directory is the directory FID to read. + Directory FID + + // Offset is the offset to read at. + Offset uint64 + + // Count is the number of bytes to read. + Count uint32 +} + +// Decode implements encoder.Decode. +func (t *Treaddir) Decode(b *buffer) { + t.Directory = b.ReadFID() + t.Offset = b.Read64() + t.Count = b.Read32() +} + +// Encode implements encoder.Encode. +func (t *Treaddir) Encode(b *buffer) { + b.WriteFID(t.Directory) + b.Write64(t.Offset) + b.Write32(t.Count) +} + +// Type implements message.Type. +func (*Treaddir) Type() MsgType { + return MsgTreaddir +} + +// String implements fmt.Stringer. +func (t *Treaddir) String() string { + return fmt.Sprintf("Treaddir{DirectoryFID: %d, Offset: %d, Count: %d}", t.Directory, t.Offset, t.Count) +} + +// Rreaddir is a readdir response. +type Rreaddir struct { + // Count is the byte limit. + // + // This should always be set from the Treaddir request. + Count uint32 + + // Entries are the resulting entries. + // + // This may be constructed in decode. + Entries []Dirent + + // payload is the encoded payload. + // + // This is constructed by encode. + payload []byte +} + +// Decode implements encoder.Decode. +func (r *Rreaddir) Decode(b *buffer) { + r.Count = b.Read32() + entriesBuf := buffer{data: r.payload} + for { + var d Dirent + d.Decode(&entriesBuf) + if entriesBuf.isOverrun() { + // Couldn't decode a complete entry. + break + } + r.Entries = append(r.Entries, d) + } +} + +// Encode implements encoder.Encode. 
+func (r *Rreaddir) Encode(b *buffer) { + entriesBuf := buffer{} + for _, d := range r.Entries { + d.Encode(&entriesBuf) + if len(entriesBuf.data) >= int(r.Count) { + break + } + } + if len(entriesBuf.data) < int(r.Count) { + r.Count = uint32(len(entriesBuf.data)) + r.payload = entriesBuf.data + } else { + r.payload = entriesBuf.data[:r.Count] + } + b.Write32(uint32(r.Count)) +} + +// Type implements message.Type. +func (*Rreaddir) Type() MsgType { + return MsgRreaddir +} + +// FixedSize implements payloader.FixedSize. +func (*Rreaddir) FixedSize() uint32 { + return 4 +} + +// Payload implements payloader.Payload. +func (r *Rreaddir) Payload() []byte { + return r.payload +} + +// SetPayload implements payloader.SetPayload. +func (r *Rreaddir) SetPayload(p []byte) { + r.payload = p +} + +// String implements fmt.Stringer. +func (r *Rreaddir) String() string { + return fmt.Sprintf("Rreaddir{Count: %d, Entries: %s}", r.Count, r.Entries) +} + +// Tfsync is an fsync request. +type Tfsync struct { + // FID is the fid to sync. + FID FID +} + +// Decode implements encoder.Decode. +func (t *Tfsync) Decode(b *buffer) { + t.FID = b.ReadFID() +} + +// Encode implements encoder.Encode. +func (t *Tfsync) Encode(b *buffer) { + b.WriteFID(t.FID) +} + +// Type implements message.Type. +func (*Tfsync) Type() MsgType { + return MsgTfsync +} + +// String implements fmt.Stringer. +func (t *Tfsync) String() string { + return fmt.Sprintf("Tfsync{FID: %d}", t.FID) +} + +// Rfsync is an fsync response. +type Rfsync struct { +} + +// Decode implements encoder.Decode. +func (*Rfsync) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Rfsync) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Rfsync) Type() MsgType { + return MsgRfsync +} + +// String implements fmt.Stringer. +func (r *Rfsync) String() string { + return fmt.Sprintf("Rfsync{}") +} + +// Tstatfs is a stat request. +type Tstatfs struct { + // FID is the root. 
+ FID FID +} + +// Decode implements encoder.Decode. +func (t *Tstatfs) Decode(b *buffer) { + t.FID = b.ReadFID() +} + +// Encode implements encoder.Encode. +func (t *Tstatfs) Encode(b *buffer) { + b.WriteFID(t.FID) +} + +// Type implements message.Type. +func (*Tstatfs) Type() MsgType { + return MsgTstatfs +} + +// String implements fmt.Stringer. +func (t *Tstatfs) String() string { + return fmt.Sprintf("Tstatfs{FID: %d}", t.FID) +} + +// Rstatfs is the response for a Tstatfs. +type Rstatfs struct { + // FSStat is the stat result. + FSStat FSStat +} + +// Decode implements encoder.Decode. +func (r *Rstatfs) Decode(b *buffer) { + r.FSStat.Decode(b) +} + +// Encode implements encoder.Encode. +func (r *Rstatfs) Encode(b *buffer) { + r.FSStat.Encode(b) +} + +// Type implements message.Type. +func (*Rstatfs) Type() MsgType { + return MsgRstatfs +} + +// String implements fmt.Stringer. +func (r *Rstatfs) String() string { + return fmt.Sprintf("Rstatfs{FSStat: %v}", r.FSStat) +} + +// Tflushf is a flush file request, not to be confused with Tflush. +type Tflushf struct { + // FID is the FID to be flushed. + FID FID +} + +// Decode implements encoder.Decode. +func (t *Tflushf) Decode(b *buffer) { + t.FID = b.ReadFID() +} + +// Encode implements encoder.Encode. +func (t *Tflushf) Encode(b *buffer) { + b.WriteFID(t.FID) +} + +// Type implements message.Type. +func (*Tflushf) Type() MsgType { + return MsgTflushf +} + +// String implements fmt.Stringer. +func (t *Tflushf) String() string { + return fmt.Sprintf("Tflushf{FID: %d}", t.FID) +} + +// Rflushf is a flush file response. +type Rflushf struct { +} + +// Decode implements encoder.Decode. +func (*Rflushf) Decode(b *buffer) { +} + +// Encode implements encoder.Encode. +func (*Rflushf) Encode(b *buffer) { +} + +// Type implements message.Type. +func (*Rflushf) Type() MsgType { + return MsgRflushf +} + +// String implements fmt.Stringer. 
+func (*Rflushf) String() string { + return fmt.Sprintf("Rflushf{}") +} + +// Twalkgetattr is a walk request. +type Twalkgetattr struct { + // FID is the FID to be walked. + FID FID + + // NewFID is the resulting FID. + NewFID FID + + // Names are the set of names to be walked. + Names []string +} + +// Decode implements encoder.Decode. +func (t *Twalkgetattr) Decode(b *buffer) { + t.FID = b.ReadFID() + t.NewFID = b.ReadFID() + n := b.Read16() + for i := 0; i < int(n); i++ { + t.Names = append(t.Names, b.ReadString()) + } +} + +// Encode implements encoder.Encode. +func (t *Twalkgetattr) Encode(b *buffer) { + b.WriteFID(t.FID) + b.WriteFID(t.NewFID) + b.Write16(uint16(len(t.Names))) + for _, name := range t.Names { + b.WriteString(name) + } +} + +// Type implements message.Type. +func (*Twalkgetattr) Type() MsgType { + return MsgTwalkgetattr +} + +// String implements fmt.Stringer. +func (t *Twalkgetattr) String() string { + return fmt.Sprintf("Twalkgetattr{FID: %d, NewFID: %d, Names: %v}", t.FID, t.NewFID, t.Names) +} + +// Rwalkgetattr is a walk response. +type Rwalkgetattr struct { + // Valid indicates which fields are valid in the Attr below. + Valid AttrMask + + // Attr is the set of attributes for the last QID (the file walked to). + Attr Attr + + // QIDs are the set of QIDs returned. + QIDs []QID +} + +// Decode implements encoder.Decode. +func (r *Rwalkgetattr) Decode(b *buffer) { + r.Valid.Decode(b) + r.Attr.Decode(b) + n := b.Read16() + for i := 0; i < int(n); i++ { + var q QID + q.Decode(b) + r.QIDs = append(r.QIDs, q) + } +} + +// Encode implements encoder.Encode. +func (r *Rwalkgetattr) Encode(b *buffer) { + r.Valid.Encode(b) + r.Attr.Encode(b) + b.Write16(uint16(len(r.QIDs))) + for _, q := range r.QIDs { + q.Encode(b) + } +} + +// Type implements message.Type. +func (*Rwalkgetattr) Type() MsgType { + return MsgRwalkgetattr +} + +// String implements fmt.Stringer. 
+func (r *Rwalkgetattr) String() string { + return fmt.Sprintf("Rwalkgetattr{Valid: %s, Attr: %s, QIDs: %v}", r.Valid, r.Attr, r.QIDs) +} + +// Tucreate is a Tlcreate message that includes a UID. +type Tucreate struct { + Tlcreate + + // UID is the UID to use as the effective UID in creation messages. + UID UID +} + +// Decode implements encoder.Decode. +func (t *Tucreate) Decode(b *buffer) { + t.Tlcreate.Decode(b) + t.UID = b.ReadUID() +} + +// Encode implements encoder.Encode. +func (t *Tucreate) Encode(b *buffer) { + t.Tlcreate.Encode(b) + b.WriteUID(t.UID) +} + +// Type implements message.Type. +func (t *Tucreate) Type() MsgType { + return MsgTucreate +} + +// String implements fmt.Stringer. +func (t *Tucreate) String() string { + return fmt.Sprintf("Tucreate{Tlcreate: %v, UID: %d}", &t.Tlcreate, t.UID) +} + +// Rucreate is a file creation response. +type Rucreate struct { + Rlcreate +} + +// Type implements message.Type. +func (*Rucreate) Type() MsgType { + return MsgRucreate +} + +// String implements fmt.Stringer. +func (r *Rucreate) String() string { + return fmt.Sprintf("Rucreate{%v}", &r.Rlcreate) +} + +// Tumkdir is a Tmkdir message that includes a UID. +type Tumkdir struct { + Tmkdir + + // UID is the UID to use as the effective UID in creation messages. + UID UID +} + +// Decode implements encoder.Decode. +func (t *Tumkdir) Decode(b *buffer) { + t.Tmkdir.Decode(b) + t.UID = b.ReadUID() +} + +// Encode implements encoder.Encode. +func (t *Tumkdir) Encode(b *buffer) { + t.Tmkdir.Encode(b) + b.WriteUID(t.UID) +} + +// Type implements message.Type. +func (t *Tumkdir) Type() MsgType { + return MsgTumkdir +} + +// String implements fmt.Stringer. +func (t *Tumkdir) String() string { + return fmt.Sprintf("Tumkdir{Tmkdir: %v, UID: %d}", &t.Tmkdir, t.UID) +} + +// Rumkdir is a umkdir response. +type Rumkdir struct { + Rmkdir +} + +// Type implements message.Type. +func (*Rumkdir) Type() MsgType { + return MsgRumkdir +} + +// String implements fmt.Stringer. 
+func (r *Rumkdir) String() string { + return fmt.Sprintf("Rumkdir{%v}", &r.Rmkdir) +} + +// Tumknod is a Tmknod message that includes a UID. +type Tumknod struct { + Tmknod + + // UID is the UID to use as the effective UID in creation messages. + UID UID +} + +// Decode implements encoder.Decode. +func (t *Tumknod) Decode(b *buffer) { + t.Tmknod.Decode(b) + t.UID = b.ReadUID() +} + +// Encode implements encoder.Encode. +func (t *Tumknod) Encode(b *buffer) { + t.Tmknod.Encode(b) + b.WriteUID(t.UID) +} + +// Type implements message.Type. +func (t *Tumknod) Type() MsgType { + return MsgTumknod +} + +// String implements fmt.Stringer. +func (t *Tumknod) String() string { + return fmt.Sprintf("Tumknod{Tmknod: %v, UID: %d}", &t.Tmknod, t.UID) +} + +// Rumknod is a umknod response. +type Rumknod struct { + Rmknod +} + +// Type implements message.Type. +func (*Rumknod) Type() MsgType { + return MsgRumknod +} + +// String implements fmt.Stringer. +func (r *Rumknod) String() string { + return fmt.Sprintf("Rumknod{%v}", &r.Rmknod) +} + +// Tusymlink is a Tsymlink message that includes a UID. +type Tusymlink struct { + Tsymlink + + // UID is the UID to use as the effective UID in creation messages. + UID UID +} + +// Decode implements encoder.Decode. +func (t *Tusymlink) Decode(b *buffer) { + t.Tsymlink.Decode(b) + t.UID = b.ReadUID() +} + +// Encode implements encoder.Encode. +func (t *Tusymlink) Encode(b *buffer) { + t.Tsymlink.Encode(b) + b.WriteUID(t.UID) +} + +// Type implements message.Type. +func (t *Tusymlink) Type() MsgType { + return MsgTusymlink +} + +// String implements fmt.Stringer. +func (t *Tusymlink) String() string { + return fmt.Sprintf("Tusymlink{Tsymlink: %v, UID: %d}", &t.Tsymlink, t.UID) +} + +// Rusymlink is a usymlink response. +type Rusymlink struct { + Rsymlink +} + +// Type implements message.Type. +func (*Rusymlink) Type() MsgType { + return MsgRusymlink +} + +// String implements fmt.Stringer. 
+func (r *Rusymlink) String() string { + return fmt.Sprintf("Rusymlink{%v}", &r.Rsymlink) +} + +// Tlconnect is a connect request. +type Tlconnect struct { + // FID is the FID to be connected. + FID FID + + // Flags are the connect flags. + Flags ConnectFlags +} + +// Decode implements encoder.Decode. +func (t *Tlconnect) Decode(b *buffer) { + t.FID = b.ReadFID() + t.Flags = b.ReadConnectFlags() +} + +// Encode implements encoder.Encode. +func (t *Tlconnect) Encode(b *buffer) { + b.WriteFID(t.FID) + b.WriteConnectFlags(t.Flags) +} + +// Type implements message.Type. +func (*Tlconnect) Type() MsgType { + return MsgTlconnect +} + +// String implements fmt.Stringer. +func (t *Tlconnect) String() string { + return fmt.Sprintf("Tlconnect{FID: %d, Flags: %v}", t.FID, t.Flags) +} + +// Rlconnect is a connect response. +type Rlconnect struct { + // File is a host socket. + File *fd.FD +} + +// Decode implements encoder.Decode. +func (r *Rlconnect) Decode(*buffer) {} + +// Encode implements encoder.Encode. +func (r *Rlconnect) Encode(*buffer) {} + +// Type implements message.Type. +func (*Rlconnect) Type() MsgType { + return MsgRlconnect +} + +// FilePayload returns the file payload. +func (r *Rlconnect) FilePayload() *fd.FD { + return r.File +} + +// SetFilePayload sets the received file. +func (r *Rlconnect) SetFilePayload(file *fd.FD) { + r.File = file +} + +// String implements fmt.Stringer. +func (r *Rlconnect) String() string { + return fmt.Sprintf("Rlconnect{File: %v}", r.File) +} + +// messageRegistry indexes all messages by type. +var messageRegistry = make(map[MsgType]func() message) + +// messageByType creates a new message by type. +// +// An error is returned in the case of an unknown message. +// +// This takes, and ignores, a message tag so that it may be used directly as a +// lookupTagAndType function for recv (by design). 
func messageByType(_ Tag, t MsgType) (message, error) {
	fn, ok := messageRegistry[t]
	if !ok {
		return nil, &ErrInvalidMsgType{t}
	}
	return fn(), nil
}

// register registers the given message type.
//
// This uses reflection and records only the type. This may cause panic on
// failure and should only be used from init.
func register(m message) {
	t := m.Type()
	if fn, ok := messageRegistry[t]; ok {
		panic(fmt.Sprintf("duplicate message type %d: first is %#v, second is %#v", t, fn(), m))
	}

	// Record only the concrete type; the registered factory builds a fresh
	// zero value of that type on every call.
	to := reflect.ValueOf(m).Elem().Type()
	messageRegistry[t] = func() message {
		return reflect.New(to).Interface().(message)
	}
}

// calculateSize returns the size of the fixed portion of m: for messages
// implementing payloader this is FixedSize(); otherwise m is encoded into a
// scratch buffer and the resulting length is returned.
func calculateSize(m message) uint32 {
	if p, ok := m.(payloader); ok {
		return p.FixedSize()
	}
	var dataBuf buffer
	m.Encode(&dataBuf)
	return uint32(len(dataBuf.data))
}

// largestFixedSize is computed within calculateLargestFixedSize.
//
// This is computed so that given some message size M, you can compute
// the maximum payload size (e.g. for Twrite, Rread) with M-largestFixedSize.
// You could do this individually on a per-message basis, but it's easier to
// compute a single maximum safe payload.
var largestFixedSize uint32

// calculateLargestFixedSize is called from within init.
func calculateLargestFixedSize() {
	for _, fn := range messageRegistry {
		if size := calculateSize(fn()); size > largestFixedSize {
			largestFixedSize = size
		}
	}
}

// init registers every supported message type, then derives the largest
// fixed message size from the completed registry.
func init() {
	register(&Rlerror{})
	register(&Tstatfs{})
	register(&Rstatfs{})
	register(&Tlopen{})
	register(&Rlopen{})
	register(&Tlcreate{})
	register(&Rlcreate{})
	register(&Tsymlink{})
	register(&Rsymlink{})
	register(&Tmknod{})
	register(&Rmknod{})
	register(&Trename{})
	register(&Rrename{})
	register(&Treadlink{})
	register(&Rreadlink{})
	register(&Tgetattr{})
	register(&Rgetattr{})
	register(&Tsetattr{})
	register(&Rsetattr{})
	register(&Txattrwalk{})
	register(&Rxattrwalk{})
	register(&Txattrcreate{})
	register(&Rxattrcreate{})
	register(&Treaddir{})
	register(&Rreaddir{})
	register(&Tfsync{})
	register(&Rfsync{})
	register(&Tlink{})
	register(&Rlink{})
	register(&Tmkdir{})
	register(&Rmkdir{})
	register(&Trenameat{})
	register(&Rrenameat{})
	register(&Tunlinkat{})
	register(&Runlinkat{})
	register(&Tversion{})
	register(&Rversion{})
	register(&Tauth{})
	register(&Rauth{})
	register(&Tattach{})
	register(&Rattach{})
	register(&Tflush{})
	register(&Rflush{})
	register(&Twalk{})
	register(&Rwalk{})
	register(&Tread{})
	register(&Rread{})
	register(&Twrite{})
	register(&Rwrite{})
	register(&Tclunk{})
	register(&Rclunk{})
	register(&Tremove{})
	register(&Rremove{})
	register(&Tflushf{})
	register(&Rflushf{})
	register(&Twalkgetattr{})
	register(&Rwalkgetattr{})
	register(&Tucreate{})
	register(&Rucreate{})
	register(&Tumkdir{})
	register(&Rumkdir{})
	register(&Tumknod{})
	register(&Rumknod{})
	register(&Tusymlink{})
	register(&Rusymlink{})
	register(&Tlconnect{})
	register(&Rlconnect{})

	// Must run after all registrations above.
	calculateLargestFixedSize()
}
diff --git a/pkg/p9/messages_test.go b/pkg/p9/messages_test.go
new file mode 100644
index 000000000..f353755f1
--- /dev/null
+++ b/pkg/p9/messages_test.go
@@ -0,0 +1,391 @@
// Copyright 2018 Google Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "reflect" + "testing" +) + +func TestEncodeDecode(t *testing.T) { + objs := []encoder{ + &QID{ + Type: 1, + Version: 2, + Path: 3, + }, + &FSStat{ + Type: 1, + BlockSize: 2, + Blocks: 3, + BlocksFree: 4, + BlocksAvailable: 5, + Files: 6, + FilesFree: 7, + FSID: 8, + NameLength: 9, + }, + &AttrMask{ + Mode: true, + NLink: true, + UID: true, + GID: true, + RDev: true, + ATime: true, + MTime: true, + CTime: true, + INo: true, + Size: true, + Blocks: true, + BTime: true, + Gen: true, + DataVersion: true, + }, + &Attr{ + Mode: Exec, + UID: 2, + GID: 3, + NLink: 4, + RDev: 5, + Size: 6, + BlockSize: 7, + Blocks: 8, + ATimeSeconds: 9, + ATimeNanoSeconds: 10, + MTimeSeconds: 11, + MTimeNanoSeconds: 12, + CTimeSeconds: 13, + CTimeNanoSeconds: 14, + BTimeSeconds: 15, + BTimeNanoSeconds: 16, + Gen: 17, + DataVersion: 18, + }, + &SetAttrMask{ + Permissions: true, + UID: true, + GID: true, + Size: true, + ATime: true, + MTime: true, + CTime: true, + ATimeNotSystemTime: true, + MTimeNotSystemTime: true, + }, + &SetAttr{ + Permissions: 1, + UID: 2, + GID: 3, + Size: 4, + ATimeSeconds: 5, + ATimeNanoSeconds: 6, + MTimeSeconds: 7, + MTimeNanoSeconds: 8, + }, + &Dirent{ + QID: QID{Type: 1}, + Offset: 2, + Type: 3, + Name: "a", + }, + &Rlerror{ + Error: 1, + }, + &Tstatfs{ + FID: 1, + }, + &Rstatfs{ + FSStat: FSStat{Type: 1}, + }, + &Tlopen{ + FID: 1, + Flags: WriteOnly, + }, + &Rlopen{ + QID: 
QID{Type: 1}, + IoUnit: 2, + }, + &Tlconnect{ + FID: 1, + }, + &Rlconnect{}, + &Tlcreate{ + FID: 1, + Name: "a", + OpenFlags: 2, + Permissions: 3, + GID: 4, + }, + &Rlcreate{ + Rlopen{QID: QID{Type: 1}}, + }, + &Tsymlink{ + Directory: 1, + Name: "a", + Target: "b", + GID: 2, + }, + &Rsymlink{ + QID: QID{Type: 1}, + }, + &Tmknod{ + Directory: 1, + Name: "a", + Permissions: 2, + Major: 3, + Minor: 4, + GID: 5, + }, + &Rmknod{ + QID: QID{Type: 1}, + }, + &Trename{ + FID: 1, + Directory: 2, + Name: "a", + }, + &Rrename{}, + &Treadlink{ + FID: 1, + }, + &Rreadlink{ + Target: "a", + }, + &Tgetattr{ + FID: 1, + AttrMask: AttrMask{Mode: true}, + }, + &Rgetattr{ + Valid: AttrMask{Mode: true}, + QID: QID{Type: 1}, + Attr: Attr{Mode: Write}, + }, + &Tsetattr{ + FID: 1, + Valid: SetAttrMask{Permissions: true}, + SetAttr: SetAttr{Permissions: Write}, + }, + &Rsetattr{}, + &Txattrwalk{ + FID: 1, + NewFID: 2, + Name: "a", + }, + &Rxattrwalk{ + Size: 1, + }, + &Treaddir{ + Directory: 1, + Offset: 2, + Count: 3, + }, + &Rreaddir{ + // Count must be sufficient to encode a dirent. 
+ Count: 0x18, + Entries: []Dirent{{QID: QID{Type: 2}}}, + }, + &Tfsync{ + FID: 1, + }, + &Rfsync{}, + &Tlink{ + Directory: 1, + Target: 2, + Name: "a", + }, + &Rlink{}, + &Tmkdir{ + Directory: 1, + Name: "a", + Permissions: 2, + GID: 3, + }, + &Rmkdir{ + QID: QID{Type: 1}, + }, + &Trenameat{ + OldDirectory: 1, + OldName: "a", + NewDirectory: 2, + NewName: "b", + }, + &Rrenameat{}, + &Tunlinkat{ + Directory: 1, + Name: "a", + Flags: 2, + }, + &Runlinkat{}, + &Tversion{ + MSize: 1, + Version: "a", + }, + &Rversion{ + MSize: 1, + Version: "a", + }, + &Tauth{ + AuthenticationFID: 1, + UserName: "a", + AttachName: "b", + UID: 2, + }, + &Rauth{ + QID: QID{Type: 1}, + }, + &Tattach{ + FID: 1, + Auth: Tauth{AuthenticationFID: 2}, + }, + &Rattach{ + QID: QID{Type: 1}, + }, + &Tflush{ + OldTag: 1, + }, + &Rflush{}, + &Twalk{ + FID: 1, + NewFID: 2, + Names: []string{"a"}, + }, + &Rwalk{ + QIDs: []QID{{Type: 1}}, + }, + &Tread{ + FID: 1, + Offset: 2, + Count: 3, + }, + &Rread{ + Data: []byte{'a'}, + }, + &Twrite{ + FID: 1, + Offset: 2, + Data: []byte{'a'}, + }, + &Rwrite{ + Count: 1, + }, + &Tclunk{ + FID: 1, + }, + &Rclunk{}, + &Tremove{ + FID: 1, + }, + &Rremove{}, + &Tflushf{ + FID: 1, + }, + &Rflushf{}, + &Twalkgetattr{ + FID: 1, + NewFID: 2, + Names: []string{"a"}, + }, + &Rwalkgetattr{ + QIDs: []QID{{Type: 1}}, + Valid: AttrMask{Mode: true}, + Attr: Attr{Mode: Write}, + }, + &Tucreate{ + Tlcreate: Tlcreate{ + FID: 1, + Name: "a", + OpenFlags: 2, + Permissions: 3, + GID: 4, + }, + UID: 5, + }, + &Rucreate{ + Rlcreate{Rlopen{QID: QID{Type: 1}}}, + }, + &Tumkdir{ + Tmkdir: Tmkdir{ + Directory: 1, + Name: "a", + Permissions: 2, + GID: 3, + }, + UID: 4, + }, + &Rumkdir{ + Rmkdir{QID: QID{Type: 1}}, + }, + &Tusymlink{ + Tsymlink: Tsymlink{ + Directory: 1, + Name: "a", + Target: "b", + GID: 2, + }, + UID: 3, + }, + &Rusymlink{ + Rsymlink{QID: QID{Type: 1}}, + }, + &Tumknod{ + Tmknod: Tmknod{ + Directory: 1, + Name: "a", + Permissions: 2, + Major: 3, + Minor: 4, + GID: 5, + }, 
+ UID: 6, + }, + &Rumknod{ + Rmknod{QID: QID{Type: 1}}, + }, + } + + for _, enc := range objs { + // Encode the original. + data := make([]byte, initialBufferLength) + buf := buffer{data: data[:0]} + enc.Encode(&buf) + + // Create a new object, same as the first. + enc2 := reflect.New(reflect.ValueOf(enc).Elem().Type()).Interface().(encoder) + buf2 := buffer{data: buf.data} + + // To be fair, we need to add any payloads (directly). + if pl, ok := enc.(payloader); ok { + enc2.(payloader).SetPayload(pl.Payload()) + } + + // And any file payloads (directly). + if fl, ok := enc.(filer); ok { + enc2.(filer).SetFilePayload(fl.FilePayload()) + } + + // Mark sure it was okay. + enc2.Decode(&buf2) + if buf2.isOverrun() { + t.Errorf("object %#v->%#v got overrun on decode", enc, enc2) + continue + } + + // Check that they are equal. + if !reflect.DeepEqual(enc, enc2) { + t.Errorf("object %#v and %#v differ", enc, enc2) + continue + } + } +} diff --git a/pkg/p9/p9.go b/pkg/p9/p9.go new file mode 100644 index 000000000..c6899c3ce --- /dev/null +++ b/pkg/p9/p9.go @@ -0,0 +1,1023 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package p9 is a 9P2000.L implementation. +package p9 + +import ( + "fmt" + "math" + "os" + "strings" + "sync/atomic" + "syscall" +) + +// OpenFlags is the mode passed to Open and Create operations. +// +// These correspond to bits sent over the wire. 
type OpenFlags uint32

const (
	// ReadOnly is a Topen and Tcreate flag indicating read-only mode.
	ReadOnly OpenFlags = 0

	// WriteOnly is a Topen and Tcreate flag indicating write-only mode.
	WriteOnly OpenFlags = 1

	// ReadWrite is a Topen flag indicating read-write mode.
	ReadWrite OpenFlags = 2

	// OpenFlagsModeMask is a mask of valid OpenFlags mode bits.
	OpenFlagsModeMask OpenFlags = 3
)

// ConnectFlags is the mode passed to Connect operations.
//
// These correspond to bits sent over the wire.
type ConnectFlags uint32

const (
	// StreamSocket is a Tlconnect flag indicating SOCK_STREAM mode.
	StreamSocket ConnectFlags = 0

	// DgramSocket is a Tlconnect flag indicating SOCK_DGRAM mode.
	DgramSocket ConnectFlags = 1

	// SeqpacketSocket is a Tlconnect flag indicating SOCK_SEQPACKET mode.
	SeqpacketSocket ConnectFlags = 2

	// AnonymousSocket is a Tlconnect flag indicating that the mode does not
	// matter and that the requester will accept any socket type.
	AnonymousSocket ConnectFlags = 3
)

// OSFlags converts a p9.OpenFlags to an int compatible with open(2).
func (o OpenFlags) OSFlags() int {
	return int(o & OpenFlagsModeMask)
}

// String implements fmt.Stringer.
func (o OpenFlags) String() string {
	switch o {
	case ReadOnly:
		return "ReadOnly"
	case WriteOnly:
		return "WriteOnly"
	case ReadWrite:
		return "ReadWrite"
	case OpenFlagsModeMask:
		return "OpenFlagsModeMask"
	default:
		return "UNDEFINED"
	}
}

// Tag is a message tag.
type Tag uint16

// FID is a file identifier.
type FID uint64

// FileMode are flags corresponding to file modes.
//
// These correspond to bits sent over the wire.
// These also correspond to mode_t bits.
type FileMode uint32

const (
	// FileModeMask is a mask of all the file mode bits of FileMode.
	FileModeMask FileMode = 0170000

	// ModeSocket is an (unused) mode bit for a socket.
+ ModeSocket FileMode = 0140000 + + // ModeSymlink is a mode bit for a symlink. + ModeSymlink FileMode = 0120000 + + // ModeRegular is a mode bit for regular files. + ModeRegular FileMode = 0100000 + + // ModeBlockDevice is a mode bit for block devices. + ModeBlockDevice FileMode = 060000 + + // ModeDirectory is a mode bit for directories. + ModeDirectory FileMode = 040000 + + // ModeCharacterDevice is a mode bit for a character device. + ModeCharacterDevice FileMode = 020000 + + // ModeNamedPipe is a mode bit for a named pipe. + ModeNamedPipe FileMode = 010000 + + // Read is a mode bit indicating read permission. + Read FileMode = 04 + + // Write is a mode bit indicating write permission. + Write FileMode = 02 + + // Exec is a mode bit indicating exec permission. + Exec FileMode = 01 + + // PermissionsMask is the mask to apply to FileModes for permissions. + PermissionsMask FileMode = 0777 +) + +// QIDType is the most significant byte of the FileMode word, to be used as the +// Type field of p9.QID. +func (m FileMode) QIDType() QIDType { + switch { + case m.IsDir(): + return TypeDir + case m.IsSocket(), m.IsNamedPipe(), m.IsCharacterDevice(): + // Best approximation. + return TypeAppendOnly + case m.IsSymlink(): + return TypeSymlink + default: + return TypeRegular + } +} + +// FileType returns the file mode without the permission bits. +func (m FileMode) FileType() FileMode { + return m & FileModeMask +} + +// Permissions returns just the permission bits of the mode. +func (m FileMode) Permissions() FileMode { + return m & PermissionsMask +} + +// Writable returns the mode with write bits added. +func (m FileMode) Writable() FileMode { + return m | 0222 +} + +// IsReadable returns true if m represents a file that can be read. +func (m FileMode) IsReadable() bool { + return m&0444 != 0 +} + +// IsWritable returns true if m represents a file that can be written to. 
+func (m FileMode) IsWritable() bool { + return m&0222 != 0 +} + +// IsExecutable returns true if m represents a file that can be executed. +func (m FileMode) IsExecutable() bool { + return m&0111 != 0 +} + +// IsRegular returns true if m is a regular file. +func (m FileMode) IsRegular() bool { + return m&FileModeMask == ModeRegular +} + +// IsDir returns true if m represents a directory. +func (m FileMode) IsDir() bool { + return m&FileModeMask == ModeDirectory +} + +// IsNamedPipe returns true if m represents a named pipe. +func (m FileMode) IsNamedPipe() bool { + return m&FileModeMask == ModeNamedPipe +} + +// IsCharacterDevice returns true if m represents a character device. +func (m FileMode) IsCharacterDevice() bool { + return m&FileModeMask == ModeCharacterDevice +} + +// IsBlockDevice returns true if m represents a character device. +func (m FileMode) IsBlockDevice() bool { + return m&FileModeMask == ModeBlockDevice +} + +// IsSocket returns true if m represents a socket. +func (m FileMode) IsSocket() bool { + return m&FileModeMask == ModeSocket +} + +// IsSymlink returns true if m represents a symlink. +func (m FileMode) IsSymlink() bool { + return m&FileModeMask == ModeSymlink +} + +// ModeFromOS returns a FileMode from an os.FileMode. +func ModeFromOS(mode os.FileMode) FileMode { + m := FileMode(mode.Perm()) + switch { + case mode.IsDir(): + m |= ModeDirectory + case mode&os.ModeSymlink != 0: + m |= ModeSymlink + case mode&os.ModeSocket != 0: + m |= ModeSocket + case mode&os.ModeNamedPipe != 0: + m |= ModeNamedPipe + case mode&os.ModeCharDevice != 0: + m |= ModeCharacterDevice + case mode&os.ModeDevice != 0: + m |= ModeBlockDevice + default: + m |= ModeRegular + } + return m +} + +// OSMode converts a p9.FileMode to an os.FileMode. 
+func (m FileMode) OSMode() os.FileMode { + var osMode os.FileMode + osMode |= os.FileMode(m.Permissions()) + switch { + case m.IsDir(): + osMode |= os.ModeDir + case m.IsSymlink(): + osMode |= os.ModeSymlink + case m.IsSocket(): + osMode |= os.ModeSocket + case m.IsNamedPipe(): + osMode |= os.ModeNamedPipe + case m.IsCharacterDevice(): + osMode |= os.ModeCharDevice | os.ModeDevice + case m.IsBlockDevice(): + osMode |= os.ModeDevice + } + return osMode +} + +// UID represents a user ID. +type UID uint32 + +// Ok returns true if uid is not NoUID. +func (uid UID) Ok() bool { + return uid != NoUID +} + +// GID represents a group ID. +type GID uint32 + +// Ok returns true if gid is not NoGID. +func (gid GID) Ok() bool { + return gid != NoGID +} + +const ( + // NoTag is a sentinel used to indicate no valid tag. + NoTag Tag = math.MaxUint16 + + // NoFID is a sentinel used to indicate no valid FID. + NoFID FID = math.MaxUint32 + + // NoUID is a sentinel used to indicate no valid UID. + NoUID UID = math.MaxUint32 + + // NoGID is a sentinel used to indicate no valid GID. + NoGID GID = math.MaxUint32 +) + +// MsgType is a type identifier. +type MsgType uint8 + +// MsgType declarations. 
+const ( + MsgTlerror MsgType = 6 + MsgRlerror = 7 + MsgTstatfs = 8 + MsgRstatfs = 9 + MsgTlopen = 12 + MsgRlopen = 13 + MsgTlcreate = 14 + MsgRlcreate = 15 + MsgTsymlink = 16 + MsgRsymlink = 17 + MsgTmknod = 18 + MsgRmknod = 19 + MsgTrename = 20 + MsgRrename = 21 + MsgTreadlink = 22 + MsgRreadlink = 23 + MsgTgetattr = 24 + MsgRgetattr = 25 + MsgTsetattr = 26 + MsgRsetattr = 27 + MsgTxattrwalk = 30 + MsgRxattrwalk = 31 + MsgTxattrcreate = 32 + MsgRxattrcreate = 33 + MsgTreaddir = 40 + MsgRreaddir = 41 + MsgTfsync = 50 + MsgRfsync = 51 + MsgTlink = 70 + MsgRlink = 71 + MsgTmkdir = 72 + MsgRmkdir = 73 + MsgTrenameat = 74 + MsgRrenameat = 75 + MsgTunlinkat = 76 + MsgRunlinkat = 77 + MsgTversion = 100 + MsgRversion = 101 + MsgTauth = 102 + MsgRauth = 103 + MsgTattach = 104 + MsgRattach = 105 + MsgTflush = 108 + MsgRflush = 109 + MsgTwalk = 110 + MsgRwalk = 111 + MsgTread = 116 + MsgRread = 117 + MsgTwrite = 118 + MsgRwrite = 119 + MsgTclunk = 120 + MsgRclunk = 121 + MsgTremove = 122 + MsgRremove = 123 + MsgTflushf = 124 + MsgRflushf = 125 + MsgTwalkgetattr = 126 + MsgRwalkgetattr = 127 + MsgTucreate = 128 + MsgRucreate = 129 + MsgTumkdir = 130 + MsgRumkdir = 131 + MsgTumknod = 132 + MsgRumknod = 133 + MsgTusymlink = 134 + MsgRusymlink = 135 + MsgTlconnect = 136 + MsgRlconnect = 137 +) + +// QIDType represents the file type for QIDs. +// +// QIDType corresponds to the high 8 bits of a Plan 9 file mode. +type QIDType uint8 + +const ( + // TypeDir represents a directory type. + TypeDir QIDType = 0x80 + + // TypeAppendOnly represents an append only file. + TypeAppendOnly QIDType = 0x40 + + // TypeExclusive represents an exclusive-use file. + TypeExclusive QIDType = 0x20 + + // TypeMount represents a mounted channel. + TypeMount QIDType = 0x10 + + // TypeAuth represents an authentication file. + TypeAuth QIDType = 0x08 + + // TypeTemporary represents a temporary file. + TypeTemporary QIDType = 0x04 + + // TypeSymlink represents a symlink. 
+ TypeSymlink QIDType = 0x02 + + // TypeLink represents a hard link. + TypeLink QIDType = 0x01 + + // TypeRegular represents a regular file. + TypeRegular QIDType = 0x00 +) + +// QID is a unique file identifier. +// +// This may be embedded in other requests and responses. +type QID struct { + // Type is the highest order byte of the file mode. + Type QIDType + + // Version is an arbitrary server version number. + Version uint32 + + // Path is a unique server identifier for this path (e.g. inode). + Path uint64 +} + +// String implements fmt.Stringer. +func (q QID) String() string { + return fmt.Sprintf("QID{Type: %d, Version: %d, Path: %d}", q.Type, q.Version, q.Path) +} + +// Decode implements encoder.Decode. +func (q *QID) Decode(b *buffer) { + q.Type = b.ReadQIDType() + q.Version = b.Read32() + q.Path = b.Read64() +} + +// Encode implements encoder.Encode. +func (q *QID) Encode(b *buffer) { + b.WriteQIDType(q.Type) + b.Write32(q.Version) + b.Write64(q.Path) +} + +// QIDGenerator is a simple generator for QIDs that atomically increments Path +// values. +type QIDGenerator struct { + // uids is an ever increasing value that can be atomically incremented + // to provide unique Path values for QIDs. + uids uint64 +} + +// Get returns a new 9P unique ID with a unique Path given a QID type. +// +// While the 9P spec allows Version to be incremented every time the file is +// modified, we currently do not use the Version member for anything. Hence, +// it is set to 0. +func (q *QIDGenerator) Get(t QIDType) QID { + return QID{ + Type: t, + Version: 0, + Path: atomic.AddUint64(&q.uids, 1), + } +} + +// FSStat is used by statfs. +type FSStat struct { + // Type is the filesystem type. + Type uint32 + + // BlockSize is the blocksize. + BlockSize uint32 + + // Blocks is the number of blocks. + Blocks uint64 + + // BlocksFree is the number of free blocks. + BlocksFree uint64 + + // BlocksAvailable is the number of blocks *available*. 
+ BlocksAvailable uint64 + + // Files is the number of files available. + Files uint64 + + // FilesFree is the number of free file nodes. + FilesFree uint64 + + // FSID is the filesystem ID. + FSID uint64 + + // NameLength is the maximum name length. + NameLength uint32 +} + +// Decode implements encoder.Decode. +func (f *FSStat) Decode(b *buffer) { + f.Type = b.Read32() + f.BlockSize = b.Read32() + f.Blocks = b.Read64() + f.BlocksFree = b.Read64() + f.BlocksAvailable = b.Read64() + f.Files = b.Read64() + f.FilesFree = b.Read64() + f.FSID = b.Read64() + f.NameLength = b.Read32() +} + +// Encode implements encoder.Encode. +func (f *FSStat) Encode(b *buffer) { + b.Write32(f.Type) + b.Write32(f.BlockSize) + b.Write64(f.Blocks) + b.Write64(f.BlocksFree) + b.Write64(f.BlocksAvailable) + b.Write64(f.Files) + b.Write64(f.FilesFree) + b.Write64(f.FSID) + b.Write32(f.NameLength) +} + +// AttrMask is a mask of attributes for getattr. +type AttrMask struct { + Mode bool + NLink bool + UID bool + GID bool + RDev bool + ATime bool + MTime bool + CTime bool + INo bool + Size bool + Blocks bool + BTime bool + Gen bool + DataVersion bool +} + +// Contains returns true if a contains all of the attributes masked as b. +func (a AttrMask) Contains(b AttrMask) bool { + if b.Mode && !a.Mode { + return false + } + if b.NLink && !a.NLink { + return false + } + if b.UID && !a.UID { + return false + } + if b.GID && !a.GID { + return false + } + if b.RDev && !a.RDev { + return false + } + if b.ATime && !a.ATime { + return false + } + if b.MTime && !a.MTime { + return false + } + if b.CTime && !a.CTime { + return false + } + if b.INo && !a.INo { + return false + } + if b.Size && !a.Size { + return false + } + if b.Blocks && !a.Blocks { + return false + } + if b.BTime && !a.BTime { + return false + } + if b.Gen && !a.Gen { + return false + } + if b.DataVersion && !a.DataVersion { + return false + } + return true +} + +// Empty returns true if no fields are masked. 
+func (a AttrMask) Empty() bool { + return !a.Mode && !a.NLink && !a.UID && !a.GID && !a.RDev && !a.ATime && !a.MTime && !a.CTime && !a.INo && !a.Size && !a.Blocks && !a.BTime && !a.Gen && !a.DataVersion +} + +// AttrMaskAll returns an AttrMask with all fields masked. +func AttrMaskAll() AttrMask { + return AttrMask{ + Mode: true, + NLink: true, + UID: true, + GID: true, + RDev: true, + ATime: true, + MTime: true, + CTime: true, + INo: true, + Size: true, + Blocks: true, + BTime: true, + Gen: true, + DataVersion: true, + } +} + +// String implements fmt.Stringer. +func (a AttrMask) String() string { + var masks []string + if a.Mode { + masks = append(masks, "Mode") + } + if a.NLink { + masks = append(masks, "NLink") + } + if a.UID { + masks = append(masks, "UID") + } + if a.GID { + masks = append(masks, "GID") + } + if a.RDev { + masks = append(masks, "RDev") + } + if a.ATime { + masks = append(masks, "ATime") + } + if a.MTime { + masks = append(masks, "MTime") + } + if a.CTime { + masks = append(masks, "CTime") + } + if a.INo { + masks = append(masks, "INo") + } + if a.Size { + masks = append(masks, "Size") + } + if a.Blocks { + masks = append(masks, "Blocks") + } + if a.BTime { + masks = append(masks, "BTime") + } + if a.Gen { + masks = append(masks, "Gen") + } + if a.DataVersion { + masks = append(masks, "DataVersion") + } + return fmt.Sprintf("AttrMask{with: %s}", strings.Join(masks, " ")) +} + +// Decode implements encoder.Decode. 
+func (a *AttrMask) Decode(b *buffer) { + mask := b.Read64() + a.Mode = mask&0x00000001 != 0 + a.NLink = mask&0x00000002 != 0 + a.UID = mask&0x00000004 != 0 + a.GID = mask&0x00000008 != 0 + a.RDev = mask&0x00000010 != 0 + a.ATime = mask&0x00000020 != 0 + a.MTime = mask&0x00000040 != 0 + a.CTime = mask&0x00000080 != 0 + a.INo = mask&0x00000100 != 0 + a.Size = mask&0x00000200 != 0 + a.Blocks = mask&0x00000400 != 0 + a.BTime = mask&0x00000800 != 0 + a.Gen = mask&0x00001000 != 0 + a.DataVersion = mask&0x00002000 != 0 +} + +// Encode implements encoder.Encode. +func (a *AttrMask) Encode(b *buffer) { + var mask uint64 + if a.Mode { + mask |= 0x00000001 + } + if a.NLink { + mask |= 0x00000002 + } + if a.UID { + mask |= 0x00000004 + } + if a.GID { + mask |= 0x00000008 + } + if a.RDev { + mask |= 0x00000010 + } + if a.ATime { + mask |= 0x00000020 + } + if a.MTime { + mask |= 0x00000040 + } + if a.CTime { + mask |= 0x00000080 + } + if a.INo { + mask |= 0x00000100 + } + if a.Size { + mask |= 0x00000200 + } + if a.Blocks { + mask |= 0x00000400 + } + if a.BTime { + mask |= 0x00000800 + } + if a.Gen { + mask |= 0x00001000 + } + if a.DataVersion { + mask |= 0x00002000 + } + b.Write64(mask) +} + +// Attr is a set of attributes for getattr. +type Attr struct { + Mode FileMode + UID UID + GID GID + NLink uint64 + RDev uint64 + Size uint64 + BlockSize uint64 + Blocks uint64 + ATimeSeconds uint64 + ATimeNanoSeconds uint64 + MTimeSeconds uint64 + MTimeNanoSeconds uint64 + CTimeSeconds uint64 + CTimeNanoSeconds uint64 + BTimeSeconds uint64 + BTimeNanoSeconds uint64 + Gen uint64 + DataVersion uint64 +} + +// String implements fmt.Stringer. 
+func (a Attr) String() string { + return fmt.Sprintf("Attr{Mode: 0o%o, UID: %d, GID: %d, NLink: %d, RDev: %d, Size: %d, BlockSize: %d, Blocks: %d, ATime: {Sec: %d, NanoSec: %d}, MTime: {Sec: %d, NanoSec: %d}, CTime: {Sec: %d, NanoSec: %d}, BTime: {Sec: %d, NanoSec: %d}, Gen: %d, DataVersion: %d}", + a.Mode, a.UID, a.GID, a.NLink, a.RDev, a.Size, a.BlockSize, a.Blocks, a.ATimeSeconds, a.ATimeNanoSeconds, a.MTimeSeconds, a.MTimeNanoSeconds, a.CTimeSeconds, a.CTimeNanoSeconds, a.BTimeSeconds, a.BTimeNanoSeconds, a.Gen, a.DataVersion) +} + +// Encode implements encoder.Encode. +func (a *Attr) Encode(b *buffer) { + b.WriteFileMode(a.Mode) + b.WriteUID(a.UID) + b.WriteGID(a.GID) + b.Write64(a.NLink) + b.Write64(a.RDev) + b.Write64(a.Size) + b.Write64(a.BlockSize) + b.Write64(a.Blocks) + b.Write64(a.ATimeSeconds) + b.Write64(a.ATimeNanoSeconds) + b.Write64(a.MTimeSeconds) + b.Write64(a.MTimeNanoSeconds) + b.Write64(a.CTimeSeconds) + b.Write64(a.CTimeNanoSeconds) + b.Write64(a.BTimeSeconds) + b.Write64(a.BTimeNanoSeconds) + b.Write64(a.Gen) + b.Write64(a.DataVersion) +} + +// Decode implements encoder.Decode. +func (a *Attr) Decode(b *buffer) { + a.Mode = b.ReadFileMode() + a.UID = b.ReadUID() + a.GID = b.ReadGID() + a.NLink = b.Read64() + a.RDev = b.Read64() + a.Size = b.Read64() + a.BlockSize = b.Read64() + a.Blocks = b.Read64() + a.ATimeSeconds = b.Read64() + a.ATimeNanoSeconds = b.Read64() + a.MTimeSeconds = b.Read64() + a.MTimeNanoSeconds = b.Read64() + a.CTimeSeconds = b.Read64() + a.CTimeNanoSeconds = b.Read64() + a.BTimeSeconds = b.Read64() + a.BTimeNanoSeconds = b.Read64() + a.Gen = b.Read64() + a.DataVersion = b.Read64() +} + +// StatToAttr converts a Linux syscall stat structure to an Attr. +func StatToAttr(s *syscall.Stat_t, req AttrMask) (Attr, AttrMask) { + attr := Attr{ + UID: NoUID, + GID: NoGID, + } + if req.Mode { + // p9.FileMode corresponds to Linux mode_t. 
+ attr.Mode = FileMode(s.Mode) + } + if req.NLink { + attr.NLink = s.Nlink + } + if req.UID { + attr.UID = UID(s.Uid) + } + if req.GID { + attr.GID = GID(s.Gid) + } + if req.RDev { + attr.RDev = s.Dev + } + if req.ATime { + attr.ATimeSeconds = uint64(s.Atim.Sec) + attr.ATimeNanoSeconds = uint64(s.Atim.Nsec) + } + if req.MTime { + attr.MTimeSeconds = uint64(s.Mtim.Sec) + attr.MTimeNanoSeconds = uint64(s.Mtim.Nsec) + } + if req.CTime { + attr.CTimeSeconds = uint64(s.Ctim.Sec) + attr.CTimeNanoSeconds = uint64(s.Ctim.Nsec) + } + if req.Size { + attr.Size = uint64(s.Size) + } + if req.Blocks { + attr.BlockSize = uint64(s.Blksize) + attr.Blocks = uint64(s.Blocks) + } + + // Use the req field because we already have it. + req.BTime = false + req.Gen = false + req.DataVersion = false + + return attr, req +} + +// SetAttrMask specifies a valid mask for setattr. +type SetAttrMask struct { + Permissions bool + UID bool + GID bool + Size bool + ATime bool + MTime bool + CTime bool + ATimeNotSystemTime bool + MTimeNotSystemTime bool +} + +// IsSubsetOf returns whether s is a subset of m. +func (s SetAttrMask) IsSubsetOf(m SetAttrMask) bool { + sb := s.bitmask() + sm := m.bitmask() + return sm|sb == sm +} + +// String implements fmt.Stringer. +func (s SetAttrMask) String() string { + var masks []string + if s.Permissions { + masks = append(masks, "Permissions") + } + if s.UID { + masks = append(masks, "UID") + } + if s.GID { + masks = append(masks, "GID") + } + if s.Size { + masks = append(masks, "Size") + } + if s.ATime { + masks = append(masks, "ATime") + } + if s.MTime { + masks = append(masks, "MTime") + } + if s.CTime { + masks = append(masks, "CTime") + } + if s.ATimeNotSystemTime { + masks = append(masks, "ATimeNotSystemTime") + } + if s.MTimeNotSystemTime { + masks = append(masks, "MTimeNotSystemTime") + } + return fmt.Sprintf("SetAttrMask{with: %s}", strings.Join(masks, " ")) +} + +// Empty returns true if no fields are masked. 
+func (s SetAttrMask) Empty() bool { + return !s.Permissions && !s.UID && !s.GID && !s.Size && !s.ATime && !s.MTime && !s.CTime && !s.ATimeNotSystemTime && !s.MTimeNotSystemTime +} + +// Decode implements encoder.Decode. +func (s *SetAttrMask) Decode(b *buffer) { + mask := b.Read32() + s.Permissions = mask&0x00000001 != 0 + s.UID = mask&0x00000002 != 0 + s.GID = mask&0x00000004 != 0 + s.Size = mask&0x00000008 != 0 + s.ATime = mask&0x00000010 != 0 + s.MTime = mask&0x00000020 != 0 + s.CTime = mask&0x00000040 != 0 + s.ATimeNotSystemTime = mask&0x00000080 != 0 + s.MTimeNotSystemTime = mask&0x00000100 != 0 +} + +func (s SetAttrMask) bitmask() uint32 { + var mask uint32 + if s.Permissions { + mask |= 0x00000001 + } + if s.UID { + mask |= 0x00000002 + } + if s.GID { + mask |= 0x00000004 + } + if s.Size { + mask |= 0x00000008 + } + if s.ATime { + mask |= 0x00000010 + } + if s.MTime { + mask |= 0x00000020 + } + if s.CTime { + mask |= 0x00000040 + } + if s.ATimeNotSystemTime { + mask |= 0x00000080 + } + if s.MTimeNotSystemTime { + mask |= 0x00000100 + } + return mask +} + +// Encode implements encoder.Encode. +func (s *SetAttrMask) Encode(b *buffer) { + b.Write32(s.bitmask()) +} + +// SetAttr specifies a set of attributes for a setattr. +type SetAttr struct { + Permissions FileMode + UID UID + GID GID + Size uint64 + ATimeSeconds uint64 + ATimeNanoSeconds uint64 + MTimeSeconds uint64 + MTimeNanoSeconds uint64 +} + +// String implements fmt.Stringer. +func (s SetAttr) String() string { + return fmt.Sprintf("SetAttr{Permissions: 0o%o, UID: %d, GID: %d, Size: %d, ATime: {Sec: %d, NanoSec: %d}, MTime: {Sec: %d, NanoSec: %d}}", s.Permissions, s.UID, s.GID, s.Size, s.ATimeSeconds, s.ATimeNanoSeconds, s.MTimeSeconds, s.MTimeNanoSeconds) +} + +// Decode implements encoder.Decode. 
+func (s *SetAttr) Decode(b *buffer) { + s.Permissions = b.ReadPermissions() + s.UID = b.ReadUID() + s.GID = b.ReadGID() + s.Size = b.Read64() + s.ATimeSeconds = b.Read64() + s.ATimeNanoSeconds = b.Read64() + s.MTimeSeconds = b.Read64() + s.MTimeNanoSeconds = b.Read64() +} + +// Encode implements encoder.Encode. +func (s *SetAttr) Encode(b *buffer) { + b.WritePermissions(s.Permissions) + b.WriteUID(s.UID) + b.WriteGID(s.GID) + b.Write64(s.Size) + b.Write64(s.ATimeSeconds) + b.Write64(s.ATimeNanoSeconds) + b.Write64(s.MTimeSeconds) + b.Write64(s.MTimeNanoSeconds) +} + +// Dirent is used for readdir. +type Dirent struct { + // QID is the entry QID. + QID QID + + // Offset is the offset in the directory. + // + // This will be communicated back the original caller. + Offset uint64 + + // Type is the 9P type. + Type QIDType + + // Name is the name of the entry (i.e. basename). + Name string +} + +// String implements fmt.Stringer. +func (d Dirent) String() string { + return fmt.Sprintf("Dirent{QID: %d, Offset: %d, Type: 0x%X, Name: %s}", d.QID, d.Offset, d.Type, d.Name) +} + +// Decode implements encoder.Decode. +func (d *Dirent) Decode(b *buffer) { + d.QID.Decode(b) + d.Offset = b.Read64() + d.Type = b.ReadQIDType() + d.Name = b.ReadString() +} + +// Encode implements encoder.Encode. +func (d *Dirent) Encode(b *buffer) { + d.QID.Encode(b) + b.Write64(d.Offset) + b.WriteQIDType(d.Type) + b.WriteString(d.Name) +} diff --git a/pkg/p9/p9_test.go b/pkg/p9/p9_test.go new file mode 100644 index 000000000..a50ac80a4 --- /dev/null +++ b/pkg/p9/p9_test.go @@ -0,0 +1,188 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "os" + "testing" +) + +func TestFileModeHelpers(t *testing.T) { + fns := map[FileMode]struct { + // name identifies the file mode. + name string + + // function is the function that should return true given the + // right FileMode. + function func(m FileMode) bool + }{ + ModeRegular: { + name: "regular", + function: FileMode.IsRegular, + }, + ModeDirectory: { + name: "directory", + function: FileMode.IsDir, + }, + ModeNamedPipe: { + name: "named pipe", + function: FileMode.IsNamedPipe, + }, + ModeCharacterDevice: { + name: "character device", + function: FileMode.IsCharacterDevice, + }, + ModeBlockDevice: { + name: "block device", + function: FileMode.IsBlockDevice, + }, + ModeSymlink: { + name: "symlink", + function: FileMode.IsSymlink, + }, + ModeSocket: { + name: "socket", + function: FileMode.IsSocket, + }, + } + for mode, info := range fns { + // Make sure the mode doesn't identify as anything but itself. + for testMode, testfns := range fns { + if mode != testMode && testfns.function(mode) { + t.Errorf("Mode %s returned true when asked if it was mode %s", info.name, testfns.name) + } + } + + // Make sure mode identifies as itself. + if !info.function(mode) { + t.Errorf("Mode %s returned false when asked if it was itself", info.name) + } + } +} + +func TestFileModeToQID(t *testing.T) { + for _, test := range []struct { + // name identifies the test. + name string + + // mode is the FileMode we start out with. + mode FileMode + + // want is the corresponding QIDType we expect. 
+ want QIDType + }{ + { + name: "Directories are of type directory", + mode: ModeDirectory, + want: TypeDir, + }, + { + name: "Sockets are append-only files", + mode: ModeSocket, + want: TypeAppendOnly, + }, + { + name: "Named pipes are append-only files", + mode: ModeNamedPipe, + want: TypeAppendOnly, + }, + { + name: "Character devices are append-only files", + mode: ModeCharacterDevice, + want: TypeAppendOnly, + }, + { + name: "Symlinks are of type symlink", + mode: ModeSymlink, + want: TypeSymlink, + }, + { + name: "Regular files are of type regular", + mode: ModeRegular, + want: TypeRegular, + }, + { + name: "Block devices are regular files", + mode: ModeBlockDevice, + want: TypeRegular, + }, + } { + if qidType := test.mode.QIDType(); qidType != test.want { + t.Errorf("ModeToQID test %s failed: got %o, wanted %o", test.name, qidType, test.want) + } + } +} + +func TestP9ModeConverters(t *testing.T) { + for _, m := range []FileMode{ + ModeRegular, + ModeDirectory, + ModeCharacterDevice, + ModeBlockDevice, + ModeSocket, + ModeSymlink, + ModeNamedPipe, + } { + if mb := ModeFromOS(m.OSMode()); mb != m { + t.Errorf("Converting %o to OS.FileMode gives %o and is converted back as %o", m, m.OSMode(), mb) + } + } +} + +func TestOSModeConverters(t *testing.T) { + // Modes that can be converted back and forth. + for _, m := range []os.FileMode{ + 0, // Regular file. + os.ModeDir, + os.ModeCharDevice | os.ModeDevice, + os.ModeDevice, + os.ModeSocket, + os.ModeSymlink, + os.ModeNamedPipe, + } { + if mb := ModeFromOS(m).OSMode(); mb != m { + t.Errorf("Converting %o to p9.FileMode gives %o and is converted back as %o", m, ModeFromOS(m), mb) + } + } + + // Modes that will be converted to a regular file since p9 cannot + // express these. 
+ for _, m := range []os.FileMode{ + os.ModeAppend, + os.ModeExclusive, + os.ModeTemporary, + } { + if p9Mode := ModeFromOS(m); p9Mode != ModeRegular { + t.Errorf("Converting %o to p9.FileMode should have given ModeRegular, but yielded %o", m, p9Mode) + } + } +} + +func TestAttrMaskContains(t *testing.T) { + req := AttrMask{Mode: true, Size: true} + have := AttrMask{} + if have.Contains(req) { + t.Fatalf("AttrMask %v should not be a superset of %v", have, req) + } + have.Mode = true + if have.Contains(req) { + t.Fatalf("AttrMask %v should not be a superset of %v", have, req) + } + have.Size = true + have.MTime = true + if !have.Contains(req) { + t.Fatalf("AttrMask %v should be a superset of %v", have, req) + } +} diff --git a/pkg/p9/p9test/BUILD b/pkg/p9/p9test/BUILD new file mode 100644 index 000000000..339c86089 --- /dev/null +++ b/pkg/p9/p9test/BUILD @@ -0,0 +1,28 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_test( + name = "p9test_test", + size = "small", + srcs = ["client_test.go"], + embed = [":p9test"], + deps = [ + "//pkg/fd", + "//pkg/p9", + "//pkg/unet", + ], +) + +go_library( + name = "p9test", + srcs = [ + "mocks.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/p9/p9test", + visibility = ["//:sandbox"], + deps = [ + "//pkg/fd", + "//pkg/p9", + ], +) diff --git a/pkg/p9/p9test/client_test.go b/pkg/p9/p9test/client_test.go new file mode 100644 index 000000000..8e35d6017 --- /dev/null +++ b/pkg/p9/p9test/client_test.go @@ -0,0 +1,335 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9test + +import ( + "io/ioutil" + "os" + "reflect" + "syscall" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +func TestDonateFD(t *testing.T) { + // Temporary file. + osFile, err := ioutil.TempFile("", "p9") + if err != nil { + t.Fatalf("could not create temporary file: %v", err) + } + os.Remove(osFile.Name()) + + hfi, err := osFile.Stat() + if err != nil { + osFile.Close() + t.Fatalf("stat failed: %v", err) + } + osFileStat := hfi.Sys().(*syscall.Stat_t) + + f, err := fd.NewFromFile(osFile) + // osFile should always be closed. + osFile.Close() + if err != nil { + t.Fatalf("unable to create file: %v", err) + } + + // Craft attacher to attach to the mocked file which will return our + // temporary file. + fileMock := &FileMock{OpenMock: OpenMock{File: f}} + attacher := &AttachMock{File: fileMock} + + // Make socket pair. + serverSocket, clientSocket, err := unet.SocketPair(false) + if err != nil { + t.Fatalf("socketpair got err %v wanted nil", err) + } + defer clientSocket.Close() + server := p9.NewServer(attacher) + go server.Handle(serverSocket) + client, err := p9.NewClient(clientSocket, 1024*1024 /* 1M message size */, p9.HighestVersionString()) + if err != nil { + t.Fatalf("new client got %v, expected nil", err) + } + + // Attach to the mocked file. + cFile, err := client.Attach("") + if err != nil { + t.Fatalf("attach failed: %v", err) + } + + // Try to open the mocked file. 
+ clientHostFile, _, _, err := cFile.Open(0) + if err != nil { + t.Fatalf("open failed: %v", err) + } + var clientStat syscall.Stat_t + if err := syscall.Fstat(clientHostFile.FD(), &clientStat); err != nil { + t.Fatalf("stat failed: %v", err) + } + + // Compare inode nums to make sure it's the same file. + if clientStat.Ino != osFileStat.Ino { + t.Errorf("fd donation failed") + } +} + +// TestClient is a megatest. +// +// This allows us to probe various edge cases, while changing the state of the +// underlying server in expected ways. The test slowly builds server state and +// is documented inline. +// +// We wind up with the following, after probing edge cases: +// +// FID 1: ServerFile (sf). +// FID 2: Directory (d). +// FID 3: File (f). +// FID 4: Symlink (s). +// +// Although you should use the FID method on the individual files. +func TestClient(t *testing.T) { + var ( + // Sentinel error. + sentinelErr = syscall.Errno(4383) + + // Backend mocks. + a = &AttachMock{} + sf = &FileMock{} + d = &FileMock{} + f = &FileMock{} + s = &FileMock{} + + // Client Files for the above. + sfFile p9.File + ) + + testSteps := []struct { + name string + fn func(*p9.Client) error + want error + }{ + { + name: "bad-attach", + want: sentinelErr, + fn: func(c *p9.Client) error { + a.File = nil + a.Err = sentinelErr + _, err := c.Attach("") + return err + }, + }, + { + name: "attach", + fn: func(c *p9.Client) error { + a.Called = false + a.File = sf + a.Err = nil + var err error + sfFile, err = c.Attach("foo") + if !a.Called { + t.Errorf("Attach never Called?") + } + if a.AttachName != "foo" { + // This wasn't carried through? 
+ t.Errorf("attachName got %v wanted foo", a.AttachName) + } + return err + }, + }, + { + name: "bad-walk", + want: sentinelErr, + fn: func(c *p9.Client) error { + sf.WalkMock.File = d + sf.WalkMock.Err = sentinelErr + _, _, err := sfFile.Walk([]string{"foo", "bar"}) + return err + }, + }, + { + name: "walk-to-dir", + fn: func(c *p9.Client) error { + sf.WalkMock.Called = false + sf.WalkMock.File = d + sf.WalkMock.Err = nil + sf.WalkMock.QIDs = []p9.QID{{Type: 1}} + var qids []p9.QID + var err error + qids, _, err = sfFile.Walk([]string{"foo", "bar"}) + if !sf.WalkMock.Called { + t.Errorf("Walk never Called?") + } + if !reflect.DeepEqual(sf.WalkMock.Names, []string{"foo", "bar"}) { + t.Errorf("got names %v wanted []{foo, bar}", sf.WalkMock.Names) + } + if len(qids) != 1 || qids[0].Type != 1 { + t.Errorf("got qids %v wanted []{{Type: 1}}", qids) + } + return err + }, + }, + { + name: "walkgetattr-to-dir", + fn: func(c *p9.Client) error { + sf.WalkGetAttrMock.Called = false + sf.WalkGetAttrMock.File = d + sf.WalkGetAttrMock.Err = nil + sf.WalkGetAttrMock.QIDs = []p9.QID{{Type: 1}} + sf.WalkGetAttrMock.Attr = p9.Attr{UID: 1} + sf.WalkGetAttrMock.Valid = p9.AttrMask{Mode: true} + var qids []p9.QID + var err error + var mask p9.AttrMask + var attr p9.Attr + qids, _, mask, attr, err = sfFile.WalkGetAttr([]string{"foo", "bar"}) + if !sf.WalkGetAttrMock.Called { + t.Errorf("Walk never Called?") + } + if !reflect.DeepEqual(sf.WalkGetAttrMock.Names, []string{"foo", "bar"}) { + t.Errorf("got names %v wanted []{foo, bar}", sf.WalkGetAttrMock.Names) + } + if len(qids) != 1 || qids[0].Type != 1 { + t.Errorf("got qids %v wanted []{{Type: 1}}", qids) + } + if !reflect.DeepEqual(attr, sf.WalkGetAttrMock.Attr) { + t.Errorf("got attrs %s wanted %s", attr, sf.WalkGetAttrMock.Attr) + } + if !reflect.DeepEqual(mask, sf.WalkGetAttrMock.Valid) { + t.Errorf("got mask %s wanted %s", mask, sf.WalkGetAttrMock.Valid) + } + return err + }, + }, + { + name: "walk-to-file", + fn: func(c 
*p9.Client) error { + // Basic sanity check is done in walk-to-dir. + // + // Here we just create basic file FIDs to use. + sf.WalkMock.File = f + sf.WalkMock.Err = nil + var err error + _, _, err = sfFile.Walk(nil) + return err + }, + }, + { + name: "walk-to-symlink", + fn: func(c *p9.Client) error { + // See note in walk-to-file. + sf.WalkMock.File = s + sf.WalkMock.Err = nil + var err error + _, _, err = sfFile.Walk(nil) + return err + }, + }, + { + name: "bad-statfs", + want: sentinelErr, + fn: func(c *p9.Client) error { + sf.StatFSMock.Err = sentinelErr + _, err := sfFile.StatFS() + return err + }, + }, + { + name: "statfs", + fn: func(c *p9.Client) error { + sf.StatFSMock.Called = false + sf.StatFSMock.Stat = p9.FSStat{Type: 1} + sf.StatFSMock.Err = nil + stat, err := sfFile.StatFS() + if !sf.StatFSMock.Called { + t.Errorf("StatfS never Called?") + } + if stat.Type != 1 { + t.Errorf("got stat %v wanted {Type: 1}", stat) + } + return err + }, + }, + } + + // First, create a new server and connection. + serverSocket, clientSocket, err := unet.SocketPair(false) + if err != nil { + t.Fatalf("socketpair got err %v wanted nil", err) + } + defer clientSocket.Close() + server := p9.NewServer(a) + go server.Handle(serverSocket) + client, err := p9.NewClient(clientSocket, 1024*1024 /* 1M message size */, p9.HighestVersionString()) + if err != nil { + t.Fatalf("new client got err %v, wanted nil", err) + } + + // Now, run through each of the test steps. + for _, step := range testSteps { + err := step.fn(client) + if err != step.want { + // Don't fail, just note this one step failed. + t.Errorf("step %q got %v wanted %v", step.name, err, step.want) + } + } +} + +func BenchmarkClient(b *testing.B) { + // Backend mock. + a := &AttachMock{ + File: &FileMock{ + ReadAtMock: ReadAtMock{N: 1}, + }, + } + + // First, create a new server and connection. 
+ serverSocket, clientSocket, err := unet.SocketPair(false) + if err != nil { + b.Fatalf("socketpair got err %v wanted nil", err) + } + defer clientSocket.Close() + server := p9.NewServer(a) + go server.Handle(serverSocket) + client, err := p9.NewClient(clientSocket, 1024*1024 /* 1M message size */, p9.HighestVersionString()) + if err != nil { + b.Fatalf("new client got %v, expected nil", err) + } + + // Attach to the server. + f, err := client.Attach("") + if err != nil { + b.Fatalf("error during attach, got %v wanted nil", err) + } + + // Open the file. + if _, _, _, err := f.Open(p9.ReadOnly); err != nil { + b.Fatalf("error during open, got %v wanted nil", err) + } + + // Reset the clock. + b.ResetTimer() + + // Do N reads. + var buf [1]byte + for i := 0; i < b.N; i++ { + _, err := f.ReadAt(buf[:], 0) + if err != nil { + b.Fatalf("error during read %d, got %v wanted nil", i, err) + } + } +} diff --git a/pkg/p9/p9test/mocks.go b/pkg/p9/p9test/mocks.go new file mode 100644 index 000000000..e10f206dd --- /dev/null +++ b/pkg/p9/p9test/mocks.go @@ -0,0 +1,490 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9test + +import ( + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/p9" +) + +// StatFSMock mocks p9.File.StatFS. +type StatFSMock struct { + Called bool + + // Return. + Stat p9.FSStat + Err error +} + +// StatFS implements p9.File.StatFS. 
+func (f *StatFSMock) StatFS() (p9.FSStat, error) { + f.Called = true + return f.Stat, f.Err +} + +// GetAttrMock mocks p9.File.GetAttr. +type GetAttrMock struct { + Called bool + + // Args. + Req p9.AttrMask + + // Return. + QID p9.QID + Valid p9.AttrMask + Attr p9.Attr + Err error +} + +// GetAttr implements p9.File.GetAttr. +func (g *GetAttrMock) GetAttr(req p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) { + g.Called, g.Req = true, req + return g.QID, g.Valid, g.Attr, g.Err +} + +// WalkGetAttrMock mocks p9.File.WalkGetAttr. +type WalkGetAttrMock struct { + Called bool + + // Args. + Names []string + + // Return. + QIDs []p9.QID + File p9.File + Valid p9.AttrMask + Attr p9.Attr + Err error +} + +// WalkGetAttr implements p9.File.WalkGetAttr. +func (w *WalkGetAttrMock) WalkGetAttr(names []string) ([]p9.QID, p9.File, p9.AttrMask, p9.Attr, error) { + w.Called, w.Names = true, names + return w.QIDs, w.File, w.Valid, w.Attr, w.Err +} + +// SetAttrMock mocks p9.File.SetAttr. +type SetAttrMock struct { + Called bool + + // Args. + Valid p9.SetAttrMask + Attr p9.SetAttr + + // Return. + Err error +} + +// SetAttr implements p9.File.SetAttr. +func (s *SetAttrMock) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error { + s.Called, s.Valid, s.Attr = true, valid, attr + return s.Err +} + +// RemoveMock mocks p9.File.Remove. +type RemoveMock struct { + Called bool + + // Return. + Err error +} + +// Remove implements p9.File.Remove. +func (r *RemoveMock) Remove() error { + r.Called = true + return r.Err +} + +// OpenMock mocks p9.File.Open. +type OpenMock struct { + Called bool + + // Args. + Flags p9.OpenFlags + + // Return. + File *fd.FD + QID p9.QID + IOUnit uint32 + Err error +} + +// Open implements p9.File.Open. +func (o *OpenMock) Open(flags p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) { + o.Called, o.Flags = true, flags + return o.File, o.QID, o.IOUnit, o.Err +} + +// ReadAtMock mocks p9.File.ReadAt. +type ReadAtMock struct { + Called bool + + // Args. 
+ P []byte + Offset uint64 + + // Return. + N int + Err error +} + +// ReadAt implements p9.File.ReadAt. +func (r *ReadAtMock) ReadAt(p []byte, offset uint64) (int, error) { + r.Called, r.P, r.Offset = true, p, offset + return r.N, r.Err +} + +// WriteAtMock mocks p9.File.WriteAt. +type WriteAtMock struct { + Called bool + + // Args. + P []byte + Offset uint64 + + // Return. + N int + Err error +} + +// WriteAt implements p9.File.WriteAt. +func (w *WriteAtMock) WriteAt(p []byte, offset uint64) (int, error) { + w.Called, w.P, w.Offset = true, p, offset + return w.N, w.Err +} + +// FSyncMock mocks p9.File.FSync. +type FSyncMock struct { + Called bool + + // Return. + Err error +} + +// FSync implements p9.File.FSync. +func (f *FSyncMock) FSync() error { + f.Called = true + return f.Err +} + +// MkdirMock mocks p9.File.Mkdir. +type MkdirMock struct { + Called bool + + // Args. + Name string + Permissions p9.FileMode + UID p9.UID + GID p9.GID + + // Return. + QID p9.QID + Err error +} + +// Mkdir implements p9.File.Mkdir. +func (s *MkdirMock) Mkdir(name string, permissions p9.FileMode, uid p9.UID, gid p9.GID) (p9.QID, error) { + s.Called, s.Name, s.Permissions, s.UID, s.GID = true, name, permissions, uid, gid + return s.QID, s.Err +} + +// SymlinkMock mocks p9.File.Symlink. +type SymlinkMock struct { + Called bool + + // Args. + Oldname string + Newname string + UID p9.UID + GID p9.GID + + // Return. + QID p9.QID + Err error +} + +// Symlink implements p9.File.Symlink. +func (s *SymlinkMock) Symlink(oldname string, newname string, uid p9.UID, gid p9.GID) (p9.QID, error) { + s.Called, s.Oldname, s.Newname, s.UID, s.GID = true, oldname, newname, uid, gid + return s.QID, s.Err +} + +// MknodMock mocks p9.File.Mknod. +type MknodMock struct { + Called bool + + // Args. + Name string + Permissions p9.FileMode + Major uint32 + Minor uint32 + UID p9.UID + GID p9.GID + + // Return. + QID p9.QID + Err error +} + +// Mknod implements p9.File.Mknod. 
+func (m *MknodMock) Mknod(name string, permissions p9.FileMode, major uint32, minor uint32, uid p9.UID, gid p9.GID) (p9.QID, error) { + m.Called, m.Name, m.Permissions, m.Major, m.Minor, m.UID, m.GID = true, name, permissions, major, minor, uid, gid + return m.QID, m.Err +} + +// UnlinkAtMock mocks p9.File.UnlinkAt. +type UnlinkAtMock struct { + Called bool + + // Args. + Name string + Flags uint32 + + // Return. + Err error +} + +// UnlinkAt implements p9.File.UnlinkAt. +func (u *UnlinkAtMock) UnlinkAt(name string, flags uint32) error { + u.Called, u.Name, u.Flags = true, name, flags + return u.Err +} + +// ReaddirMock mocks p9.File.Readdir. +type ReaddirMock struct { + Called bool + + // Args. + Offset uint64 + Count uint32 + + // Return. + Dirents []p9.Dirent + Err error +} + +// Readdir implements p9.File.Readdir. +func (r *ReaddirMock) Readdir(offset uint64, count uint32) ([]p9.Dirent, error) { + r.Called, r.Offset, r.Count = true, offset, count + return r.Dirents, r.Err +} + +// ReadlinkMock mocks p9.File.Readlink. +type ReadlinkMock struct { + Called bool + + // Return. + Target string + Err error +} + +// Readlink implements p9.File.Readlink. +func (r *ReadlinkMock) Readlink() (string, error) { + r.Called = true + return r.Target, r.Err +} + +// AttachMock mocks p9.Attacher.Attach. +type AttachMock struct { + Called bool + + // Args. + AttachName string + + // Return. + File p9.File + Err error +} + +// Attach implements p9.Attacher.Attach. +func (a *AttachMock) Attach(attachName string) (p9.File, error) { + a.Called, a.AttachName = true, attachName + return a.File, a.Err +} + +// WalkMock mocks p9.File.Walk. +type WalkMock struct { + Called bool + + // Args. + Names []string + + // Return. + QIDs []p9.QID + File p9.File + Err error +} + +// Walk implements p9.File.Walk. +func (w *WalkMock) Walk(names []string) ([]p9.QID, p9.File, error) { + w.Called, w.Names = true, names + return w.QIDs, w.File, w.Err +} + +// RenameMock mocks p9.File.Rename. 
+type RenameMock struct { + Called bool + + // Args. + Directory p9.File + Name string + + // Return. + Err error +} + +// Rename implements p9.File.Rename. +func (r *RenameMock) Rename(directory p9.File, name string) error { + r.Called, r.Directory, r.Name = true, directory, name + return r.Err +} + +// CloseMock mocks p9.File.Close. +type CloseMock struct { + Called bool + + // Return. + Err error +} + +// Close implements p9.File.Close. +func (d *CloseMock) Close() error { + d.Called = true + return d.Err +} + +// CreateMock mocks p9.File.Create. +type CreateMock struct { + Called bool + + // Args. + Name string + Flags p9.OpenFlags + Permissions p9.FileMode + UID p9.UID + GID p9.GID + + // Return. + HostFile *fd.FD + File p9.File + QID p9.QID + IOUnit uint32 + Err error +} + +// Create implements p9.File.Create. +func (c *CreateMock) Create(name string, flags p9.OpenFlags, permissions p9.FileMode, uid p9.UID, gid p9.GID) (*fd.FD, p9.File, p9.QID, uint32, error) { + c.Called, c.Name, c.Flags, c.Permissions, c.UID, c.GID = true, name, flags, permissions, uid, gid + return c.HostFile, c.File, c.QID, c.IOUnit, c.Err +} + +// LinkMock mocks p9.File.Link. +type LinkMock struct { + Called bool + + // Args. + Target p9.File + Newname string + + // Return. + Err error +} + +// Link implements p9.File.Link. +func (l *LinkMock) Link(target p9.File, newname string) error { + l.Called, l.Target, l.Newname = true, target, newname + return l.Err +} + +// RenameAtMock mocks p9.File.RenameAt. +type RenameAtMock struct { + Called bool + + // Args. + Oldname string + Newdir p9.File + Newname string + + // Return. + Err error +} + +// RenameAt implements p9.File.RenameAt. +func (r *RenameAtMock) RenameAt(oldname string, newdir p9.File, newname string) error { + r.Called, r.Oldname, r.Newdir, r.Newname = true, oldname, newdir, newname + return r.Err +} + +// FlushMock mocks p9.File.Flush. +type FlushMock struct { + Called bool + + // Return. 
+ Err error +} + +// Flush implements p9.File.Flush. +func (f *FlushMock) Flush() error { + return f.Err +} + +// ConnectMock mocks p9.File.Connect. +type ConnectMock struct { + Called bool + + // Args. + Flags p9.ConnectFlags + + // Return. + File *fd.FD + Err error +} + +// Connect implements p9.File.Connect. +func (o *ConnectMock) Connect(flags p9.ConnectFlags) (*fd.FD, error) { + o.Called, o.Flags = true, flags + return o.File, o.Err +} + +// FileMock mocks p9.File. +type FileMock struct { + WalkMock + WalkGetAttrMock + StatFSMock + GetAttrMock + SetAttrMock + RemoveMock + RenameMock + CloseMock + OpenMock + ReadAtMock + WriteAtMock + FSyncMock + CreateMock + MkdirMock + SymlinkMock + LinkMock + MknodMock + RenameAtMock + UnlinkAtMock + ReaddirMock + ReadlinkMock + FlushMock + ConnectMock +} + +var ( + _ p9.File = &FileMock{} +) diff --git a/pkg/p9/pool.go b/pkg/p9/pool.go new file mode 100644 index 000000000..9a508b898 --- /dev/null +++ b/pkg/p9/pool.go @@ -0,0 +1,68 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "sync" +) + +// pool is a simple allocator. +// +// It is used for both tags and FIDs. +type pool struct { + mu sync.Mutex + + // cache is the set of returned values. + cache []uint64 + + // start is the starting value (if needed). + start uint64 + + // max is the current maximum issued. + max uint64 + + // limit is the upper limit. 
+ limit uint64 +} + +// Get gets a value from the pool. +func (p *pool) Get() (uint64, bool) { + p.mu.Lock() + defer p.mu.Unlock() + + // Anything cached? + if len(p.cache) > 0 { + v := p.cache[len(p.cache)-1] + p.cache = p.cache[:len(p.cache)-1] + return v, true + } + + // Over the limit? + if p.start == p.limit { + return 0, false + } + + // Generate a new value. + v := p.start + p.start++ + return v, true +} + +// Put returns a value to the pool. +func (p *pool) Put(v uint64) { + p.mu.Lock() + p.cache = append(p.cache, v) + p.mu.Unlock() +} diff --git a/pkg/p9/pool_test.go b/pkg/p9/pool_test.go new file mode 100644 index 000000000..96be2c8bd --- /dev/null +++ b/pkg/p9/pool_test.go @@ -0,0 +1,64 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "testing" +) + +func TestPoolUnique(t *testing.T) { + p := pool{start: 1, limit: 3} + got := make(map[uint64]bool) + + for { + n, ok := p.Get() + if !ok { + break + } + + // Check unique. + if _, ok := got[n]; ok { + t.Errorf("pool spit out %v multiple times", n) + } + + // Record. 
+		got[n] = true
+	}
+}
+
+// TestExhausted checks that the pool stops issuing values once the
+// (exclusive) limit is reached. Note: name fixed from "TestExausted";
+// test names are discovered by the framework, so this is safe.
+func TestExhausted(t *testing.T) {
+	p := pool{start: 1, limit: 500}
+	for i := 0; i < 499; i++ {
+		_, ok := p.Get()
+		if !ok {
+			t.Fatalf("pool exhausted before 499 items")
+		}
+	}
+
+	_, ok := p.Get()
+	if ok {
+		t.Errorf("pool not exhausted when it should be")
+	}
+}
+
+func TestPoolRecycle(t *testing.T) {
+	p := pool{start: 1, limit: 500}
+	n1, _ := p.Get()
+	p.Put(n1)
+	n2, _ := p.Get()
+	if n1 != n2 {
+		t.Errorf("pool not recycling items")
+	}
+}
diff --git a/pkg/p9/server.go b/pkg/p9/server.go
new file mode 100644
index 000000000..2965ae16e
--- /dev/null
+++ b/pkg/p9/server.go
@@ -0,0 +1,380 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package p9
+
+import (
+	"sync"
+	"sync/atomic"
+	"syscall"
+
+	"gvisor.googlesource.com/gvisor/pkg/log"
+	"gvisor.googlesource.com/gvisor/pkg/unet"
+)
+
+// Server is a 9p2000.L server.
+type Server struct {
+	// attacher provides the attach function.
+	attacher Attacher
+}
+
+// NewServer returns a new server.
+func NewServer(attacher Attacher) *Server {
+	return &Server{
+		attacher: attacher,
+	}
+}
+
+// connState is the state for a single connection.
+type connState struct {
+	// server is the backing server.
+	server *Server
+
+	// sendMu is the send lock.
+	sendMu sync.Mutex
+
+	// conn is the connection.
+	conn *unet.Socket
+
+	// fids is the set of active FIDs.
+	//
+	// This is used to find FIDs for files.
+	// fidMu protects the fids map below.
+	fidMu sync.Mutex
+	fids  map[FID]*fidRef
+
+	// tags is the set of active tags.
+	//
+	// The given channel is closed when the
+	// tag is finished with processing.
+	tagMu sync.Mutex
+	tags  map[Tag]chan struct{}
+
+	// messageSize is the maximum message size. The server does not
+	// do automatic splitting of messages.
+	messageSize uint32
+
+	// version is the agreed upon version X of 9P2000.L.Google.X.
+	// version 0 implies 9P2000.L.
+	version uint32
+
+	// recvOkay indicates that a receive may start.
+	recvOkay chan bool
+
+	// recvDone is signalled when a message is received.
+	recvDone chan error
+
+	// sendDone is signalled when a send is finished.
+	sendDone chan error
+}
+
+// fidRef wraps a node and tracks references.
+type fidRef struct {
+	// file is the associated File.
+	file File
+
+	// refs is an active reference count.
+	//
+	// refs is always manipulated with atomic operations (see DecRef
+	// and LookupFID below).
+	//
+	// The node above will be closed only when refs reaches zero.
+	refs int64
+
+	// openedMu protects opened and openFlags.
+	openedMu sync.Mutex
+
+	// opened indicates whether this has been opened already.
+	//
+	// This is updated in handlers.go.
+	opened bool
+
+	// openFlags is the mode used in the open.
+	//
+	// This is updated in handlers.go.
+	openFlags OpenFlags
+}
+
+// OpenFlags returns the flags the file was opened with and true iff the fid was opened previously.
+func (f *fidRef) OpenFlags() (OpenFlags, bool) {
+	f.openedMu.Lock()
+	defer f.openedMu.Unlock()
+	return f.openFlags, f.opened
+}
+
+// DecRef should be called when you're finished with a fid.
+//
+// When the count drops to zero the underlying file is closed; any
+// error from Close is discarded.
+func (f *fidRef) DecRef() {
+	if atomic.AddInt64(&f.refs, -1) == 0 {
+		f.file.Close()
+	}
+}
+
+// LookupFID finds the given FID.
+//
+// On success this takes an additional reference (under fidMu, so the
+// ref cannot race with removal from the table).
+//
+// You should call fid.DecRef when you are finished using the fid.
+func (cs *connState) LookupFID(fid FID) (*fidRef, bool) {
+	cs.fidMu.Lock()
+	defer cs.fidMu.Unlock()
+	fidRef, ok := cs.fids[fid]
+	if ok {
+		atomic.AddInt64(&fidRef.refs, 1)
+		return fidRef, true
+	}
+	return nil, false
+}
+
+// InsertFID installs the given FID.
+// +// This fid starts with a reference count of one. If a FID exists in +// the slot already it is closed, per the specification. +func (cs *connState) InsertFID(fid FID, newRef *fidRef) { + cs.fidMu.Lock() + defer cs.fidMu.Unlock() + origRef, ok := cs.fids[fid] + if ok { + defer origRef.DecRef() + } + atomic.AddInt64(&newRef.refs, 1) + cs.fids[fid] = newRef +} + +// DeleteFID removes the given FID. +// +// This simply removes it from the map and drops a reference. +func (cs *connState) DeleteFID(fid FID) bool { + cs.fidMu.Lock() + defer cs.fidMu.Unlock() + fidRef, ok := cs.fids[fid] + if !ok { + return false + } + delete(cs.fids, fid) + fidRef.DecRef() + return true +} + +// StartTag starts handling the tag. +// +// False is returned if this tag is already active. +func (cs *connState) StartTag(t Tag) bool { + cs.tagMu.Lock() + defer cs.tagMu.Unlock() + _, ok := cs.tags[t] + if ok { + return false + } + cs.tags[t] = make(chan struct{}) + return true +} + +// ClearTag finishes handling a tag. +func (cs *connState) ClearTag(t Tag) { + cs.tagMu.Lock() + defer cs.tagMu.Unlock() + ch, ok := cs.tags[t] + if !ok { + // Should never happen. + panic("unused tag cleared") + } + delete(cs.tags, t) + + // Notify. + close(ch) +} + +// WaitTag waits for a tag to finish. +func (cs *connState) WaitTag(t Tag) { + cs.tagMu.Lock() + ch, ok := cs.tags[t] + cs.tagMu.Unlock() + if !ok { + return + } + + // Wait for close. + <-ch +} + +// handleRequest handles a single request. +// +// The recvDone channel is signaled when recv is done (with a error if +// necessary). The sendDone channel is signaled with the result of the send. +func (cs *connState) handleRequest() { + messageSize := atomic.LoadUint32(&cs.messageSize) + if messageSize == 0 { + // Default or not yet negotiated. + messageSize = maximumLength + } + + // Receive a message. + tag, m, err := recv(cs.conn, messageSize, messageByType) + if errSocket, ok := err.(ErrSocket); ok { + // Connection problem; stop serving. 
+ cs.recvDone <- errSocket.error + return + } + + // Signal receive is done. + cs.recvDone <- nil + + // Deal with other errors. + if err != nil { + // If it's not a connection error, but some other protocol error, + // we can send a response immediately. + log.Debugf("err [%05d] %v", tag, err) + cs.sendMu.Lock() + err := send(cs.conn, tag, newErr(err)) + cs.sendMu.Unlock() + cs.sendDone <- err + return + } + + // Try to start the tag. + if !cs.StartTag(tag) { + // Nothing we can do at this point; client is bogus. + cs.sendDone <- ErrNoValidMessage + return + } + + // Handle the message. + var r message + if handler, ok := m.(handler); ok { + // Call the message handler. + r = handler.handle(cs) + } else { + // Produce an ENOSYS error. + r = newErr(syscall.ENOSYS) + } + + // Clear the tag before sending. That's because as soon + // as this hits the wire, the client can legally send + // another message with the same tag. + cs.ClearTag(tag) + + // Send back the result. + cs.sendMu.Lock() + err = send(cs.conn, tag, r) + cs.sendMu.Unlock() + cs.sendDone <- err + return +} + +func (cs *connState) handleRequests() { + for range cs.recvOkay { + cs.handleRequest() + } +} + +func (cs *connState) stop() { + // Close all channels. + close(cs.recvOkay) + close(cs.recvDone) + close(cs.sendDone) + + for _, fidRef := range cs.fids { + // Drop final reference in the FID table. Note this should + // always close the file, since we've ensured that there are no + // handlers running via the wait for Pending => 0 below. + fidRef.DecRef() + } + + // Ensure the connection is closed. + cs.conn.Close() +} + +// service services requests concurrently. +func (cs *connState) service() error { + // Pending is the number of handlers that have finished receiving but + // not finished processing requests. These must be waiting on properly + // below. See the next comment for an explanation of the loop. + pending := 0 + + // Start the first request handler. 
+ go cs.handleRequests() // S/R-SAFE: Irrelevant. + cs.recvOkay <- true + + // We loop and make sure there's always one goroutine waiting for a new + // request. We process all the data for a single request in one + // goroutine however, to ensure the best turnaround time possible. + for { + select { + case err := <-cs.recvDone: + if err != nil { + // Wait for pending handlers. + for i := 0; i < pending; i++ { + <-cs.sendDone + } + return err + } + + // This handler is now pending. + pending++ + + // Kick the next receiver, or start a new handler + // if no receiver is currently waiting. + select { + case cs.recvOkay <- true: + default: + go cs.handleRequests() // S/R-SAFE: Irrelevant. + cs.recvOkay <- true + } + + case <-cs.sendDone: + // This handler is finished. + pending-- + + // Error sending a response? Nothing can be done. + // + // We don't terminate on a send error though, since + // we still have a pending receive. The error would + // have been logged above, we just ignore it here. + } + } +} + +// Handle handles a single connection. +func (s *Server) Handle(conn *unet.Socket) error { + cs := &connState{ + server: s, + conn: conn, + fids: make(map[FID]*fidRef), + tags: make(map[Tag]chan struct{}), + recvOkay: make(chan bool), + recvDone: make(chan error, 10), + sendDone: make(chan error, 10), + } + defer cs.stop() + return cs.service() +} + +// Serve handles requests from the bound socket. +// +// The passed serverSocket _must_ be created in packet mode. +func (s *Server) Serve(serverSocket *unet.ServerSocket) error { + var wg sync.WaitGroup + defer wg.Wait() + + for { + conn, err := serverSocket.Accept() + if err != nil { + // Something went wrong. + // + // Socket closed? + return err + } + + wg.Add(1) + go func(conn *unet.Socket) { // S/R-SAFE: Irrelevant. 
+ s.Handle(conn) + wg.Done() + }(conn) + } +} diff --git a/pkg/p9/transport.go b/pkg/p9/transport.go new file mode 100644 index 000000000..c42b41fcf --- /dev/null +++ b/pkg/p9/transport.go @@ -0,0 +1,346 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +// ErrSocket is returned in cases of a socket issue. +// +// This may be treated differently than other errors. +type ErrSocket struct { + // error is the socket error. + error +} + +// ErrMessageTooLarge indicates the size was larger than reasonable. +type ErrMessageTooLarge struct { + size uint32 + msize uint32 +} + +// Error returns a sensible error. +func (e *ErrMessageTooLarge) Error() string { + return fmt.Sprintf("message too large for fixed buffer: size is %d, limit is %d", e.size, e.msize) +} + +// ErrNoValidMessage indicates no valid message could be decoded. +var ErrNoValidMessage = errors.New("buffer contained no valid message") + +const ( + // headerLength is the number of bytes required for a header. + headerLength uint32 = 7 + + // maximumLength is the largest possible message. + maximumLength uint32 = 4 * 1024 * 1024 + + // initialBufferLength is the initial data buffer we allocate. 
+ initialBufferLength uint32 = 64 +) + +var dataPool = sync.Pool{ + New: func() interface{} { + // These buffers are used for decoding without a payload. + return make([]byte, initialBufferLength) + }, +} + +// send sends the given message over the socket. +func send(s *unet.Socket, tag Tag, m message) error { + data := dataPool.Get().([]byte) + dataBuf := buffer{data: data[:0]} + + if log.IsLogging(log.Debug) { + log.Debugf("send [FD %d] [Tag %06d] %s", s.FD(), tag, m.String()) + } + + // Encode the message. The buffer will grow automatically. + m.Encode(&dataBuf) + + // Get our vectors to send. + var hdr [headerLength]byte + vecs := make([][]byte, 0, 3) + vecs = append(vecs, hdr[:]) + if len(dataBuf.data) > 0 { + vecs = append(vecs, dataBuf.data) + } + totalLength := headerLength + uint32(len(dataBuf.data)) + + // Is there a payload? + if payloader, ok := m.(payloader); ok { + p := payloader.Payload() + if len(p) > 0 { + vecs = append(vecs, p) + totalLength += uint32(len(p)) + } + } + + // Construct the header. + headerBuf := buffer{data: hdr[:0]} + headerBuf.Write32(totalLength) + headerBuf.WriteMsgType(m.Type()) + headerBuf.WriteTag(tag) + + // Pack any files if necessary. + w := s.Writer(true) + if filer, ok := m.(filer); ok { + if f := filer.FilePayload(); f != nil { + defer f.Close() + // Pack the file into the message. + w.PackFDs(f.FD()) + } + } + + for n := 0; n < int(totalLength); { + cur, err := w.WriteVec(vecs) + if err != nil { + return ErrSocket{err} + } + n += cur + + // Consume iovecs. + for consumed := 0; consumed < cur; { + if len(vecs[0]) <= cur-consumed { + consumed += len(vecs[0]) + vecs = vecs[1:] + } else { + vecs[0] = vecs[0][cur-consumed:] + break + } + } + + if n > 0 && n < int(totalLength) { + // Don't resend any control message. + w.UnpackFDs() + } + } + + // All set. + dataPool.Put(dataBuf.data) + return nil +} + +// lookupTagAndType looks up an existing message or creates a new one. 
+// +// This is called by recv after decoding the header. Any error returned will be +// propagating back to the caller. You may use messageByType directly as a +// lookupTagAndType function (by design). +type lookupTagAndType func(tag Tag, t MsgType) (message, error) + +// recv decodes a message from the socket. +// +// This is done in two parts, and is thus not safe for multiple callers. +// +// On a socket error, the special error type ErrSocket is returned. +// +// The tag value NoTag will always be returned if err is non-nil. +func recv(s *unet.Socket, msize uint32, lookup lookupTagAndType) (Tag, message, error) { + // Read a header. + // + // Since the send above is atomic, we must always receive control + // messages along with the header. This means we need to be careful + // about closing FDs during errors to prevent leaks. + var hdr [headerLength]byte + r := s.Reader(true) + r.EnableFDs(1) + + n, err := r.ReadVec([][]byte{hdr[:]}) + if err != nil { + r.CloseFDs() + return NoTag, nil, ErrSocket{err} + } + + fds, err := r.ExtractFDs() + if err != nil { + return NoTag, nil, ErrSocket{err} + } + defer func() { + // Close anything left open. The case where + // fds are caught and used is handled below, + // and the fds variable will be set to nil. + for _, fd := range fds { + syscall.Close(fd) + } + }() + r.EnableFDs(0) + + // Continuing reading for a short header. + for n < int(headerLength) { + cur, err := r.ReadVec([][]byte{hdr[n:]}) + if err != nil { + return NoTag, nil, ErrSocket{err} + } else if cur == 0 { + return NoTag, nil, ErrSocket{io.EOF} + } + n += cur + } + + // Decode the header. + headerBuf := buffer{data: hdr[:]} + size := headerBuf.Read32() + t := headerBuf.ReadMsgType() + tag := headerBuf.ReadTag() + if size < headerLength { + // The message is too small. + // + // See above: it's probably screwed. + return NoTag, nil, ErrNoValidMessage + } + if size > maximumLength || size > msize { + // The message is too big. 
+ return NoTag, nil, &ErrMessageTooLarge{size, msize} + } + remaining := size - headerLength + + // Find our message to decode. + m, err := lookup(tag, t) + if err != nil { + // Throw away the contents of this message. + if remaining > 0 { + io.Copy(ioutil.Discard, &io.LimitedReader{R: s, N: int64(remaining)}) + } + return tag, nil, err + } + + // Not yet initialized. + var dataBuf buffer + + // Read the rest of the payload. + // + // This requires some special care to ensure that the vectors all line + // up the way they should. We do this to minimize copying data around. + var vecs [][]byte + if payloader, ok := m.(payloader); ok { + fixedSize := payloader.FixedSize() + + // Do we need more than there is? + if fixedSize > remaining { + // This is not a valid message. + if remaining > 0 { + io.Copy(ioutil.Discard, &io.LimitedReader{R: s, N: int64(remaining)}) + } + return NoTag, nil, ErrNoValidMessage + } + + if fixedSize != 0 { + // Pull a data buffer from the pool. + data := dataPool.Get().([]byte) + if int(fixedSize) > len(data) { + // Create a larger data buffer, ensuring + // sufficient capicity for the message. + data = make([]byte, fixedSize) + defer dataPool.Put(data) + dataBuf = buffer{data: data} + vecs = append(vecs, data) + } else { + // Limit the data buffer, and make sure it + // gets filled before the payload buffer. + defer dataPool.Put(data) + dataBuf = buffer{data: data[:fixedSize]} + vecs = append(vecs, data[:fixedSize]) + } + } + + // Include the payload. + p := payloader.Payload() + if p == nil || len(p) != int(remaining-fixedSize) { + p = make([]byte, remaining-fixedSize) + payloader.SetPayload(p) + } + if len(p) > 0 { + vecs = append(vecs, p) + } + } else if remaining != 0 { + // Pull a data buffer from the pool. + data := dataPool.Get().([]byte) + if int(remaining) > len(data) { + // Create a larger data buffer. 
+ data = make([]byte, remaining) + defer dataPool.Put(data) + dataBuf = buffer{data: data} + vecs = append(vecs, data) + } else { + // Limit the data buffer. + defer dataPool.Put(data) + dataBuf = buffer{data: data[:remaining]} + vecs = append(vecs, data[:remaining]) + } + } + + if len(vecs) > 0 { + // Read the rest of the message. + // + // No need to handle a control message. + r := s.Reader(true) + for n := 0; n < int(remaining); { + cur, err := r.ReadVec(vecs) + if err != nil { + return NoTag, nil, ErrSocket{err} + } else if cur == 0 { + return NoTag, nil, ErrSocket{io.EOF} + } + n += cur + + // Consume iovecs. + for consumed := 0; consumed < cur; { + if len(vecs[0]) <= cur-consumed { + consumed += len(vecs[0]) + vecs = vecs[1:] + } else { + vecs[0] = vecs[0][cur-consumed:] + break + } + } + } + } + + // Decode the message data. + m.Decode(&dataBuf) + if dataBuf.isOverrun() { + // No need to drain the socket. + return NoTag, nil, ErrNoValidMessage + } + + // Save the file, if any came out. + if filer, ok := m.(filer); ok && len(fds) > 0 { + // Set the file object. + filer.SetFilePayload(fd.New(fds[0])) + + // Close the rest. We support only one. + for i := 1; i < len(fds); i++ { + syscall.Close(fds[i]) + } + + // Don't close in the defer. + fds = nil + } + + if log.IsLogging(log.Debug) { + log.Debugf("recv [FD %d] [Tag %06d] %s", s.FD(), tag, m.String()) + } + + // All set. + return tag, m, nil +} diff --git a/pkg/p9/transport_test.go b/pkg/p9/transport_test.go new file mode 100644 index 000000000..e3ee3e9bd --- /dev/null +++ b/pkg/p9/transport_test.go @@ -0,0 +1,184 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "io/ioutil" + "os" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +const ( + MsgTypeBadEncode = iota + 252 + MsgTypeBadDecode + MsgTypeUnregistered +) + +func TestSendRecv(t *testing.T) { + server, client, err := unet.SocketPair(false) + if err != nil { + t.Fatalf("socketpair got err %v expected nil", err) + } + defer server.Close() + defer client.Close() + + if err := send(client, Tag(1), &Tlopen{}); err != nil { + t.Fatalf("send got err %v expected nil", err) + } + + tag, m, err := recv(server, maximumLength, messageByType) + if err != nil { + t.Fatalf("recv got err %v expected nil", err) + } + if tag != Tag(1) { + t.Fatalf("got tag %v expected 1", tag) + } + if _, ok := m.(*Tlopen); !ok { + t.Fatalf("got message %v expected *Tlopen", m) + } +} + +// badDecode overruns on decode. 
+type badDecode struct{} + +func (*badDecode) Decode(b *buffer) { b.markOverrun() } +func (*badDecode) Encode(b *buffer) {} +func (*badDecode) Type() MsgType { return MsgTypeBadDecode } +func (*badDecode) String() string { return "badDecode{}" } + +func TestRecvOverrun(t *testing.T) { + server, client, err := unet.SocketPair(false) + if err != nil { + t.Fatalf("socketpair got err %v expected nil", err) + } + defer server.Close() + defer client.Close() + + if err := send(client, Tag(1), &badDecode{}); err != nil { + t.Fatalf("send got err %v expected nil", err) + } + + if _, _, err := recv(server, maximumLength, messageByType); err != ErrNoValidMessage { + t.Fatalf("recv got err %v expected ErrNoValidMessage", err) + } +} + +// unregistered is not registered on decode. +type unregistered struct{} + +func (*unregistered) Decode(b *buffer) {} +func (*unregistered) Encode(b *buffer) {} +func (*unregistered) Type() MsgType { return MsgTypeUnregistered } +func (*unregistered) String() string { return "unregistered{}" } + +func TestRecvInvalidType(t *testing.T) { + server, client, err := unet.SocketPair(false) + if err != nil { + t.Fatalf("socketpair got err %v expected nil", err) + } + defer server.Close() + defer client.Close() + + if err := send(client, Tag(1), &unregistered{}); err != nil { + t.Fatalf("send got err %v expected nil", err) + } + + _, _, err = recv(server, maximumLength, messageByType) + if _, ok := err.(*ErrInvalidMsgType); !ok { + t.Fatalf("recv got err %v expected ErrInvalidMsgType", err) + } +} + +func TestSendRecvWithFile(t *testing.T) { + server, client, err := unet.SocketPair(false) + if err != nil { + t.Fatalf("socketpair got err %v expected nil", err) + } + defer server.Close() + defer client.Close() + + // Create a tempfile. 
+ osf, err := ioutil.TempFile("", "p9") + if err != nil { + t.Fatalf("tempfile got err %v expected nil", err) + } + os.Remove(osf.Name()) + f, err := fd.NewFromFile(osf) + osf.Close() + if err != nil { + t.Fatalf("unable to create file: %v", err) + } + + if err := send(client, Tag(1), &Rlopen{File: f}); err != nil { + t.Fatalf("send got err %v expected nil", err) + } + + // Enable withFile. + tag, m, err := recv(server, maximumLength, messageByType) + if err != nil { + t.Fatalf("recv got err %v expected nil", err) + } + if tag != Tag(1) { + t.Fatalf("got tag %v expected 1", tag) + } + rlopen, ok := m.(*Rlopen) + if !ok { + t.Fatalf("got m %v expected *Rlopen", m) + } + if rlopen.File == nil { + t.Fatalf("got nil file expected non-nil") + } +} + +func TestRecvClosed(t *testing.T) { + server, client, err := unet.SocketPair(false) + if err != nil { + t.Fatalf("socketpair got err %v expected nil", err) + } + defer server.Close() + client.Close() + + _, _, err = recv(server, maximumLength, messageByType) + if err == nil { + t.Fatalf("got err nil expected non-nil") + } + if _, ok := err.(ErrSocket); !ok { + t.Fatalf("got err %v expected ErrSocket", err) + } +} + +func TestSendClosed(t *testing.T) { + server, client, err := unet.SocketPair(false) + if err != nil { + t.Fatalf("socketpair got err %v expected nil", err) + } + server.Close() + defer client.Close() + + err = send(client, Tag(1), &Tlopen{}) + if err == nil { + t.Fatalf("send got err nil expected non-nil") + } + if _, ok := err.(ErrSocket); !ok { + t.Fatalf("got err %v expected ErrSocket", err) + } +} + +func init() { + register(&badDecode{}) +} diff --git a/pkg/p9/version.go b/pkg/p9/version.go new file mode 100644 index 000000000..8783eaa7e --- /dev/null +++ b/pkg/p9/version.go @@ -0,0 +1,145 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "fmt" + "strconv" + "strings" +) + +const ( + // highestSupportedVersion is the highest supported version X in a + // version string of the format 9P2000.L.Google.X. + // + // Clients are expected to start requesting this version number and + // to continuously decrement it until a Tversion request succeeds. + highestSupportedVersion uint32 = 6 + + // lowestSupportedVersion is the lowest supported version X in a + // version string of the format 9P2000.L.Google.X. + // + // Clients are free to send a Tversion request at a version below this + // value but are expected to encounter an Rlerror in response. + lowestSupportedVersion uint32 = 0 + + // baseVersion is the base version of 9P that this package must always + // support. It is equivalent to 9P2000.L.Google.0. + baseVersion = "9P2000.L" +) + +// HighestVersionString returns the highest possible version string that a client +// may request or a server may support. +func HighestVersionString() string { + return versionString(highestSupportedVersion) +} + +// parseVersion parses a Tversion version string into a numeric version number +// if the version string is supported by p9. Otherwise returns (0, false). +// +// From Tversion(9P): "Version strings are defined such that, if the client string +// contains one or more period characters, the initial substring up to but not +// including any single period in the version string defines a version of the protocol." 
+// +// p9 intentionally diverges from this and always requires that the version string +// start with 9P2000.L to express that it is always compatible with 9P2000.L. The +// only supported versions extensions are of the format 9p2000.L.Google.X where X +// is an ever increasing version counter. +// +// Version 9P2000.L.Google.0 implies 9P2000.L. +// +// New versions must always be a strict superset of 9P2000.L. A version increase must +// define a predicate representing the feature extension introduced by that version. The +// predicate must be commented and should take the format: +// +// // VersionSupportsX returns true if version v supports X and must be checked when ... +// func VersionSupportsX(v int32) bool { +// ... +// ) +func parseVersion(str string) (uint32, bool) { + // Special case the base version which lacks the ".Google.X" suffix. This + // version always means version 0. + if str == baseVersion { + return 0, true + } + substr := strings.Split(str, ".") + if len(substr) != 4 { + return 0, false + } + if substr[0] != "9P2000" || substr[1] != "L" || substr[2] != "Google" || len(substr[3]) == 0 { + return 0, false + } + version, err := strconv.ParseUint(substr[3], 10, 32) + if err != nil { + return 0, false + } + return uint32(version), true +} + +// versionString formats a p9 version number into a Tversion version string. +func versionString(version uint32) string { + // Special case the base version so that clients expecting this string + // instead of the 9P2000.L.Google.0 equivalent get it. This is important + // for backwards compatibility with legacy servers that check for exactly + // the baseVersion and allow nothing else. + if version == 0 { + return baseVersion + } + return fmt.Sprintf("9P2000.L.Google.%d", version) +} + +// VersionSupportsTflushf returns true if version v supports the Tflushf message. +// This predicate must be checked by clients before attempting to make a Tflushf +// request. 
If this predicate returns false, then clients may safely no-op. +func VersionSupportsTflushf(v uint32) bool { + return v >= 1 +} + +// versionSupportsTwalkgetattr returns true if version v supports the +// Twalkgetattr message. This predicate must be checked by clients before +// attempting to make a Twalkgetattr request. +func versionSupportsTwalkgetattr(v uint32) bool { + return v >= 2 +} + +// versionSupportsTucreation returns true if version v supports the Tucreation +// messages (Tucreate, Tusymlink, Tumkdir, Tumknod). This predicate must be +// checked by clients before attempting to make a Tucreation request. +// If Tucreation messages are not supported, their non-UID supporting +// counterparts (Tlcreate, Tsymlink, Tmkdir, Tmknod) should be used. +func versionSupportsTucreation(v uint32) bool { + return v >= 3 +} + +// VersionSupportsConnect returns true if version v supports the Tlconnect +// message. This predicate must be checked by clients +// before attempting to make a Tlconnect request. If Tlconnect messages are not +// supported, Tlopen should be used. +func VersionSupportsConnect(v uint32) bool { + return v >= 4 +} + +// VersionSupportsAnonymous returns true if version v supports Tlconnect +// with the AnonymousSocket mode. This predicate must be checked by clients +// before attempting to use the AnonymousSocket Tlconnect mode. +func VersionSupportsAnonymous(v uint32) bool { + return v >= 5 +} + +// VersionSupportsMultiUser returns true if version v supports multi-user fake +// directory permissions and ID values. +func VersionSupportsMultiUser(v uint32) bool { + return v >= 6 +} diff --git a/pkg/p9/version_test.go b/pkg/p9/version_test.go new file mode 100644 index 000000000..634ac3ca5 --- /dev/null +++ b/pkg/p9/version_test.go @@ -0,0 +1,145 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package p9 + +import ( + "testing" +) + +func TestVersionNumberEquivalent(t *testing.T) { + for i := uint32(0); i < 1024; i++ { + str := versionString(i) + version, ok := parseVersion(str) + if !ok { + t.Errorf("#%d: parseVersion(%q) failed, want success", i, str) + continue + } + if i != version { + t.Errorf("#%d: got version %d, want %d", i, i, version) + } + } +} + +func TestVersionStringEquivalent(t *testing.T) { + // There is one case where the version is not equivalent on purpose, + // that is 9P2000.L.Google.0. It is not equivalent because versionString + // must always return the more generic 9P2000.L for legacy servers that + // check for it. See net/9p/client.c. 
+ str := "9P2000.L.Google.0" + version, ok := parseVersion(str) + if !ok { + t.Errorf("parseVersion(%q) failed, want success", str) + } + if got := versionString(version); got != "9P2000.L" { + t.Errorf("versionString(%d) got %q, want %q", version, got, "9P2000.L") + } + + for _, test := range []struct { + versionString string + }{ + { + versionString: "9P2000.L", + }, + { + versionString: "9P2000.L.Google.1", + }, + { + versionString: "9P2000.L.Google.347823894", + }, + } { + version, ok := parseVersion(test.versionString) + if !ok { + t.Errorf("parseVersion(%q) failed, want success", test.versionString) + continue + } + if got := versionString(version); got != test.versionString { + t.Errorf("versionString(%d) got %q, want %q", version, got, test.versionString) + } + } +} + +func TestParseVersion(t *testing.T) { + for _, test := range []struct { + versionString string + expectSuccess bool + expectedVersion uint32 + }{ + { + versionString: "9P", + expectSuccess: false, + }, + { + versionString: "9P.L", + expectSuccess: false, + }, + { + versionString: "9P200.L", + expectSuccess: false, + }, + { + versionString: "9P2000", + expectSuccess: false, + }, + { + versionString: "9P2000.L.Google.-1", + expectSuccess: false, + }, + { + versionString: "9P2000.L.Google.", + expectSuccess: false, + }, + { + versionString: "9P2000.L.Google.3546343826724305832", + expectSuccess: false, + }, + { + versionString: "9P2001.L", + expectSuccess: false, + }, + { + versionString: "9P2000.L", + expectSuccess: true, + expectedVersion: 0, + }, + { + versionString: "9P2000.L.Google.0", + expectSuccess: true, + expectedVersion: 0, + }, + { + versionString: "9P2000.L.Google.1", + expectSuccess: true, + expectedVersion: 1, + }, + } { + version, ok := parseVersion(test.versionString) + if ok != test.expectSuccess { + t.Errorf("parseVersion(%q) got (_, %v), want (_, %v)", test.versionString, ok, test.expectSuccess) + continue + } + if !test.expectSuccess { + continue + } + if version != 
test.expectedVersion { + t.Errorf("parseVersion(%q) got (%d, _), want (%d, _)", test.versionString, version, test.expectedVersion) + } + } +} + +func BenchmarkParseVersion(b *testing.B) { + for n := 0; n < b.N; n++ { + parseVersion("9P2000.L.Google.1") + } +} diff --git a/pkg/refs/BUILD b/pkg/refs/BUILD new file mode 100644 index 000000000..4b7c9345d --- /dev/null +++ b/pkg/refs/BUILD @@ -0,0 +1,37 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "refs_state", + srcs = [ + "refcounter.go", + "refcounter_state.go", + ], + out = "refs_state.go", + package = "refs", +) + +go_library( + name = "refs", + srcs = [ + "refcounter.go", + "refcounter_state.go", + "refs_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/refs", + visibility = ["//:sandbox"], + deps = [ + "//pkg/ilist", + "//pkg/log", + "//pkg/state", + ], +) + +go_test( + name = "refs_test", + size = "small", + srcs = ["refcounter_test.go"], + embed = [":refs"], +) diff --git a/pkg/refs/refcounter.go b/pkg/refs/refcounter.go new file mode 100644 index 000000000..1036553c7 --- /dev/null +++ b/pkg/refs/refcounter.go @@ -0,0 +1,299 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package refs defines an interface for reference counted objects. 
It +// also provides a drop-in implementation called AtomicRefCount. +package refs + +import ( + "reflect" + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/ilist" +) + +// RefCounter is the interface to be implemented by objects that are reference +// counted. +type RefCounter interface { + // IncRef increments the reference counter on the object. + IncRef() + + // DecRef decrements the reference counter on the object. + // + // Note that AtomicRefCounter.DecRef() does not support destructors. + // If a type has a destructor, it must implement its own DecRef() + // method and call AtomicRefCounter.DecRefWithDestructor(destructor). + DecRef() + + // TryIncRef attempts to increase the reference counter on the object, + // but may fail if all references have already been dropped. This + // should be used only in special circumstances, such as WeakRefs. + TryIncRef() bool + + // addWeakRef adds the given weak reference. Note that you should have a + // reference to the object when calling this method. + addWeakRef(*WeakRef) + + // dropWeakRef drops the given weak reference. Note that you should have + // a reference to the object when calling this method. + dropWeakRef(*WeakRef) +} + +// A WeakRefUser is notified when the last non-weak reference is dropped. +type WeakRefUser interface { + // WeakRefGone is called when the last non-weak reference is dropped. + WeakRefGone() +} + +// WeakRef is a weak reference. +type WeakRef struct { + ilist.Entry `state:"nosave"` + + // obj is an atomic value that points to the refCounter. + obj atomic.Value `state:".(savedReference)"` + + // user is notified when the weak ref is zapped by the object getting + // destroyed. + user WeakRefUser +} + +// weakRefPool is a pool of weak references to avoid allocations on the hot path. +var weakRefPool = sync.Pool{ + New: func() interface{} { + return &WeakRef{} + }, +} + +// NewWeakRef acquires a weak reference for the given object. 
// NewWeakRef acquires a weak reference for the given object.
//
// An optional user will be notified when the last non-weak reference is
// dropped.
//
// Note that you must hold a reference to the object prior to getting a weak
// reference. (But you may drop the non-weak reference after that.)
func NewWeakRef(rc RefCounter, u WeakRefUser) *WeakRef {
	// WeakRefs are pooled; init resets the recycled value for this object.
	w := weakRefPool.Get().(*WeakRef)
	w.init(rc, u)
	return w
}

// get attempts to get a normal reference to the underlying object, and returns
// the object. If this weak reference has already been zapped (the object has
// been destroyed) then false is returned. If the object still exists, then
// true is returned.
func (w *WeakRef) get() (RefCounter, bool) {
	rc := w.obj.Load().(RefCounter)
	if v := reflect.ValueOf(rc); v == reflect.Zero(v.Type()) {
		// This pointer has already been zapped by zap() below. We do
		// this to ensure that the GC can collect the underlying
		// RefCounter objects and they don't hog resources.
		return nil, false
	}
	if !rc.TryIncRef() {
		// Not zapped yet, but the object is already on its way to
		// destruction: no new real references may be taken.
		return nil, true
	}
	return rc, true
}

// Get attempts to get a normal reference to the underlying object, and returns
// the object. If this fails (the object no longer exists), then nil will be
// returned instead.
func (w *WeakRef) Get() RefCounter {
	rc, _ := w.get()
	return rc
}

// Drop drops this weak reference. You should always call drop when you are
// finished with the weak reference. You may not use this object after calling
// drop.
func (w *WeakRef) Drop() {
	rc, ok := w.get()
	if !ok {
		// We've been zapped already. When the refcounter has called
		// zap, we're guaranteed it's not holding references.
		weakRefPool.Put(w)
		return
	}
	if rc == nil {
		// The object is in the process of being destroyed. We can't
		// remove this from the object's list, nor can we return this
		// object to the pool. It'll just be garbage collected. This is
		// a rare edge case, so it's not a big deal.
		return
	}

	// At this point, we have a reference on the object. So destruction
	// of the object (and zapping this weak reference) can't race here.
	rc.dropWeakRef(w)

	// And now we aren't on the object's list of weak references. So it
	// won't zap us if this causes the reference count to drop to zero.
	rc.DecRef()

	// Return to the pool.
	weakRefPool.Put(w)
}

// init initializes this weak reference.
func (w *WeakRef) init(rc RefCounter, u WeakRefUser) {
	// Reset the contents of the weak reference.
	// This is important because we are resetting the atomic value type.
	// Otherwise, we could panic here if obj is different than what it was
	// the last time this was used.
	*w = WeakRef{}
	w.user = u
	w.obj.Store(rc)

	// In the load path, we may already have a nil value. So we need to
	// check whether or not that is the case before calling addWeakRef.
	if v := reflect.ValueOf(rc); v != reflect.Zero(v.Type()) {
		rc.addWeakRef(w)
	}
}

// zap zaps this weak reference.
func (w *WeakRef) zap() {
	// We need to be careful about types here.
	// So reflect is involved. But it's not that bad.
	//
	// Storing a *typed* nil (rather than a bare nil) keeps the later
	// Load().(RefCounter) assertion in get() valid.
	rc := w.obj.Load()
	typ := reflect.TypeOf(rc)
	w.obj.Store(reflect.Zero(typ).Interface())
}

// AtomicRefCount keeps a reference count using atomic operations and calls the
// destructor when the count reaches zero.
//
// N.B. To allow the zero-object to be initialized, the count is offset by
// 1, that is, when refCount is n, there are really n+1 references.
type AtomicRefCount struct {
	// refCount is composed of two fields:
	//
	// [32-bit speculative references]:[32-bit real references]
	//
	// Speculative references are used for TryIncRef, to avoid a
	// CompareAndSwap loop. See IncRef, DecRef and TryIncRef for details of
	// how these fields are used.
	refCount int64

	// mu protects the list below.
	mu sync.Mutex `state:"nosave"`

	// weakRefs is our collection of weak references.
	weakRefs ilist.List `state:"nosave"`
}

// TestReadRefs returns the current reference count of r. Use only for tests.
func (r *AtomicRefCount) TestReadRefs() int64 {
	return atomic.LoadInt64(&r.refCount)
}

// IncRef increments this object's reference count. While the count is kept
// greater than zero, the destructor doesn't get called.
//
// The sanity check here is limited to real references, since if they have
// dropped beneath zero then the object should have been destroyed.
func (r *AtomicRefCount) IncRef() {
	if v := atomic.AddInt64(&r.refCount, 1); v <= 0 {
		panic("Incrementing non-positive ref count")
	}
}

// TryIncRef attempts to increment the reference count, *unless the count has
// already reached zero*. If false is returned, then the object has already
// been destroyed, and the weak reference is no longer valid. If true is
// returned then a valid reference is now held on the object.
//
// To do this safely without a loop, a speculative reference is first acquired
// on the object. This allows multiple concurrent TryIncRef calls to
// distinguish other TryIncRef calls from genuine references held.
func (r *AtomicRefCount) TryIncRef() bool {
	const speculativeRef = 1 << 32
	v := atomic.AddInt64(&r.refCount, speculativeRef)
	if int32(v) < 0 {
		// This object has already been freed. Drop the speculative
		// reference we just took.
		atomic.AddInt64(&r.refCount, -speculativeRef)
		return false
	}

	// Turn into a real reference.
	atomic.AddInt64(&r.refCount, -speculativeRef+1)
	return true
}

// addWeakRef adds the given weak reference.
func (r *AtomicRefCount) addWeakRef(w *WeakRef) {
	r.mu.Lock()
	r.weakRefs.PushBack(w)
	r.mu.Unlock()
}

// dropWeakRef drops the given weak reference.
func (r *AtomicRefCount) dropWeakRef(w *WeakRef) {
	r.mu.Lock()
	r.weakRefs.Remove(w)
	r.mu.Unlock()
}

// DecRefWithDestructor decrements the object's reference count. If the
// resulting count is negative and the destructor is not nil, then the
// destructor will be called.
//
// Note that speculative references are counted here. Since they were added
// prior to real references reaching zero, they will successfully convert to
// real references. In other words, we see speculative references only in the
// following case:
//
// A: TryIncRef [speculative increase => sees non-negative references]
// B: DecRef [real decrease]
// A: TryIncRef [transform speculative to real]
//
func (r *AtomicRefCount) DecRefWithDestructor(destroy func()) {
	switch v := atomic.AddInt64(&r.refCount, -1); {
	case v < -1:
		panic("Decrementing non-positive ref count")

	case v == -1:
		// Zap weak references. Note that at this point, all weak
		// references are already invalid. That is, TryIncRef() will
		// return false due to the reference count check.
		r.mu.Lock()
		for !r.weakRefs.Empty() {
			w := r.weakRefs.Front().(*WeakRef)
			// Capture the callback because w cannot be touched
			// after it's zapped -- the owner is free to reuse it
			// after that.
			user := w.user
			r.weakRefs.Remove(w)
			w.zap()

			if user != nil {
				// Invoke the callback with mu released, so
				// the callback itself may take references or
				// locks.
				r.mu.Unlock()
				user.WeakRefGone()
				r.mu.Lock()
			}
		}
		r.mu.Unlock()

		// Call the destructor.
		if destroy != nil {
			destroy()
		}
	}
}

// DecRef decrements this object's reference count.
func (r *AtomicRefCount) DecRef() {
	r.DecRefWithDestructor(nil)
}
// ---- pkg/refs/refcounter_state.go ----

// savedReference holds the typed target of a weak reference for
// serialization (used during state save/restore).
type savedReference struct {
	obj interface{}
}

// saveObj saves the weak reference target.
func (w *WeakRef) saveObj() savedReference {
	// We load the object directly, because it is typed. This will be
	// serialized and loaded as a typed value.
	return savedReference{w.obj.Load()}
}

// loadObj restores the weak reference target.
func (w *WeakRef) loadObj(v savedReference) {
	// See note above. This will be serialized and loaded typed. So we're okay
	// as long as refs aren't changing during save and load (which they should
	// not be).
	//
	// w.user is loaded before loadObj is called.
	w.init(v.obj.(RefCounter), w.user)
}

// ---- pkg/refs/refcounter_test.go ----

// testCounter is a reference-counted test object that records whether its
// destructor has been called.
type testCounter struct {
	AtomicRefCount

	// mu protects the boolean below.
	mu sync.Mutex

	// destroyed indicates whether this was destroyed.
	destroyed bool
}

// DecRef drops a reference, running the destructor when the last one goes.
func (t *testCounter) DecRef() {
	t.AtomicRefCount.DecRefWithDestructor(t.destroy)
}

// destroy is the destructor; it just records that it ran.
func (t *testCounter) destroy() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.destroyed = true
}

// IsDestroyed reports whether the destructor has run.
func (t *testCounter) IsDestroyed() bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.destroyed
}

func newTestCounter() *testCounter {
	return &testCounter{destroyed: false}
}

// TestOneRef checks that dropping the initial reference destroys the object.
func TestOneRef(t *testing.T) {
	tc := newTestCounter()
	tc.DecRef()

	if !tc.IsDestroyed() {
		t.Errorf("object should have been destroyed")
	}
}

// TestTwoRefs checks that destruction happens only on the second drop.
func TestTwoRefs(t *testing.T) {
	tc := newTestCounter()
	tc.IncRef()
	tc.DecRef()
	tc.DecRef()

	if !tc.IsDestroyed() {
		t.Errorf("object should have been destroyed")
	}
}

// TestMultiRefs checks interleaved IncRef/DecRef pairs.
func TestMultiRefs(t *testing.T) {
	tc := newTestCounter()
	tc.IncRef()
	tc.DecRef()

	tc.IncRef()
	tc.DecRef()

	tc.DecRef()

	if !tc.IsDestroyed() {
		t.Errorf("object should have been destroyed")
	}
}

// TestWeakRef checks that a weak reference resolves while the object is alive
// and stops resolving after the last real reference is dropped.
func TestWeakRef(t *testing.T) {
	tc := newTestCounter()
	w := NewWeakRef(tc, nil)

	// Try resolving.
	if x := w.Get(); x == nil {
		t.Errorf("weak reference didn't resolve: expected %v, got nil", tc)
	} else {
		x.DecRef()
	}

	// Try resolving again.
	if x := w.Get(); x == nil {
		t.Errorf("weak reference didn't resolve: expected %v, got nil", tc)
	} else {
		x.DecRef()
	}

	// Shouldn't be destroyed yet. (Can't continue if this fails.)
	if tc.IsDestroyed() {
		t.Fatalf("original object destroyed earlier than expected")
	}

	// Drop the original reference.
	tc.DecRef()

	// Assert destroyed.
	if !tc.IsDestroyed() {
		t.Errorf("original object not destroyed as expected")
	}

	// Shouldn't be anything.
	if x := w.Get(); x != nil {
		t.Errorf("weak reference resolved: expected nil, got %v", x)
	}
}

// TestWeakRefDrop checks that Drop removes the weak reference from the
// object's list.
func TestWeakRefDrop(t *testing.T) {
	tc := newTestCounter()
	w := NewWeakRef(tc, nil)
	w.Drop()

	// Just assert the list is empty.
	if !tc.weakRefs.Empty() {
		t.Errorf("weak reference not dropped")
	}

	// Drop the original reference.
	tc.DecRef()
}

// testWeakRefUser invokes a configurable callback when the weak reference
// target goes away.
type testWeakRefUser struct {
	weakRefGone func()
}

// WeakRefGone implements WeakRefUser.WeakRefGone.
func (u *testWeakRefUser) WeakRefGone() {
	u.weakRefGone()
}

// TestCallback checks that the user callback fires on destruction, that the
// weak reference is zapped before the callback runs, and that the internal
// mutex is not held during the callback.
func TestCallback(t *testing.T) {
	called := false
	tc := newTestCounter()
	var w *WeakRef
	w = NewWeakRef(tc, &testWeakRefUser{func() {
		called = true

		// Check that the weak ref has been zapped.
		rc := w.obj.Load().(RefCounter)
		if v := reflect.ValueOf(rc); v != reflect.Zero(v.Type()) {
			t.Fatalf("Callback called with non-nil ptr")
		}

		// Check that we're not holding the mutex by acquiring and
		// releasing it.
		tc.mu.Lock()
		tc.mu.Unlock()
	}})

	// Drop the original reference, this must trigger the callback.
	tc.DecRef()

	if !called {
		t.Fatalf("Callback not called")
	}
}
const (
	// violationLabel is added to the program to take action on a violation.
	violationLabel = "violation"

	// allowLabel is added to the program to allow the syscall to take place.
	allowLabel = "allow"
)

// Install generates BPF code based on the set of syscalls provided. It only
// allows syscalls that conform to the specification (*) and generates SIGSYS
// trap unless kill is set.
//
// (*) The current implementation only checks the syscall number. It does NOT
// validate any of the arguments.
func Install(syscalls []uintptr, kill bool) error {
	// Sort syscalls and remove duplicates to build the BST.
	sort.Slice(syscalls, func(i, j int) bool { return syscalls[i] < syscalls[j] })
	syscalls = filterUnique(syscalls)

	log.Infof("Installing seccomp filters for %d syscalls (kill=%t)", len(syscalls), kill)
	for _, s := range syscalls {
		log.Infof("syscall filter: %v", s)
	}

	instrs, err := buildProgram(syscalls, kill)
	if err != nil {
		return err
	}
	if log.IsLogging(log.Debug) {
		programStr, err := bpf.DecodeProgram(instrs)
		if err != nil {
			programStr = fmt.Sprintf("Error: %v\n%s", err, programStr)
		}
		log.Debugf("Seccomp program dump:\n%s", programStr)
	}

	if err := seccomp(instrs); err != nil {
		return err
	}

	log.Infof("Seccomp filters installed.")
	return nil
}

// buildProgram builds a BPF program that whitelists all given syscalls.
//
// Precondition: syscalls must be sorted and unique.
func buildProgram(syscalls []uintptr, kill bool) ([]linux.BPFInstruction, error) {
	const archOffset = 4 // offsetof(seccomp_data, arch)
	program := bpf.NewProgramBuilder()
	violationAction := uint32(linux.SECCOMP_RET_KILL)
	if !kill {
		violationAction = linux.SECCOMP_RET_TRAP
	}

	// Be paranoid and check that syscall is done in the expected architecture.
	//
	// A = seccomp_data.arch
	// if (A != AUDIT_ARCH_X86_64) goto violation
	program.AddStmt(bpf.Ld|bpf.Abs|bpf.W, archOffset)
	program.AddJumpFalseLabel(bpf.Jmp|bpf.Jeq|bpf.K, linux.AUDIT_ARCH_X86_64, 0, violationLabel)

	if err := buildIndex(syscalls, program); err != nil {
		return nil, err
	}

	// violation: return violationAction
	if err := program.AddLabel(violationLabel); err != nil {
		return nil, err
	}
	program.AddStmt(bpf.Ret|bpf.K, violationAction)

	// allow: return SECCOMP_RET_ALLOW
	if err := program.AddLabel(allowLabel); err != nil {
		return nil, err
	}
	program.AddStmt(bpf.Ret|bpf.K, linux.SECCOMP_RET_ALLOW)

	return program.Instructions()
}

// filterUnique filters unique system calls.
//
// Precondition: syscalls must be sorted.
func filterUnique(syscalls []uintptr) []uintptr {
	filtered := make([]uintptr, 0, len(syscalls))
	for i := 0; i < len(syscalls); i++ {
		if len(filtered) > 0 && syscalls[i] == filtered[len(filtered)-1] {
			// This call has already been inserted, skip.
			continue
		}
		filtered = append(filtered, syscalls[i])
	}
	return filtered
}

// buildIndex builds a BST to quickly search through all syscalls that are whitelisted.
//
// Precondition: syscalls must be sorted and unique.
func buildIndex(syscalls []uintptr, program *bpf.ProgramBuilder) error {
	root := createBST(syscalls)

	// Load syscall number into A and run through BST.
	//
	// A = seccomp_data.nr
	program.AddStmt(bpf.Ld|bpf.Abs|bpf.W, 0)
	return root.buildBSTProgram(program, true)
}

// createBST converts sorted syscall slice into a balanced BST.
// Panics if syscalls is empty.
func createBST(syscalls []uintptr) *node {
	i := len(syscalls) / 2
	parent := node{value: syscalls[i]}
	if i > 0 {
		parent.left = createBST(syscalls[:i])
	}
	if i+1 < len(syscalls) {
		parent.right = createBST(syscalls[i+1:])
	}
	return &parent
}

// node represents a tree node.
type node struct {
	value uintptr
	left  *node
	right *node
}

// label returns the label corresponding to this node. If node is nil (syscall not present),
// violationLabel is returned for convenience.
func (n *node) label() string {
	if n == nil {
		return violationLabel
	}
	return fmt.Sprintf("index_%v", n.value)
}

// buildBSTProgram converts a binary tree started in 'root' into BPF code. The outline of the code
// is as follows:
//
// // SYS_PIPE(22), root
// (A == 22) ? goto allow : continue
// (A > 22) ? goto index_35 : goto index_9
//
// index_9: // SYS_MMAP(9), leaf
// (A == 9) ? goto allow : goto violation
//
// index_35: // SYS_NANOSLEEP(35), single child
// (A == 35) ? goto allow : continue
// (A > 35) ? goto index_50 : goto violation
//
// index_50: // SYS_LISTEN(50), leaf
// (A == 50) ? goto allow : goto violation
//
func (n *node) buildBSTProgram(program *bpf.ProgramBuilder, root bool) error {
	if n == nil {
		return nil
	}

	// Root node is never referenced by label, skip it.
	if !root {
		if err := program.AddLabel(n.label()); err != nil {
			return err
		}
	}

	// Leaf nodes don't require extra check, they either allow or violate!
	if n.left == nil && n.right == nil {
		program.AddJumpLabels(bpf.Jmp|bpf.Jeq|bpf.K, uint32(n.value), allowLabel, violationLabel)
		return nil
	}

	// Non-leaf node. Allows syscall if it matches, check which turn to take otherwise. Note
	// that 'violationLabel' is returned for nil children.
	program.AddJumpTrueLabel(bpf.Jmp|bpf.Jeq|bpf.K, uint32(n.value), allowLabel, 0)
	program.AddJumpLabels(bpf.Jmp|bpf.Jgt|bpf.K, uint32(n.value), n.right.label(), n.left.label())

	if err := n.left.buildBSTProgram(program, false); err != nil {
		return err
	}
	return n.right.buildBSTProgram(program, false)
}
+ +package seccomp + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "os/exec" + "sort" + "strings" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/bpf" +) + +type seccompData struct { + nr uint32 + arch uint32 + instructionPointer uint64 + args [6]uint64 +} + +// newVictim makes a victim binary. +func newVictim() (string, error) { + f, err := ioutil.TempFile("", "victim") + if err != nil { + return "", err + } + defer f.Close() + path := f.Name() + if _, err := io.Copy(f, bytes.NewBuffer(victimData)); err != nil { + os.Remove(path) + return "", err + } + if err := os.Chmod(path, 0755); err != nil { + os.Remove(path) + return "", err + } + return path, nil +} + +// asInput converts a seccompData to a bpf.Input. +func (d *seccompData) asInput() bpf.Input { + return bpf.InputBytes{binary.Marshal(nil, binary.LittleEndian, d), binary.LittleEndian} +} + +func TestBasic(t *testing.T) { + type spec struct { + // desc is the test's description. + desc string + + // data is the input data. + data seccompData + + // want is the expected return value of the BPF program. + want uint32 + } + + for _, test := range []struct { + // filters are the set of syscall that are allowed. 
+ filters []uintptr + kill bool + specs []spec + }{ + { + filters: []uintptr{1}, + kill: false, + specs: []spec{ + { + desc: "Single syscall allowed", + data: seccompData{nr: 1, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_ALLOW, + }, + { + desc: "Single syscall disallowed", + data: seccompData{nr: 2, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_TRAP, + }, + }, + }, + { + filters: []uintptr{1, 3, 5}, + kill: false, + specs: []spec{ + { + desc: "Multiple syscalls allowed (1)", + data: seccompData{nr: 1, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_ALLOW, + }, + { + desc: "Multiple syscalls allowed (3)", + data: seccompData{nr: 3, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_ALLOW, + }, + { + desc: "Multiple syscalls allowed (5)", + data: seccompData{nr: 5, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_ALLOW, + }, + { + desc: "Multiple syscalls disallowed (0)", + data: seccompData{nr: 0, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_TRAP, + }, + { + desc: "Multiple syscalls disallowed (2)", + data: seccompData{nr: 2, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_TRAP, + }, + { + desc: "Multiple syscalls disallowed (4)", + data: seccompData{nr: 4, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_TRAP, + }, + { + desc: "Multiple syscalls disallowed (6)", + data: seccompData{nr: 6, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_TRAP, + }, + { + desc: "Multiple syscalls disallowed (100)", + data: seccompData{nr: 100, arch: linux.AUDIT_ARCH_X86_64}, + want: linux.SECCOMP_RET_TRAP, + }, + }, + }, + { + filters: []uintptr{1}, + kill: false, + specs: []spec{ + { + desc: "Wrong architecture", + data: seccompData{nr: 1, arch: 123}, + want: linux.SECCOMP_RET_TRAP, + }, + }, + }, + { + filters: []uintptr{1}, + kill: true, + specs: []spec{ + { + desc: "Syscall disallowed, action kill", + data: seccompData{nr: 2, arch: linux.AUDIT_ARCH_X86_64}, + want: 
linux.SECCOMP_RET_KILL, + }, + }, + }, + } { + sort.Slice(test.filters, func(i, j int) bool { return test.filters[i] < test.filters[j] }) + instrs, err := buildProgram(test.filters, test.kill) + if err != nil { + t.Errorf("%s: buildProgram() got error: %v", test.specs[0].desc, err) + continue + } + p, err := bpf.Compile(instrs) + if err != nil { + t.Errorf("%s: bpf.Compile() got error: %v", test.specs[0].desc, err) + continue + } + for _, spec := range test.specs { + got, err := bpf.Exec(p, spec.data.asInput()) + if err != nil { + t.Errorf("%s: bpf.Exec() got error: %v", spec.desc, err) + continue + } + if got != spec.want { + t.Errorf("%s: bpd.Exec() = %d, want: %d", spec.desc, got, spec.want) + } + } + } +} + +func TestRandom(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + size := rand.Intn(50) + 1 + syscalls := make([]uintptr, 0, size) + syscallMap := make(map[uintptr]struct{}) + for len(syscalls) < size { + n := uintptr(rand.Intn(200)) + if _, ok := syscallMap[n]; !ok { + syscalls = append(syscalls, n) + syscallMap[n] = struct{}{} + } + } + + sort.Slice(syscalls, func(i, j int) bool { return syscalls[i] < syscalls[j] }) + fmt.Printf("Testing filters: %v", syscalls) + instrs, err := buildProgram(syscalls, false) + if err != nil { + t.Fatalf("buildProgram() got error: %v", err) + } + p, err := bpf.Compile(instrs) + if err != nil { + t.Fatalf("bpf.Compile() got error: %v", err) + } + for i := uint32(0); i < 200; i++ { + data := seccompData{nr: i, arch: linux.AUDIT_ARCH_X86_64} + got, err := bpf.Exec(p, data.asInput()) + if err != nil { + t.Errorf("bpf.Exec() got error: %v, for syscall %d", err, i) + continue + } + want := uint32(linux.SECCOMP_RET_TRAP) + if _, ok := syscallMap[uintptr(i)]; ok { + want = linux.SECCOMP_RET_ALLOW + } + if got != want { + t.Errorf("bpf.Exec() = %d, want: %d, for syscall %d", got, want, i) + } + } +} + +// TestReadDeal checks that a process dies when it trips over the filter and that it +// doesn't die when the filter is not 
triggered. +func TestRealDeal(t *testing.T) { + for _, test := range []struct { + die bool + want string + }{ + {die: true, want: "bad system call"}, + {die: false, want: "Syscall was allowed!!!"}, + } { + victim, err := newVictim() + if err != nil { + t.Fatalf("unable to get victim: %v", err) + } + defer os.Remove(victim) + dieFlag := fmt.Sprintf("-die=%v", test.die) + cmd := exec.Command(victim, dieFlag) + + out, err := cmd.CombinedOutput() + if test.die { + if err == nil { + t.Errorf("victim was not killed as expected, output: %s", out) + continue + } + } else { + if err != nil { + t.Errorf("victim failed to execute, err: %v", err) + continue + } + } + if !strings.Contains(string(out), test.want) { + t.Errorf("Victim output is wrong, got: %v, want: %v", err, test.want) + continue + } + } +} diff --git a/pkg/seccomp/seccomp_test_victim.go b/pkg/seccomp/seccomp_test_victim.go new file mode 100644 index 000000000..fe3f96901 --- /dev/null +++ b/pkg/seccomp/seccomp_test_victim.go @@ -0,0 +1,112 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Test binary used to test that seccomp filters are properly constructed and +// indeed kill the process on violation. 
// ---- pkg/seccomp/seccomp_test_victim.go (package main) ----
//
// Test binary used to test that seccomp filters are properly constructed and
// indeed kill the process on violation.
func main() {
	dieFlag := flag.Bool("die", false, "trips over the filter if true")
	flag.Parse()

	// Whitelist of everything the Go runtime and this binary need, except
	// SYS_OPENAT which is added below only for the non-dying case.
	syscalls := []uintptr{
		syscall.SYS_ACCEPT,
		syscall.SYS_ARCH_PRCTL,
		syscall.SYS_BIND,
		syscall.SYS_BRK,
		syscall.SYS_CLOCK_GETTIME,
		syscall.SYS_CLONE,
		syscall.SYS_CLOSE,
		syscall.SYS_DUP,
		syscall.SYS_DUP2,
		syscall.SYS_EPOLL_CREATE1,
		syscall.SYS_EPOLL_CTL,
		syscall.SYS_EPOLL_WAIT,
		syscall.SYS_EXIT,
		syscall.SYS_EXIT_GROUP,
		syscall.SYS_FALLOCATE,
		syscall.SYS_FCHMOD,
		syscall.SYS_FCNTL,
		syscall.SYS_FSTAT,
		syscall.SYS_FSYNC,
		syscall.SYS_FTRUNCATE,
		syscall.SYS_FUTEX,
		syscall.SYS_GETDENTS64,
		syscall.SYS_GETPEERNAME,
		syscall.SYS_GETPID,
		syscall.SYS_GETSOCKNAME,
		syscall.SYS_GETSOCKOPT,
		syscall.SYS_GETTID,
		syscall.SYS_GETTIMEOFDAY,
		syscall.SYS_LISTEN,
		syscall.SYS_LSEEK,
		syscall.SYS_MADVISE,
		syscall.SYS_MINCORE,
		syscall.SYS_MMAP,
		syscall.SYS_MPROTECT,
		syscall.SYS_MUNLOCK,
		syscall.SYS_MUNMAP,
		syscall.SYS_NANOSLEEP,
		syscall.SYS_NEWFSTATAT,
		syscall.SYS_OPEN,
		syscall.SYS_POLL,
		syscall.SYS_PREAD64,
		syscall.SYS_PSELECT6,
		syscall.SYS_PWRITE64,
		syscall.SYS_READ,
		syscall.SYS_READLINKAT,
		syscall.SYS_READV,
		syscall.SYS_RECVMSG,
		syscall.SYS_RENAMEAT,
		syscall.SYS_RESTART_SYSCALL,
		syscall.SYS_RT_SIGACTION,
		syscall.SYS_RT_SIGPROCMASK,
		syscall.SYS_RT_SIGRETURN,
		syscall.SYS_SCHED_YIELD,
		syscall.SYS_SENDMSG,
		syscall.SYS_SETITIMER,
		syscall.SYS_SET_ROBUST_LIST,
		syscall.SYS_SETSOCKOPT,
		syscall.SYS_SHUTDOWN,
		syscall.SYS_SIGALTSTACK,
		syscall.SYS_SOCKET,
		syscall.SYS_SYNC_FILE_RANGE,
		syscall.SYS_TGKILL,
		syscall.SYS_UTIMENSAT,
		syscall.SYS_WRITE,
		syscall.SYS_WRITEV,
	}
	die := *dieFlag
	if !die {
		// Only whitelist the probe syscall when we're not supposed to
		// die; the probe below then succeeds.
		syscalls = append(syscalls, syscall.SYS_OPENAT)
	}

	if err := seccomp.Install(syscalls, false); err != nil {
		fmt.Printf("Failed to install seccomp: %v", err)
		os.Exit(1)
	}
	fmt.Printf("Filters installed\n")

	// Probe: traps (and kills the process via SIGSYS) when SYS_OPENAT is
	// not in the whitelist.
	syscall.RawSyscall(syscall.SYS_OPENAT, 0, 0, 0)
	fmt.Printf("Syscall was allowed!!!\n")
}

// ---- pkg/seccomp/seccomp_unsafe.go (package seccomp, +build amd64) ----

// sockFprog is sock_fprog taken from <linux/filter.h>.
type sockFprog struct {
	Len    uint16
	pad    [6]byte
	Filter *linux.BPFInstruction
}

// seccomp installs the given BPF instructions as this process's seccomp
// filter, after enabling PR_SET_NO_NEW_PRIVS as required by seccomp(2).
func seccomp(instrs []linux.BPFInstruction) error {
	// SYS_SECCOMP is not available in syscall package.
	const SYS_SECCOMP = 317

	// PR_SET_NO_NEW_PRIVS is required in order to enable seccomp. See seccomp(2) for details.
	//
	// NOTE(review): prctl(2) requires the unused arguments of
	// PR_SET_NO_NEW_PRIVS to be zero; RawSyscall only sets three argument
	// registers — confirm the remaining registers are zero in practice.
	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, linux.PR_SET_NO_NEW_PRIVS, 1, 0); err != 0 {
		return fmt.Errorf("failed to set PR_SET_NO_NEW_PRIVS: %v", err)
	}
	sockProg := sockFprog{Len: uint16(len(instrs)), Filter: (*linux.BPFInstruction)(unsafe.Pointer(&instrs[0]))}

	// TODO: Use SECCOMP_FILTER_FLAG_KILL_PROCESS when available.
	if _, _, err := syscall.RawSyscall(SYS_SECCOMP, linux.SECCOMP_SET_MODE_FILTER, linux.SECCOMP_FILTER_FLAG_TSYNC, uintptr(unsafe.Pointer(&sockProg))); err != 0 {
		return fmt.Errorf("failed to set seccomp filter: %v", err)
	}
	return nil
}
// FullReader adapts an io.Reader so that Read never reports a partial read
// together with a nil error.
type FullReader struct {
	Reader io.Reader
}

// Read implements io.Reader.Read. It fills dst completely when possible,
// translating io.ErrUnexpectedEOF from io.ReadFull into a plain io.EOF.
func (r FullReader) Read(dst []byte) (int, error) {
	n, err := io.ReadFull(r.Reader, dst)
	if err == io.ErrUnexpectedEOF {
		err = io.EOF
	}
	return n, err
}

// ErrReachedLimit is returned when SectionReader.Read or SectionWriter.Write
// reaches its limit.
var ErrReachedLimit = errors.New("reached limit")

// SectionReader implements io.Reader on a section of an underlying
// io.ReaderAt. It is similar to io.SectionReader, but:
//
// - Reading beyond the limit returns ErrReachedLimit, not io.EOF.
//
// - Limit overflow is handled correctly.
type SectionReader struct {
	r     io.ReaderAt
	off   int64
	limit int64
}

// Read implements io.Reader.Read. A negative limit means "no limit".
func (r *SectionReader) Read(dst []byte) (int, error) {
	// Cap the read so it never crosses the limit.
	if remaining := r.limit - r.off; r.limit >= 0 && remaining < int64(len(dst)) {
		dst = dst[:remaining]
	}
	n, err := r.r.ReadAt(dst, r.off)
	r.off += int64(n)
	if err == nil && r.off == r.limit {
		return n, ErrReachedLimit
	}
	return n, err
}

// NewOffsetReader returns an io.Reader that reads from r starting at offset
// off.
func NewOffsetReader(r io.ReaderAt, off int64) *SectionReader {
	return &SectionReader{r: r, off: off, limit: -1}
}

// NewSectionReader returns an io.Reader that reads from r starting at offset
// off and stops with ErrReachedLimit after n bytes.
func NewSectionReader(r io.ReaderAt, off int64, n int64) *SectionReader {
	// If off+n overflows it becomes negative, disabling the limit; that is
	// the correct behavior as long as r prohibits reading at offsets
	// beyond MaxInt64.
	return &SectionReader{r: r, off: off, limit: off + n}
}

// SectionWriter implements io.Writer on a section of an underlying
// io.WriterAt. Writing beyond the limit returns ErrReachedLimit.
type SectionWriter struct {
	w     io.WriterAt
	off   int64
	limit int64
}

// Write implements io.Writer.Write. A negative limit means "no limit".
func (s *SectionWriter) Write(src []byte) (int, error) {
	// Cap the write so it never crosses the limit.
	if remaining := s.limit - s.off; s.limit >= 0 && remaining < int64(len(src)) {
		src = src[:remaining]
	}
	n, err := s.w.WriteAt(src, s.off)
	s.off += int64(n)
	if err == nil && s.off == s.limit {
		return n, ErrReachedLimit
	}
	return n, err
}

// NewOffsetWriter returns an io.Writer that writes to w starting at offset
// off.
func NewOffsetWriter(w io.WriterAt, off int64) *SectionWriter {
	return &SectionWriter{w: w, off: off, limit: -1}
}

// NewSectionWriter returns an io.Writer that writes to w starting at offset
// off and stops with ErrReachedLimit after n bytes.
func NewSectionWriter(w io.WriterAt, off int64, n int64) *SectionWriter {
	// See the overflow note in NewSectionReader; the same reasoning
	// applies to w and MaxInt64 here.
	return &SectionWriter{w: w, off: off, limit: off + n}
}
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package secio + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "math" + "testing" +) + +var errEndOfBuffer = errors.New("write beyond end of buffer") + +// buffer resembles bytes.Buffer, but implements io.ReaderAt and io.WriterAt. +// Reads beyond the end of the buffer return io.EOF. Writes beyond the end of +// the buffer return errEndOfBuffer. +type buffer struct { + Bytes []byte +} + +// ReadAt implements io.ReaderAt.ReadAt. +func (b *buffer) ReadAt(dst []byte, off int64) (int, error) { + if off >= int64(len(b.Bytes)) { + return 0, io.EOF + } + n := copy(dst, b.Bytes[off:]) + if n < len(dst) { + return n, io.EOF + } + return n, nil +} + +// WriteAt implements io.WriterAt.WriteAt. 
+func (b *buffer) WriteAt(src []byte, off int64) (int, error) { + if off >= int64(len(b.Bytes)) { + return 0, errEndOfBuffer + } + n := copy(b.Bytes[off:], src) + if n < len(src) { + return n, errEndOfBuffer + } + return n, nil +} + +func newBufferString(s string) *buffer { + return &buffer{[]byte(s)} +} + +func TestOffsetReader(t *testing.T) { + buf := newBufferString("foobar") + r := NewOffsetReader(buf, 3) + dst, err := ioutil.ReadAll(r) + if want := []byte("bar"); !bytes.Equal(dst, want) || err != nil { + t.Errorf("ReadAll: got (%q, %v), wanted (%q, nil)", dst, err, want) + } +} + +func TestSectionReader(t *testing.T) { + buf := newBufferString("foobarbaz") + r := NewSectionReader(buf, 3, 3) + dst, err := ioutil.ReadAll(r) + if want, wantErr := []byte("bar"), ErrReachedLimit; !bytes.Equal(dst, want) || err != wantErr { + t.Errorf("ReadAll: got (%q, %v), wanted (%q, %v)", dst, err, want, wantErr) + } +} + +func TestSectionReaderLimitOverflow(t *testing.T) { + // SectionReader behaves like OffsetReader when limit overflows int64. 
+ buf := newBufferString("foobar") + r := NewSectionReader(buf, 3, math.MaxInt64) + dst, err := ioutil.ReadAll(r) + if want := []byte("bar"); !bytes.Equal(dst, want) || err != nil { + t.Errorf("ReadAll: got (%q, %v), wanted (%q, nil)", dst, err, want) + } +} + +func TestOffsetWriter(t *testing.T) { + buf := newBufferString("ABCDEF") + w := NewOffsetWriter(buf, 3) + n, err := w.Write([]byte("foobar")) + if wantN, wantErr := 3, errEndOfBuffer; n != wantN || err != wantErr { + t.Errorf("WriteAt: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr) + } + if got, want := buf.Bytes, []byte("ABCfoo"); !bytes.Equal(got, want) { + t.Errorf("buf.Bytes: got %q, wanted %q", got, want) + } +} + +func TestSectionWriter(t *testing.T) { + buf := newBufferString("ABCDEFGHI") + w := NewSectionWriter(buf, 3, 3) + n, err := w.Write([]byte("foobar")) + if wantN, wantErr := 3, ErrReachedLimit; n != wantN || err != wantErr { + t.Errorf("WriteAt: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr) + } + if got, want := buf.Bytes, []byte("ABCfooGHI"); !bytes.Equal(got, want) { + t.Errorf("buf.Bytes: got %q, wanted %q", got, want) + } +} + +func TestSectionWriterLimitOverflow(t *testing.T) { + // SectionWriter behaves like OffsetWriter when limit overflows int64. 
+ buf := newBufferString("ABCDEF") + w := NewSectionWriter(buf, 3, math.MaxInt64) + n, err := w.Write([]byte("foobar")) + if wantN, wantErr := 3, errEndOfBuffer; n != wantN || err != wantErr { + t.Errorf("WriteAt: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr) + } + if got, want := buf.Bytes, []byte("ABCfoo"); !bytes.Equal(got, want) { + t.Errorf("buf.Bytes: got %q, wanted %q", got, want) + } +} diff --git a/pkg/segment/BUILD b/pkg/segment/BUILD new file mode 100644 index 000000000..964d73af8 --- /dev/null +++ b/pkg/segment/BUILD @@ -0,0 +1,31 @@ +package( + default_visibility = ["//:sandbox"], + licenses = ["notice"], # Apache 2.0 +) + +load("//tools/go_generics:defs.bzl", "go_template") + +go_template( + name = "generic_range", + srcs = ["range.go"], + types = [ + "T", + ], +) + +go_template( + name = "generic_set", + srcs = [ + "set.go", + "set_state.go", + ], + opt_consts = [ + "minDegree", + ], + types = [ + "Key", + "Range", + "Value", + "Functions", + ], +) diff --git a/pkg/segment/range.go b/pkg/segment/range.go new file mode 100644 index 000000000..5ff30d489 --- /dev/null +++ b/pkg/segment/range.go @@ -0,0 +1,77 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package segment + +// T is a required type parameter that must be an integral type. +type T uint64 + +// A Range represents a contiguous range of T. +type Range struct { + // Start is the inclusive start of the range. 
+ Start T + + // End is the exclusive end of the range. + End T +} + +// WellFormed returns true if r.Start <= r.End. All other methods on a Range +// require that the Range is well-formed. +func (r Range) WellFormed() bool { + return r.Start <= r.End +} + +// Length returns the length of the range. +func (r Range) Length() T { + return r.End - r.Start +} + +// Contains returns true if r contains x. +func (r Range) Contains(x T) bool { + return r.Start <= x && x < r.End +} + +// Overlaps returns true if r and r2 overlap. +func (r Range) Overlaps(r2 Range) bool { + return r.Start < r2.End && r2.Start < r.End +} + +// IsSupersetOf returns true if r is a superset of r2; that is, the range r2 is +// contained within r. +func (r Range) IsSupersetOf(r2 Range) bool { + return r.Start <= r2.Start && r.End >= r2.End +} + +// Intersect returns a range consisting of the intersection between r and r2. +// If r and r2 do not overlap, Intersect returns a range with unspecified +// bounds, but for which Length() == 0. +func (r Range) Intersect(r2 Range) Range { + if r.Start < r2.Start { + r.Start = r2.Start + } + if r.End > r2.End { + r.End = r2.End + } + if r.End < r.Start { + r.End = r.Start + } + return r +} + +// CanSplitAt returns true if it is legal to split a segment spanning the range +// r at x; that is, splitting at x would produce two ranges, both of which have +// non-zero length. +func (r Range) CanSplitAt(x T) bool { + return r.Contains(x) && r.Start < x +} diff --git a/pkg/segment/set.go b/pkg/segment/set.go new file mode 100644 index 000000000..6eed1d930 --- /dev/null +++ b/pkg/segment/set.go @@ -0,0 +1,1359 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package segment provides tools for working with collections of segments. A +// segment is a key-value mapping, where the key is a non-empty contiguous +// range of values of type Key, and the value is a single value of type Value. +// +// Clients using this package must use the go_template_instance rule in +// tools/go_generics/defs.bzl to create an instantiation of this +// template package, providing types to use in place of Key, Range, Value, and +// Functions. See pkg/segment/test/BUILD for a usage example. +package segment + +import ( + "bytes" + "fmt" +) + +// Key is a required type parameter that must be an integral type. +type Key uint64 + +// Range is a required type parameter equivalent to Range<Key>. +type Range interface{} + +// Value is a required type parameter. +type Value interface{} + +// Functions is a required type parameter that must be a struct implementing +// the methods defined by Functions. +type Functions interface { + // MinKey returns the minimum allowed key. + MinKey() Key + + // MaxKey returns the maximum allowed key + 1. + MaxKey() Key + + // ClearValue deinitializes the given value. (For example, if Value is a + // pointer or interface type, ClearValue should set it to nil.) + ClearValue(*Value) + + // Merge attempts to merge the values corresponding to two consecutive + // segments. If successful, Merge returns (merged value, true). Otherwise, + // it returns (unspecified, false). + // + // Preconditions: r1.End == r2.Start. + // + // Postconditions: If merging succeeds, val1 and val2 are invalidated. 
+ Merge(r1 Range, val1 Value, r2 Range, val2 Value) (Value, bool) + + // Split splits a segment's value at a key within its range, such that the + // first returned value corresponds to the range [r.Start, split) and the + // second returned value corresponds to the range [split, r.End). + // + // Preconditions: r.Start < split < r.End. + // + // Postconditions: The original value val is invalidated. + Split(r Range, val Value, split Key) (Value, Value) +} + +const ( + // minDegree is the minimum degree of an internal node in a Set B-tree. + // + // - Any non-root node has at least minDegree-1 segments. + // + // - Any non-root internal (non-leaf) node has at least minDegree children. + // + // - The root node may have fewer than minDegree-1 segments, but it may + // only have 0 segments if the tree is empty. + // + // Our implementation requires minDegree >= 3. Higher values of minDegree + // usually improve performance, but increase memory usage for small sets. + minDegree = 3 + + maxDegree = 2 * minDegree +) + +// A Set is a mapping of segments with non-overlapping Range keys. The zero +// value for a Set is an empty set. Set values are not safely movable nor +// copyable. Set is thread-compatible. +type Set struct { + root node `state:".(*SegmentDataSlices)"` +} + +// IsEmpty returns true if the set contains no segments. +func (s *Set) IsEmpty() bool { + return s.root.nrSegments == 0 +} + +// IsEmptyRange returns true iff no segments in the set overlap the given +// range. This is semantically equivalent to s.SpanRange(r) == 0, but may be +// more efficient. +func (s *Set) IsEmptyRange(r Range) bool { + switch { + case r.Length() < 0: + panic(fmt.Sprintf("invalid range %v", r)) + case r.Length() == 0: + return true + } + _, gap := s.Find(r.Start) + if !gap.Ok() { + return false + } + return r.End <= gap.End() +} + +// Span returns the total size of all segments in the set. 
+func (s *Set) Span() Key { + var sz Key + for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + sz += seg.Range().Length() + } + return sz +} + +// SpanRange returns the total size of the intersection of segments in the set +// with the given range. +func (s *Set) SpanRange(r Range) Key { + switch { + case r.Length() < 0: + panic(fmt.Sprintf("invalid range %v", r)) + case r.Length() == 0: + return 0 + } + var sz Key + for seg := s.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() { + sz += seg.Range().Intersect(r).Length() + } + return sz +} + +// FirstSegment returns the first segment in the set. If the set is empty, +// FirstSegment returns a terminal iterator. +func (s *Set) FirstSegment() Iterator { + if s.root.nrSegments == 0 { + return Iterator{} + } + return s.root.firstSegment() +} + +// LastSegment returns the last segment in the set. If the set is empty, +// LastSegment returns a terminal iterator. +func (s *Set) LastSegment() Iterator { + if s.root.nrSegments == 0 { + return Iterator{} + } + return s.root.lastSegment() +} + +// FirstGap returns the first gap in the set. +func (s *Set) FirstGap() GapIterator { + n := &s.root + for n.hasChildren { + n = n.children[0] + } + return GapIterator{n, 0} +} + +// LastGap returns the last gap in the set. +func (s *Set) LastGap() GapIterator { + n := &s.root + for n.hasChildren { + n = n.children[n.nrSegments] + } + return GapIterator{n, n.nrSegments} +} + +// Find returns the segment or gap whose range contains the given key. If a +// segment is found, the returned Iterator is non-terminal and the +// returned GapIterator is terminal. Otherwise, the returned Iterator is +// terminal and the returned GapIterator is non-terminal. +func (s *Set) Find(key Key) (Iterator, GapIterator) { + n := &s.root + for { + // Binary search invariant: the correct value of i lies within [lower, + // upper]. 
+ lower := 0 + upper := n.nrSegments + for lower < upper { + i := lower + (upper-lower)/2 + if r := n.keys[i]; key < r.End { + if key >= r.Start { + return Iterator{n, i}, GapIterator{} + } + upper = i + } else { + lower = i + 1 + } + } + i := lower + if !n.hasChildren { + return Iterator{}, GapIterator{n, i} + } + n = n.children[i] + } +} + +// FindSegment returns the segment whose range contains the given key. If no +// such segment exists, FindSegment returns a terminal iterator. +func (s *Set) FindSegment(key Key) Iterator { + seg, _ := s.Find(key) + return seg +} + +// LowerBoundSegment returns the segment with the lowest range that contains a +// key greater than or equal to min. If no such segment exists, +// LowerBoundSegment returns a terminal iterator. +func (s *Set) LowerBoundSegment(min Key) Iterator { + seg, gap := s.Find(min) + if seg.Ok() { + return seg + } + return gap.NextSegment() +} + +// UpperBoundSegment returns the segment with the highest range that contains a +// key less than or equal to max. If no such segment exists, UpperBoundSegment +// returns a terminal iterator. +func (s *Set) UpperBoundSegment(max Key) Iterator { + seg, gap := s.Find(max) + if seg.Ok() { + return seg + } + return gap.PrevSegment() +} + +// FindGap returns the gap containing the given key. If no such gap exists +// (i.e. the set contains a segment containing that key), FindGap returns a +// terminal iterator. +func (s *Set) FindGap(key Key) GapIterator { + _, gap := s.Find(key) + return gap +} + +// LowerBoundGap returns the gap with the lowest range that is greater than or +// equal to min. +func (s *Set) LowerBoundGap(min Key) GapIterator { + seg, gap := s.Find(min) + if gap.Ok() { + return gap + } + return seg.NextGap() +} + +// UpperBoundGap returns the gap with the highest range that is less than or +// equal to max. 
+func (s *Set) UpperBoundGap(max Key) GapIterator { + seg, gap := s.Find(max) + if gap.Ok() { + return gap + } + return seg.PrevGap() +} + +// Add inserts the given segment into the set and returns true. If the new +// segment can be merged with adjacent segments, Add will do so. If the new +// segment would overlap an existing segment, Add returns false. If Add +// succeeds, all existing iterators are invalidated. +func (s *Set) Add(r Range, val Value) bool { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + gap := s.FindGap(r.Start) + if !gap.Ok() { + return false + } + if r.End > gap.End() { + return false + } + s.Insert(gap, r, val) + return true +} + +// AddWithoutMerging inserts the given segment into the set and returns true. +// If it would overlap an existing segment, AddWithoutMerging does nothing and +// returns false. If AddWithoutMerging succeeds, all existing iterators are +// invalidated. +func (s *Set) AddWithoutMerging(r Range, val Value) bool { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + gap := s.FindGap(r.Start) + if !gap.Ok() { + return false + } + if r.End > gap.End() { + return false + } + s.InsertWithoutMergingUnchecked(gap, r, val) + return true +} + +// Insert inserts the given segment into the given gap. If the new segment can +// be merged with adjacent segments, Insert will do so. Insert returns an +// iterator to the segment containing the inserted value (which may have been +// merged with other values). All existing iterators (including gap, but not +// including the returned iterator) are invalidated. +// +// If the gap cannot accommodate the segment, or if r is invalid, Insert panics. +// +// Insert is semantically equivalent to a InsertWithoutMerging followed by a +// Merge, but may be more efficient. Note that there is no unchecked variant of +// Insert since Insert must retrieve and inspect gap's predecessor and +// successor segments regardless. 
+func (s *Set) Insert(gap GapIterator, r Range, val Value) Iterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + prev, next := gap.PrevSegment(), gap.NextSegment() + if prev.Ok() && prev.End() > r.Start { + panic(fmt.Sprintf("new segment %v overlaps predecessor %v", r, prev.Range())) + } + if next.Ok() && next.Start() < r.End { + panic(fmt.Sprintf("new segment %v overlaps successor %v", r, next.Range())) + } + if prev.Ok() && prev.End() == r.Start { + if mval, ok := (Functions{}).Merge(prev.Range(), prev.Value(), r, val); ok { + prev.SetEndUnchecked(r.End) + prev.SetValue(mval) + if next.Ok() && next.Start() == r.End { + val = mval + if mval, ok := (Functions{}).Merge(prev.Range(), val, next.Range(), next.Value()); ok { + prev.SetEndUnchecked(next.End()) + prev.SetValue(mval) + return s.Remove(next).PrevSegment() + } + } + return prev + } + } + if next.Ok() && next.Start() == r.End { + if mval, ok := (Functions{}).Merge(r, val, next.Range(), next.Value()); ok { + next.SetStartUnchecked(r.Start) + next.SetValue(mval) + return next + } + } + return s.InsertWithoutMergingUnchecked(gap, r, val) +} + +// InsertWithoutMerging inserts the given segment into the given gap and +// returns an iterator to the inserted segment. All existing iterators +// (including gap, but not including the returned iterator) are invalidated. +// +// If the gap cannot accommodate the segment, or if r is invalid, +// InsertWithoutMerging panics. +func (s *Set) InsertWithoutMerging(gap GapIterator, r Range, val Value) Iterator { + if r.Length() <= 0 { + panic(fmt.Sprintf("invalid segment range %v", r)) + } + if gr := gap.Range(); !gr.IsSupersetOf(r) { + panic(fmt.Sprintf("cannot insert segment range %v into gap range %v", r, gr)) + } + return s.InsertWithoutMergingUnchecked(gap, r, val) +} + +// InsertWithoutMergingUnchecked inserts the given segment into the given gap +// and returns an iterator to the inserted segment. 
All existing iterators +// (including gap, but not including the returned iterator) are invalidated. +// +// Preconditions: r.Start >= gap.Start(); r.End <= gap.End(). +func (s *Set) InsertWithoutMergingUnchecked(gap GapIterator, r Range, val Value) Iterator { + gap = gap.node.rebalanceBeforeInsert(gap) + copy(gap.node.keys[gap.index+1:], gap.node.keys[gap.index:gap.node.nrSegments]) + copy(gap.node.values[gap.index+1:], gap.node.values[gap.index:gap.node.nrSegments]) + gap.node.keys[gap.index] = r + gap.node.values[gap.index] = val + gap.node.nrSegments++ + return Iterator{gap.node, gap.index} +} + +// Remove removes the given segment and returns an iterator to the vacated gap. +// All existing iterators (including seg, but not including the returned +// iterator) are invalidated. +func (s *Set) Remove(seg Iterator) GapIterator { + // We only want to remove directly from a leaf node. + if seg.node.hasChildren { + // Since seg.node has children, the removed segment must have a + // predecessor (at the end of the rightmost leaf of its left child + // subtree). Move the contents of that predecessor into the removed + // segment's position, and remove that predecessor instead. (We choose + // to steal the predecessor rather than the successor because removing + // from the end of a leaf node doesn't involve any copying unless + // merging is required.) + victim := seg.PrevSegment() + // This must be unchecked since until victim is removed, seg and victim + // overlap. + seg.SetRangeUnchecked(victim.Range()) + seg.SetValue(victim.Value()) + return s.Remove(victim).NextGap() + } + copy(seg.node.keys[seg.index:], seg.node.keys[seg.index+1:seg.node.nrSegments]) + copy(seg.node.values[seg.index:], seg.node.values[seg.index+1:seg.node.nrSegments]) + Functions{}.ClearValue(&seg.node.values[seg.node.nrSegments-1]) + seg.node.nrSegments-- + return seg.node.rebalanceAfterRemove(GapIterator{seg.node, seg.index}) +} + +// RemoveAll removes all segments from the set. 
All existing iterators are +// invalidated. +func (s *Set) RemoveAll() { + s.root = node{} +} + +// RemoveRange removes all segments in the given range. An iterator to the +// newly formed gap is returned, and all existing iterators are invalidated. +func (s *Set) RemoveRange(r Range) GapIterator { + seg, gap := s.Find(r.Start) + if seg.Ok() { + seg = s.Isolate(seg, r) + gap = s.Remove(seg) + } + for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() { + seg = s.Isolate(seg, r) + gap = s.Remove(seg) + } + return gap +} + +// Merge attempts to merge two neighboring segments. If successful, Merge +// returns an iterator to the merged segment, and all existing iterators are +// invalidated. Otherwise, Merge returns a terminal iterator. +// +// If first is not the predecessor of second, Merge panics. +func (s *Set) Merge(first, second Iterator) Iterator { + if first.NextSegment() != second { + panic(fmt.Sprintf("attempt to merge non-neighboring segments %v, %v", first.Range(), second.Range())) + } + return s.MergeUnchecked(first, second) +} + +// MergeUnchecked attempts to merge two neighboring segments. If successful, +// MergeUnchecked returns an iterator to the merged segment, and all existing +// iterators are invalidated. Otherwise, MergeUnchecked returns a terminal +// iterator. +// +// Precondition: first is the predecessor of second: first.NextSegment() == +// second, first == second.PrevSegment(). +func (s *Set) MergeUnchecked(first, second Iterator) Iterator { + if first.End() == second.Start() { + if mval, ok := (Functions{}).Merge(first.Range(), first.Value(), second.Range(), second.Value()); ok { + // N.B. This must be unchecked because until s.Remove(second), first + // overlaps second. + first.SetEndUnchecked(second.End()) + first.SetValue(mval) + return s.Remove(second).PrevSegment() + } + } + return Iterator{} +} + +// MergeAll attempts to merge all adjacent segments in the set. All existing +// iterators are invalidated. 
+func (s *Set) MergeAll() { + seg := s.FirstSegment() + if !seg.Ok() { + return + } + next := seg.NextSegment() + for next.Ok() { + if mseg := s.MergeUnchecked(seg, next); mseg.Ok() { + seg, next = mseg, mseg.NextSegment() + } else { + seg, next = next, next.NextSegment() + } + } +} + +// MergeRange attempts to merge all adjacent segments that contain a key in the +// specific range. All existing iterators are invalidated. +func (s *Set) MergeRange(r Range) { + seg := s.LowerBoundSegment(r.Start) + if !seg.Ok() { + return + } + next := seg.NextSegment() + for next.Ok() && next.Range().Start < r.End { + if mseg := s.MergeUnchecked(seg, next); mseg.Ok() { + seg, next = mseg, mseg.NextSegment() + } else { + seg, next = next, next.NextSegment() + } + } +} + +// MergeAdjacent attempts to merge the segment containing r.Start with its +// predecessor, and the segment containing r.End-1 with its successor. +func (s *Set) MergeAdjacent(r Range) { + first := s.FindSegment(r.Start) + if first.Ok() { + if prev := first.PrevSegment(); prev.Ok() { + s.Merge(prev, first) + } + } + last := s.FindSegment(r.End - 1) + if last.Ok() { + if next := last.NextSegment(); next.Ok() { + s.Merge(last, next) + } + } +} + +// Split splits the given segment at the given key and returns iterators to the +// two resulting segments. All existing iterators (including seg, but not +// including the returned iterators) are invalidated. +// +// If the segment cannot be split at split (because split is at the start or +// end of the segment's range, so splitting would produce a segment with zero +// length, or because split falls outside the segment's range altogether), +// Split panics. 
+func (s *Set) Split(seg Iterator, split Key) (Iterator, Iterator) { + if !seg.Range().CanSplitAt(split) { + panic(fmt.Sprintf("can't split %v at %v", seg.Range(), split)) + } + return s.SplitUnchecked(seg, split) +} + +// SplitUnchecked splits the given segment at the given key and returns +// iterators to the two resulting segments. All existing iterators (including +// seg, but not including the returned iterators) are invalidated. +// +// Preconditions: seg.Start() < key < seg.End(). +func (s *Set) SplitUnchecked(seg Iterator, split Key) (Iterator, Iterator) { + val1, val2 := (Functions{}).Split(seg.Range(), seg.Value(), split) + end2 := seg.End() + seg.SetEndUnchecked(split) + seg.SetValue(val1) + seg2 := s.InsertWithoutMergingUnchecked(seg.NextGap(), Range{split, end2}, val2) + // seg may now be invalid due to the Insert. + return seg2.PrevSegment(), seg2 +} + +// SplitAt splits the segment straddling split, if one exists. SplitAt returns +// true if a segment was split and false otherwise. If SplitAt splits a +// segment, all existing iterators are invalidated. +func (s *Set) SplitAt(split Key) bool { + if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) { + s.SplitUnchecked(seg, split) + return true + } + return false +} + +// Isolate ensures that the given segment's range does not escape r by +// splitting at r.Start and r.End if necessary, and returns an updated iterator +// to the bounded segment. All existing iterators (including seg, but not +// including the returned iterators) are invalidated. +func (s *Set) Isolate(seg Iterator, r Range) Iterator { + if seg.Range().CanSplitAt(r.Start) { + _, seg = s.SplitUnchecked(seg, r.Start) + } + if seg.Range().CanSplitAt(r.End) { + seg, _ = s.SplitUnchecked(seg, r.End) + } + return seg +} + +// ApplyContiguous applies a function to a contiguous range of segments, +// splitting if necessary. The function is applied until the first gap is +// encountered, at which point the gap is returned. 
If the function is applied +// across the entire range, a terminal gap is returned. All existing iterators +// are invalidated. +// +// N.B. The Iterator must not be invalidated by the function. +func (s *Set) ApplyContiguous(r Range, fn func(seg Iterator)) GapIterator { + seg, gap := s.Find(r.Start) + if !seg.Ok() { + return gap + } + for { + seg = s.Isolate(seg, r) + fn(seg) + if seg.End() >= r.End { + return GapIterator{} + } + gap = seg.NextGap() + if !gap.IsEmpty() { + return gap + } + seg = gap.NextSegment() + if !seg.Ok() { + // This implies that the last segment extended all the + // way to the maximum value, since the gap was empty. + return GapIterator{} + } + } +} + +type node struct { + // An internal binary tree node looks like: + // + // K + // / \ + // Cl Cr + // + // where all keys in the subtree rooted by Cl (the left subtree) are less + // than K (the key of the parent node), and all keys in the subtree rooted + // by Cr (the right subtree) are greater than K. + // + // An internal B-tree node's indexes work out to look like: + // + // K0 K1 K2 ... Kn-1 + // / \/ \/ \ ... / \ + // C0 C1 C2 C3 ... Cn-1 Cn + // + // where n is nrSegments. + nrSegments int + + // parent is a pointer to this node's parent. If this node is root, parent + // is nil. + parent *node + + // parentIndex is the index of this node in parent.children. + parentIndex int + + // Flag for internal nodes that is technically redundant with "children[0] + // != nil", but is stored in the first cache line. "hasChildren" rather + // than "isLeaf" because false must be the correct value for an empty root. + hasChildren bool + + // Nodes store keys and values in separate arrays to maximize locality in + // the common case (scanning keys for lookup). + keys [maxDegree - 1]Range + values [maxDegree - 1]Value + children [maxDegree]*node +} + +// firstSegment returns the first segment in the subtree rooted by n. +// +// Preconditions: n.nrSegments != 0. 
func (n *node) firstSegment() Iterator {
	// Descend through leftmost children to the leftmost leaf.
	for n.hasChildren {
		n = n.children[0]
	}
	return Iterator{n, 0}
}

// lastSegment returns the last segment in the subtree rooted by n.
//
// Preconditions: n.nrSegments != 0.
func (n *node) lastSegment() Iterator {
	// Descend through rightmost children to the rightmost leaf.
	for n.hasChildren {
		n = n.children[n.nrSegments]
	}
	return Iterator{n, n.nrSegments - 1}
}

// prevSibling returns the child of n.parent immediately to n's left, or nil
// if n is the root or the leftmost child of its parent.
func (n *node) prevSibling() *node {
	if n.parent == nil || n.parentIndex == 0 {
		return nil
	}
	return n.parent.children[n.parentIndex-1]
}

// nextSibling returns the child of n.parent immediately to n's right, or nil
// if n is the root or the rightmost child of its parent.
func (n *node) nextSibling() *node {
	if n.parent == nil || n.parentIndex == n.parent.nrSegments {
		return nil
	}
	return n.parent.children[n.parentIndex+1]
}

// rebalanceBeforeInsert splits n and its ancestors if they are full, as
// required for insertion, and returns an updated iterator to the position
// represented by gap.
func (n *node) rebalanceBeforeInsert(gap GapIterator) GapIterator {
	// Split ancestors first (top-down), so that when n itself splits, its
	// parent is guaranteed to have room for the promoted median segment.
	if n.parent != nil {
		gap = n.parent.rebalanceBeforeInsert(gap)
	}
	if n.nrSegments < maxDegree-1 {
		// n is not full; nothing to do.
		return gap
	}
	if n.parent == nil {
		// n is root. Move all segments before and after n's median segment
		// into new child nodes adjacent to the median segment, which is now
		// the only segment in root.
		left := &node{
			nrSegments:  minDegree - 1,
			parent:      n,
			parentIndex: 0,
			hasChildren: n.hasChildren,
		}
		right := &node{
			nrSegments:  minDegree - 1,
			parent:      n,
			parentIndex: 1,
			hasChildren: n.hasChildren,
		}
		copy(left.keys[:minDegree-1], n.keys[:minDegree-1])
		copy(left.values[:minDegree-1], n.values[:minDegree-1])
		copy(right.keys[:minDegree-1], n.keys[minDegree:])
		copy(right.values[:minDegree-1], n.values[minDegree:])
		// The median becomes root's only segment.
		n.keys[0], n.values[0] = n.keys[minDegree-1], n.values[minDegree-1]
		// Clear vacated value slots so they don't pin garbage.
		zeroValueSlice(n.values[1:])
		if n.hasChildren {
			copy(left.children[:minDegree], n.children[:minDegree])
			copy(right.children[:minDegree], n.children[minDegree:])
			zeroNodeSlice(n.children[2:])
			for i := 0; i < minDegree; i++ {
				left.children[i].parent = left
				left.children[i].parentIndex = i
				right.children[i].parent = right
				right.children[i].parentIndex = i
			}
		}
		n.nrSegments = 1
		n.hasChildren = true
		n.children[0] = left
		n.children[1] = right
		// Translate gap into whichever new child now holds its position.
		if gap.node != n {
			return gap
		}
		if gap.index < minDegree {
			return GapIterator{left, gap.index}
		}
		return GapIterator{right, gap.index - minDegree}
	}
	// n is non-root. Move n's median segment into its parent node (which can't
	// be full because we've already invoked n.parent.rebalanceBeforeInsert)
	// and move all segments after n's median into a new sibling node (the
	// median segment's right child subtree).
	copy(n.parent.keys[n.parentIndex+1:], n.parent.keys[n.parentIndex:n.parent.nrSegments])
	copy(n.parent.values[n.parentIndex+1:], n.parent.values[n.parentIndex:n.parent.nrSegments])
	n.parent.keys[n.parentIndex], n.parent.values[n.parentIndex] = n.keys[minDegree-1], n.values[minDegree-1]
	copy(n.parent.children[n.parentIndex+2:], n.parent.children[n.parentIndex+1:n.parent.nrSegments+1])
	// Shifted children need their parentIndex links refreshed.
	for i := n.parentIndex + 2; i < n.parent.nrSegments+2; i++ {
		n.parent.children[i].parentIndex = i
	}
	sibling := &node{
		nrSegments:  minDegree - 1,
		parent:      n.parent,
		parentIndex: n.parentIndex + 1,
		hasChildren: n.hasChildren,
	}
	n.parent.children[n.parentIndex+1] = sibling
	n.parent.nrSegments++
	copy(sibling.keys[:minDegree-1], n.keys[minDegree:])
	copy(sibling.values[:minDegree-1], n.values[minDegree:])
	zeroValueSlice(n.values[minDegree-1:])
	if n.hasChildren {
		copy(sibling.children[:minDegree], n.children[minDegree:])
		zeroNodeSlice(n.children[minDegree:])
		for i := 0; i < minDegree; i++ {
			sibling.children[i].parent = sibling
			sibling.children[i].parentIndex = i
		}
	}
	n.nrSegments = minDegree - 1
	// gap.node can't be n.parent because gaps are always in leaf nodes.
	if gap.node != n {
		return gap
	}
	if gap.index < minDegree {
		return gap
	}
	return GapIterator{sibling, gap.index - minDegree}
}

// rebalanceAfterRemove "unsplits" n and its ancestors if they are deficient
// (contain fewer segments than required by B-tree invariants), as required for
// removal, and returns an updated iterator to the position represented by gap.
//
// Precondition: n is the only node in the tree that may currently violate a
// B-tree invariant.
func (n *node) rebalanceAfterRemove(gap GapIterator) GapIterator {
	// Iterative bottom-up repair: each loop iteration fixes n and, if a merge
	// was needed, moves up to the (now possibly deficient) parent.
	for {
		if n.nrSegments >= minDegree-1 {
			return gap
		}
		if n.parent == nil {
			// Root is allowed to be deficient.
			return gap
		}
		// There's one other thing we can do before resorting to unsplitting.
		// If either sibling node has at least minDegree segments, rotate that
		// sibling's closest segment through the segment in the parent that
		// separates us. That is, given:
		//
		//      ... D ...
		//         / \
		// ... B C] [E ...
		//
		// where the node containing E is deficient, end up with:
		//
		//     ... C ...
		//        / \
		// ... B] [D E ...
		//
		// As in Set.Remove, prefer rotating from the end of the sibling to the
		// left: by precondition, n.node has fewer segments (to memcpy) than
		// the sibling does.
		if sibling := n.prevSibling(); sibling != nil && sibling.nrSegments >= minDegree {
			// Rotate right: sibling's last segment -> parent separator -> n[0].
			copy(n.keys[1:], n.keys[:n.nrSegments])
			copy(n.values[1:], n.values[:n.nrSegments])
			n.keys[0] = n.parent.keys[n.parentIndex-1]
			n.values[0] = n.parent.values[n.parentIndex-1]
			n.parent.keys[n.parentIndex-1] = sibling.keys[sibling.nrSegments-1]
			n.parent.values[n.parentIndex-1] = sibling.values[sibling.nrSegments-1]
			Functions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
			if n.hasChildren {
				// The sibling's last child follows the rotated segment.
				copy(n.children[1:], n.children[:n.nrSegments+1])
				n.children[0] = sibling.children[sibling.nrSegments]
				sibling.children[sibling.nrSegments] = nil
				n.children[0].parent = n
				n.children[0].parentIndex = 0
				for i := 1; i < n.nrSegments+2; i++ {
					n.children[i].parentIndex = i
				}
			}
			n.nrSegments++
			sibling.nrSegments--
			// Fix up gap if it pointed at either moved position.
			if gap.node == sibling && gap.index == sibling.nrSegments {
				return GapIterator{n, 0}
			}
			if gap.node == n {
				return GapIterator{n, gap.index + 1}
			}
			return gap
		}
		if sibling := n.nextSibling(); sibling != nil && sibling.nrSegments >= minDegree {
			// Rotate left: sibling's first segment -> parent separator -> n's end.
			n.keys[n.nrSegments] = n.parent.keys[n.parentIndex]
			n.values[n.nrSegments] = n.parent.values[n.parentIndex]
			n.parent.keys[n.parentIndex] = sibling.keys[0]
			n.parent.values[n.parentIndex] = sibling.values[0]
			copy(sibling.keys[:sibling.nrSegments-1], sibling.keys[1:])
			copy(sibling.values[:sibling.nrSegments-1], sibling.values[1:])
			Functions{}.ClearValue(&sibling.values[sibling.nrSegments-1])
			if n.hasChildren {
				n.children[n.nrSegments+1] = sibling.children[0]
				copy(sibling.children[:sibling.nrSegments], sibling.children[1:])
				sibling.children[sibling.nrSegments] = nil
				n.children[n.nrSegments+1].parent = n
				n.children[n.nrSegments+1].parentIndex = n.nrSegments + 1
				for i := 0; i < sibling.nrSegments; i++ {
					sibling.children[i].parentIndex = i
				}
			}
			n.nrSegments++
			sibling.nrSegments--
			if gap.node == sibling {
				if gap.index == 0 {
					return GapIterator{n, n.nrSegments}
				}
				return GapIterator{sibling, gap.index - 1}
			}
			return gap
		}
		// Otherwise, we must unsplit.
		p := n.parent
		if p.nrSegments == 1 {
			// Merge all segments in both n and its sibling back into n.parent.
			// This is the reverse of the root splitting case in
			// node.rebalanceBeforeInsert. (Because we require minDegree >= 3,
			// only root can have 1 segment in this path, so this reduces the
			// height of the tree by 1, without violating the constraint that
			// all leaf nodes remain at the same depth.)
			left, right := p.children[0], p.children[1]
			p.nrSegments = left.nrSegments + right.nrSegments + 1
			p.hasChildren = left.hasChildren
			// Move the old separator to its final position before copying the
			// left child's segments over p.keys[0]/p.values[0].
			p.keys[left.nrSegments] = p.keys[0]
			p.values[left.nrSegments] = p.values[0]
			copy(p.keys[:left.nrSegments], left.keys[:left.nrSegments])
			copy(p.values[:left.nrSegments], left.values[:left.nrSegments])
			copy(p.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
			copy(p.values[left.nrSegments+1:], right.values[:right.nrSegments])
			if left.hasChildren {
				copy(p.children[:left.nrSegments+1], left.children[:left.nrSegments+1])
				copy(p.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
				for i := 0; i < p.nrSegments+1; i++ {
					p.children[i].parent = p
					p.children[i].parentIndex = i
				}
			} else {
				p.children[0] = nil
				p.children[1] = nil
			}
			if gap.node == left {
				return GapIterator{p, gap.index}
			}
			if gap.node == right {
				return GapIterator{p, gap.index + left.nrSegments + 1}
			}
			return gap
		}
		// Merge n and either sibling, along with the segment separating the
		// two, into whichever of the two nodes comes first. This is the
		// reverse of the non-root splitting case in
		// node.rebalanceBeforeInsert.
		var left, right *node
		if n.parentIndex > 0 {
			left = n.prevSibling()
			right = n
		} else {
			left = n
			right = n.nextSibling()
		}
		// Fix up gap first since we need the old left.nrSegments, which
		// merging will change.
		if gap.node == right {
			gap = GapIterator{left, gap.index + left.nrSegments + 1}
		}
		left.keys[left.nrSegments] = p.keys[left.parentIndex]
		left.values[left.nrSegments] = p.values[left.parentIndex]
		copy(left.keys[left.nrSegments+1:], right.keys[:right.nrSegments])
		copy(left.values[left.nrSegments+1:], right.values[:right.nrSegments])
		if left.hasChildren {
			copy(left.children[left.nrSegments+1:], right.children[:right.nrSegments+1])
			for i := left.nrSegments + 1; i < left.nrSegments+right.nrSegments+2; i++ {
				left.children[i].parent = left
				left.children[i].parentIndex = i
			}
		}
		left.nrSegments += right.nrSegments + 1
		// Close the hole the merge left in p.
		copy(p.keys[left.parentIndex:], p.keys[left.parentIndex+1:p.nrSegments])
		copy(p.values[left.parentIndex:], p.values[left.parentIndex+1:p.nrSegments])
		Functions{}.ClearValue(&p.values[p.nrSegments-1])
		copy(p.children[left.parentIndex+1:], p.children[left.parentIndex+2:p.nrSegments+1])
		for i := 0; i < p.nrSegments; i++ {
			p.children[i].parentIndex = i
		}
		p.children[p.nrSegments] = nil
		p.nrSegments--
		// This process robs p of one segment, so recurse into rebalancing p.
		n = p
	}
}

// An Iterator is conceptually one of:
//
// - A pointer to a segment in a set; or
//
// - A terminal iterator, which is a sentinel indicating that the end of
// iteration has been reached.
//
// Iterators are copyable values and are meaningfully equality-comparable. The
// zero value of Iterator is a terminal iterator.
//
// Unless otherwise specified, any mutation of a set invalidates all existing
// iterators into the set.
type Iterator struct {
	// node is the node containing the iterated segment. If the iterator is
	// terminal, node is nil.
	node *node

	// index is the index of the segment in node.keys/values.
	index int
}

// Ok returns true if the iterator is not terminal. All other methods are only
// valid for non-terminal iterators.
func (seg Iterator) Ok() bool {
	return seg.node != nil
}

// Range returns the iterated segment's range key.
func (seg Iterator) Range() Range {
	return seg.node.keys[seg.index]
}

// Start is equivalent to Range().Start, but should be preferred if only the
// start of the range is needed.
func (seg Iterator) Start() Key {
	return seg.node.keys[seg.index].Start
}

// End is equivalent to Range().End, but should be preferred if only the end of
// the range is needed.
func (seg Iterator) End() Key {
	return seg.node.keys[seg.index].End
}

// SetRangeUnchecked mutates the iterated segment's range key. This operation
// does not invalidate any iterators.
//
// Preconditions:
//
// - r.Length() > 0.
//
// - The new range must not overlap an existing one: If seg.NextSegment().Ok(),
// then r.end <= seg.NextSegment().Start(); if seg.PrevSegment().Ok(), then
// r.start >= seg.PrevSegment().End().
func (seg Iterator) SetRangeUnchecked(r Range) {
	seg.node.keys[seg.index] = r
}

// SetRange mutates the iterated segment's range key. If the new range would
// cause the iterated segment to overlap another segment, or if the new range
// is invalid, SetRange panics. This operation does not invalidate any
// iterators.
func (seg Iterator) SetRange(r Range) {
	if r.Length() <= 0 {
		panic(fmt.Sprintf("invalid segment range %v", r))
	}
	if prev := seg.PrevSegment(); prev.Ok() && r.Start < prev.End() {
		panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, prev.Range()))
	}
	if next := seg.NextSegment(); next.Ok() && r.End > next.Start() {
		panic(fmt.Sprintf("new segment range %v overlaps segment range %v", r, next.Range()))
	}
	seg.SetRangeUnchecked(r)
}

// SetStartUnchecked mutates the iterated segment's start. This operation does
// not invalidate any iterators.
//
// Preconditions: The new start must be valid: start < seg.End(); if
// seg.PrevSegment().Ok(), then start >= seg.PrevSegment().End().
func (seg Iterator) SetStartUnchecked(start Key) {
	seg.node.keys[seg.index].Start = start
}

// SetStart mutates the iterated segment's start. If the new start value would
// cause the iterated segment to overlap another segment, or would result in an
// invalid range, SetStart panics. This operation does not invalidate any
// iterators.
func (seg Iterator) SetStart(start Key) {
	if start >= seg.End() {
		panic(fmt.Sprintf("new start %v would invalidate segment range %v", start, seg.Range()))
	}
	if prev := seg.PrevSegment(); prev.Ok() && start < prev.End() {
		panic(fmt.Sprintf("new start %v would cause segment range %v to overlap segment range %v", start, seg.Range(), prev.Range()))
	}
	seg.SetStartUnchecked(start)
}

// SetEndUnchecked mutates the iterated segment's end. This operation does not
// invalidate any iterators.
//
// Preconditions: The new end must be valid: end > seg.Start(); if
// seg.NextSegment().Ok(), then end <= seg.NextSegment().Start().
func (seg Iterator) SetEndUnchecked(end Key) {
	seg.node.keys[seg.index].End = end
}

// SetEnd mutates the iterated segment's end. If the new end value would cause
// the iterated segment to overlap another segment, or would result in an
// invalid range, SetEnd panics. This operation does not invalidate any
// iterators.
func (seg Iterator) SetEnd(end Key) {
	if end <= seg.Start() {
		panic(fmt.Sprintf("new end %v would invalidate segment range %v", end, seg.Range()))
	}
	if next := seg.NextSegment(); next.Ok() && end > next.Start() {
		panic(fmt.Sprintf("new end %v would cause segment range %v to overlap segment range %v", end, seg.Range(), next.Range()))
	}
	seg.SetEndUnchecked(end)
}

// Value returns a copy of the iterated segment's value.
func (seg Iterator) Value() Value {
	return seg.node.values[seg.index]
}

// ValuePtr returns a pointer to the iterated segment's value. The pointer is
// invalidated if the iterator is invalidated. This operation does not
// invalidate any iterators.
func (seg Iterator) ValuePtr() *Value {
	return &seg.node.values[seg.index]
}

// SetValue mutates the iterated segment's value. This operation does not
// invalidate any iterators.
func (seg Iterator) SetValue(val Value) {
	seg.node.values[seg.index] = val
}

// PrevSegment returns the iterated segment's predecessor. If there is no
// preceding segment, PrevSegment returns a terminal iterator.
func (seg Iterator) PrevSegment() Iterator {
	// In an internal node, the predecessor is the last segment of the child
	// subtree to the segment's left.
	if seg.node.hasChildren {
		return seg.node.children[seg.index].lastSegment()
	}
	if seg.index > 0 {
		return Iterator{seg.node, seg.index - 1}
	}
	if seg.node.parent == nil {
		return Iterator{}
	}
	return segmentBeforePosition(seg.node.parent, seg.node.parentIndex)
}

// NextSegment returns the iterated segment's successor. If there is no
// succeeding segment, NextSegment returns a terminal iterator.
func (seg Iterator) NextSegment() Iterator {
	// In an internal node, the successor is the first segment of the child
	// subtree to the segment's right.
	if seg.node.hasChildren {
		return seg.node.children[seg.index+1].firstSegment()
	}
	if seg.index < seg.node.nrSegments-1 {
		return Iterator{seg.node, seg.index + 1}
	}
	if seg.node.parent == nil {
		return Iterator{}
	}
	return segmentAfterPosition(seg.node.parent, seg.node.parentIndex)
}

// PrevGap returns the gap immediately before the iterated segment.
func (seg Iterator) PrevGap() GapIterator {
	if seg.node.hasChildren {
		// Note that this isn't recursive because the last segment in a subtree
		// must be in a leaf node.
		return seg.node.children[seg.index].lastSegment().NextGap()
	}
	return GapIterator{seg.node, seg.index}
}

// NextGap returns the gap immediately after the iterated segment.
func (seg Iterator) NextGap() GapIterator {
	if seg.node.hasChildren {
		// Symmetric to PrevGap: the first segment of the right subtree is in
		// a leaf, so this does not recurse further.
		return seg.node.children[seg.index+1].firstSegment().PrevGap()
	}
	return GapIterator{seg.node, seg.index + 1}
}

// PrevNonEmpty returns the iterated segment's predecessor if it is adjacent,
// or the gap before the iterated segment otherwise. If seg.Start() ==
// Functions.MinKey(), PrevNonEmpty will return two terminal iterators.
// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
// non-terminal.
func (seg Iterator) PrevNonEmpty() (Iterator, GapIterator) {
	gap := seg.PrevGap()
	if gap.Range().Length() != 0 {
		return Iterator{}, gap
	}
	return gap.PrevSegment(), GapIterator{}
}

// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
// the gap after the iterated segment otherwise. If seg.End() ==
// Functions.MaxKey(), NextNonEmpty will return two terminal iterators.
// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
// non-terminal.
func (seg Iterator) NextNonEmpty() (Iterator, GapIterator) {
	gap := seg.NextGap()
	if gap.Range().Length() != 0 {
		return Iterator{}, gap
	}
	return gap.NextSegment(), GapIterator{}
}

// A GapIterator is conceptually one of:
//
// - A pointer to a position between two segments, before the first segment, or
// after the last segment in a set, called a *gap*; or
//
// - A terminal iterator, which is a sentinel indicating that the end of
// iteration has been reached.
//
// Note that the gap between two adjacent segments exists (iterators to it are
// non-terminal), but has a length of zero. GapIterator.IsEmpty returns true
// for such gaps. An empty set contains a single gap, spanning the entire range
// of the set's keys.
//
// GapIterators are copyable values and are meaningfully equality-comparable.
// The zero value of GapIterator is a terminal iterator.
//
// Unless otherwise specified, any mutation of a set invalidates all existing
// iterators into the set.
type GapIterator struct {
	// The representation of a GapIterator is identical to that of an Iterator,
	// except that index corresponds to positions between segments in the same
	// way as for node.children (see comment for node.nrSegments).
	node  *node
	index int
}

// Ok returns true if the iterator is not terminal. All other methods are only
// valid for non-terminal iterators.
func (gap GapIterator) Ok() bool {
	return gap.node != nil
}

// Range returns the range spanned by the iterated gap.
func (gap GapIterator) Range() Range {
	return Range{gap.Start(), gap.End()}
}

// Start is equivalent to Range().Start, but should be preferred if only the
// start of the range is needed.
func (gap GapIterator) Start() Key {
	// A gap starts where the preceding segment ends, or at the minimum key if
	// there is no preceding segment.
	if ps := gap.PrevSegment(); ps.Ok() {
		return ps.End()
	}
	return Functions{}.MinKey()
}

// End is equivalent to Range().End, but should be preferred if only the end of
// the range is needed.
func (gap GapIterator) End() Key {
	// A gap ends where the following segment starts, or at the maximum key if
	// there is no following segment.
	if ns := gap.NextSegment(); ns.Ok() {
		return ns.Start()
	}
	return Functions{}.MaxKey()
}

// IsEmpty returns true if the iterated gap is empty (that is, the "gap" is
// between two adjacent segments.)
func (gap GapIterator) IsEmpty() bool {
	return gap.Range().Length() == 0
}

// PrevSegment returns the segment immediately before the iterated gap. If no
// such segment exists, PrevSegment returns a terminal iterator.
func (gap GapIterator) PrevSegment() Iterator {
	return segmentBeforePosition(gap.node, gap.index)
}

// NextSegment returns the segment immediately after the iterated gap. If no
// such segment exists, NextSegment returns a terminal iterator.
func (gap GapIterator) NextSegment() Iterator {
	return segmentAfterPosition(gap.node, gap.index)
}

// PrevGap returns the iterated gap's predecessor. If no such gap exists,
// PrevGap returns a terminal iterator.
func (gap GapIterator) PrevGap() GapIterator {
	seg := gap.PrevSegment()
	if !seg.Ok() {
		return GapIterator{}
	}
	return seg.PrevGap()
}

// NextGap returns the iterated gap's successor. If no such gap exists, NextGap
// returns a terminal iterator.
func (gap GapIterator) NextGap() GapIterator {
	seg := gap.NextSegment()
	if !seg.Ok() {
		return GapIterator{}
	}
	return seg.NextGap()
}

// segmentBeforePosition returns the predecessor segment of the position given
// by n.children[i], which may or may not contain a child. If no such segment
// exists, segmentBeforePosition returns a terminal iterator.
func segmentBeforePosition(n *node, i int) Iterator {
	// Walk up while the position is leftmost in its node; the predecessor is
	// then the segment just left of the position in the first ancestor where
	// the position is not leftmost.
	for i == 0 {
		if n.parent == nil {
			return Iterator{}
		}
		n, i = n.parent, n.parentIndex
	}
	return Iterator{n, i - 1}
}

// segmentAfterPosition returns the successor segment of the position given by
// n.children[i], which may or may not contain a child. If no such segment
// exists, segmentAfterPosition returns a terminal iterator.
func segmentAfterPosition(n *node, i int) Iterator {
	// Walk up while the position is rightmost in its node; the successor is
	// then the segment at the position in the first ancestor where the
	// position is not rightmost.
	for i == n.nrSegments {
		if n.parent == nil {
			return Iterator{}
		}
		n, i = n.parent, n.parentIndex
	}
	return Iterator{n, i}
}

// zeroValueSlice clears each element of slice so that vacated slots do not
// keep referenced objects reachable.
func zeroValueSlice(slice []Value) {
	// TODO: check if Go is actually smart enough to optimize a
	// ClearValue that assigns nil to a memset here
	for i := range slice {
		Functions{}.ClearValue(&slice[i])
	}
}

// zeroNodeSlice nils out each element of slice, releasing references to
// detached child nodes.
func zeroNodeSlice(slice []*node) {
	for i := range slice {
		slice[i] = nil
	}
}

// String stringifies a Set for debugging.
func (s *Set) String() string {
	return s.root.String()
}

// String stringifies a node (and all of its children) for debugging.
func (n *node) String() string {
	var buf bytes.Buffer
	n.writeDebugString(&buf, "")
	return buf.String()
}

// writeDebugString writes an indented representation of the subtree rooted by
// n to buf, prefixing each line with prefix. It also emits WARNING lines for
// internal inconsistencies it detects (wrong hasChildren flag, broken
// parent/parentIndex links).
func (n *node) writeDebugString(buf *bytes.Buffer, prefix string) {
	if n.hasChildren != (n.nrSegments > 0 && n.children[0] != nil) {
		buf.WriteString(prefix)
		buf.WriteString(fmt.Sprintf("WARNING: inconsistent value of hasChildren: got %v, want %v\n", n.hasChildren, !n.hasChildren))
	}
	for i := 0; i < n.nrSegments; i++ {
		if child := n.children[i]; child != nil {
			cprefix := fmt.Sprintf("%s- % 3d ", prefix, i)
			if child.parent != n || child.parentIndex != i {
				buf.WriteString(cprefix)
				buf.WriteString(fmt.Sprintf("WARNING: inconsistent linkage to parent: got (%p, %d), want (%p, %d)\n", child.parent, child.parentIndex, n, i))
			}
			child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, i))
		}
		buf.WriteString(prefix)
		buf.WriteString(fmt.Sprintf("- % 3d: %v => %v\n", i, n.keys[i], n.values[i]))
	}
	// The rightmost child has no segment of its own in this node.
	if child := n.children[n.nrSegments]; child != nil {
		child.writeDebugString(buf, fmt.Sprintf("%s- % 3d ", prefix, n.nrSegments))
	}
}

// SegmentDataSlices represents segments from a set as slices of start, end, and
// values. SegmentDataSlices is primarily used as an intermediate representation
// for save/restore and the layout here is optimized for that.
type SegmentDataSlices struct {
	Start  []Key
	End    []Key
	Values []Value
}

// ExportSortedSlices returns a copy of all segments in the given set, in
// ascending key order.
func (s *Set) ExportSortedSlices() *SegmentDataSlices {
	var sds SegmentDataSlices
	for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		sds.Start = append(sds.Start, seg.Start())
		sds.End = append(sds.End, seg.End())
		sds.Values = append(sds.Values, seg.Value())
	}
	// Clamp capacity to length (three-index slicing) so that callers cannot
	// append into, and thereby share, the returned slices' spare capacity.
	sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
	sds.End = sds.End[:len(sds.End):len(sds.End)]
	sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
	return &sds
}

// ImportSortedSlices initializes the given set from the given slice.
//
// Preconditions: s must be empty. sds must represent a valid set (the segments
// in sds must have valid lengths that do not overlap). The segments in sds
// must be sorted in ascending key order.
func (s *Set) ImportSortedSlices(sds *SegmentDataSlices) error {
	if !s.IsEmpty() {
		return fmt.Errorf("cannot import into non-empty set %v", s)
	}
	gap := s.FirstGap()
	for i := range sds.Start {
		r := Range{sds.Start[i], sds.End[i]}
		// Each imported segment must fit entirely in the gap after the
		// previous one; anything else means overlap or bad ordering.
		if !gap.Range().IsSupersetOf(r) {
			return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
		}
		gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
	}
	return nil
}
diff --git a/pkg/segment/set_state.go b/pkg/segment/set_state.go
new file mode 100644
index 000000000..a763d1915
--- /dev/null
+++ b/pkg/segment/set_state.go
@@ -0,0 +1,25 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package segment

// saveRoot flattens the set into its save/restore representation (see
// SegmentDataSlices). NOTE(review): presumably invoked by the //pkg/state
// save machinery via the saveX naming convention — confirm against
// tools/go_stateify.
func (s *Set) saveRoot() *SegmentDataSlices {
	return s.ExportSortedSlices()
}

// loadRoot reconstructs the set from its save/restore representation. It
// panics if sds does not describe a valid set, since a corrupt serialized
// state cannot be recovered from here.
func (s *Set) loadRoot(sds *SegmentDataSlices) {
	if err := s.ImportSortedSlices(sds); err != nil {
		panic(err)
	}
}
diff --git a/pkg/segment/test/BUILD b/pkg/segment/test/BUILD
new file mode 100644
index 000000000..9d398d71a
--- /dev/null
+++ b/pkg/segment/test/BUILD
@@ -0,0 +1,52 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(
    default_visibility = ["//visibility:private"],
    licenses = ["notice"],  # Apache 2.0
)

load("//tools/go_generics:defs.bzl", "go_template_instance")

go_template_instance(
    name = "int_range",
    out = "int_range.go",
    package = "segment",
    template = "//pkg/segment:generic_range",
    types = {
        "T": "int",
    },
)

go_template_instance(
    name = "int_set",
    out = "int_set.go",
    package = "segment",
    template = "//pkg/segment:generic_set",
    types = {
        "Key": "int",
        "Range": "Range",
        "Value": "int",
        "Functions": "setFunctions",
    },
)

go_library(
    name = "segment",
    testonly = 1,
    srcs = [
        "int_range.go",
        "int_set.go",
        "set_functions.go",
    ],
    importpath = "gvisor.googlesource.com/gvisor/pkg/segment/segment",
    deps = [
        "//pkg/state",
    ],
)

go_test(
    name = "segment_test",
    size = "small",
    srcs = ["segment_test.go"],
    embed = [":segment"],
)
diff --git a/pkg/segment/test/segment_test.go b/pkg/segment/test/segment_test.go
new file mode 100644
index 000000000..7ea24b177
--- /dev/null
+++ b/pkg/segment/test/segment_test.go
@@ -0,0 +1,564 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package segment

import (
	"fmt"
	"math/rand"
	"testing"
)

const (
	// testSize is the baseline number of elements inserted into sets under
	// test, and is chosen to be large enough to ensure interesting amounts of
	// tree rebalancing.
	//
	// Note that because checkSet is called between each insertion/removal in
	// some tests that use it, tests may be quadratic in testSize.
	testSize = 8000

	// valueOffset is the difference between the value and start of test
	// segments.
	valueOffset = 100000
)

// shuffle permutes xs in place using a Fisher-Yates shuffle.
func shuffle(xs []int) {
	for i := range xs {
		j := rand.Intn(i + 1)
		xs[i], xs[j] = xs[j], xs[i]
	}
}

// randPermutation returns a random permutation of the integers [0, size).
func randPermutation(size int) []int {
	p := make([]int, size)
	for i := range p {
		p[i] = i
	}
	shuffle(p)
	return p
}

// checkSet returns an error if s is incorrectly sorted, does not contain
// exactly expectedSegments segments, or contains a segment for which val !=
// key + valueOffset.
+func checkSet(s *Set, expectedSegments int) error { + havePrev := false + prev := 0 + nrSegments := 0 + for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + next := seg.Start() + if havePrev && prev >= next { + return fmt.Errorf("incorrect order: key %d (segment %d) >= key %d (segment %d)", prev, nrSegments-1, next, nrSegments) + } + if got, want := seg.Value(), seg.Start()+valueOffset; got != want { + return fmt.Errorf("segment %d has key %d, value %d (expected %d)", nrSegments, seg.Start, got, want) + } + prev = next + havePrev = true + nrSegments++ + } + if nrSegments != expectedSegments { + return fmt.Errorf("incorrect number of segments: got %d, wanted %d", nrSegments, expectedSegments) + } + return nil +} + +// countSegmentsIn returns the number of segments in s. +func countSegmentsIn(s *Set) int { + var count int + for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + count++ + } + return count +} + +func TestAddRandom(t *testing.T) { + var s Set + order := randPermutation(testSize) + var nrInsertions int + for i, j := range order { + if !s.AddWithoutMerging(Range{j, j + 1}, j+valueOffset) { + t.Errorf("Iteration %d: failed to insert segment with key %d", i, j) + break + } + nrInsertions++ + if err := checkSet(&s, nrInsertions); err != nil { + t.Errorf("Iteration %d: %v", i, err) + break + } + } + if got, want := countSegmentsIn(&s), nrInsertions; got != want { + t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want) + } + if t.Failed() { + t.Logf("Insertion order: %v", order[:nrInsertions]) + t.Logf("Set contents:\n%v", &s) + } +} + +func TestRemoveRandom(t *testing.T) { + var s Set + for i := 0; i < testSize; i++ { + if !s.AddWithoutMerging(Range{i, i + 1}, i+valueOffset) { + t.Fatalf("Failed to insert segment %d", i) + } + } + order := randPermutation(testSize) + var nrRemovals int + for i, j := range order { + seg := s.FindSegment(j) + if !seg.Ok() { + t.Errorf("Iteration %d: failed to find segment with key 
%d", i, j) + break + } + s.Remove(seg) + nrRemovals++ + if err := checkSet(&s, testSize-nrRemovals); err != nil { + t.Errorf("Iteration %d: %v", i, err) + break + } + } + if got, want := countSegmentsIn(&s), testSize-nrRemovals; got != want { + t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want) + } + if t.Failed() { + t.Logf("Removal order: %v", order[:nrRemovals]) + t.Logf("Set contents:\n%v", &s) + t.FailNow() + } +} + +func TestAddSequentialAdjacent(t *testing.T) { + var s Set + var nrInsertions int + for i := 0; i < testSize; i++ { + if !s.AddWithoutMerging(Range{i, i + 1}, i+valueOffset) { + t.Fatalf("Failed to insert segment %d", i) + } + nrInsertions++ + if err := checkSet(&s, nrInsertions); err != nil { + t.Errorf("Iteration %d: %v", i, err) + break + } + } + if got, want := countSegmentsIn(&s), nrInsertions; got != want { + t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want) + } + if t.Failed() { + t.Logf("Set contents:\n%v", &s) + } + + first := s.FirstSegment() + gotSeg, gotGap := first.PrevNonEmpty() + if wantGap := s.FirstGap(); gotSeg.Ok() || gotGap != wantGap { + t.Errorf("FirstSegment().PrevNonEmpty(): got (%v, %v), wanted (<terminal iterator>, %v)", gotSeg, gotGap, wantGap) + } + gotSeg, gotGap = first.NextNonEmpty() + if wantSeg := first.NextSegment(); gotSeg != wantSeg || gotGap.Ok() { + t.Errorf("FirstSegment().NextNonEmpty(): got (%v, %v), wanted (%v, <terminal iterator>)", gotSeg, gotGap, wantSeg) + } + + last := s.LastSegment() + gotSeg, gotGap = last.PrevNonEmpty() + if wantSeg := last.PrevSegment(); gotSeg != wantSeg || gotGap.Ok() { + t.Errorf("LastSegment().PrevNonEmpty(): got (%v, %v), wanted (%v, <terminal iterator>)", gotSeg, gotGap, wantSeg) + } + gotSeg, gotGap = last.NextNonEmpty() + if wantGap := s.LastGap(); gotSeg.Ok() || gotGap != wantGap { + t.Errorf("LastSegment().NextNonEmpty(): got (%v, %v), wanted (<terminal iterator>, %v)", gotSeg, gotGap, wantGap) + } + + for seg := 
first.NextSegment(); seg != last; seg = seg.NextSegment() { + gotSeg, gotGap = seg.PrevNonEmpty() + if wantSeg := seg.PrevSegment(); gotSeg != wantSeg || gotGap.Ok() { + t.Errorf("%v.PrevNonEmpty(): got (%v, %v), wanted (%v, <terminal iterator>)", seg, gotSeg, gotGap, wantSeg) + } + gotSeg, gotGap = seg.NextNonEmpty() + if wantSeg := seg.NextSegment(); gotSeg != wantSeg || gotGap.Ok() { + t.Errorf("%v.NextNonEmpty(): got (%v, %v), wanted (%v, <terminal iterator>)", seg, gotSeg, gotGap, wantSeg) + } + } +} + +func TestAddSequentialNonAdjacent(t *testing.T) { + var s Set + var nrInsertions int + for i := 0; i < testSize; i++ { + // The range here differs from TestAddSequentialAdjacent so that + // consecutive segments are not adjacent. + if !s.AddWithoutMerging(Range{2 * i, 2*i + 1}, 2*i+valueOffset) { + t.Fatalf("Failed to insert segment %d", i) + } + nrInsertions++ + if err := checkSet(&s, nrInsertions); err != nil { + t.Errorf("Iteration %d: %v", i, err) + break + } + } + if got, want := countSegmentsIn(&s), nrInsertions; got != want { + t.Errorf("Wrong final number of segments: got %d, wanted %d", got, want) + } + if t.Failed() { + t.Logf("Set contents:\n%v", &s) + } + + for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + gotSeg, gotGap := seg.PrevNonEmpty() + if wantGap := seg.PrevGap(); gotSeg.Ok() || gotGap != wantGap { + t.Errorf("%v.PrevNonEmpty(): got (%v, %v), wanted (<terminal iterator>, %v)", seg, gotSeg, gotGap, wantGap) + } + gotSeg, gotGap = seg.NextNonEmpty() + if wantGap := seg.NextGap(); gotSeg.Ok() || gotGap != wantGap { + t.Errorf("%v.NextNonEmpty(): got (%v, %v), wanted (<terminal iterator>, %v)", seg, gotSeg, gotGap, wantGap) + } + } +} + +func TestMergeSplit(t *testing.T) { + tests := []struct { + name string + initial []Range + split bool + splitAddr int + final []Range + }{ + { + name: "Add merges after existing segment", + initial: []Range{{1000, 1100}, {1100, 1200}}, + final: []Range{{1000, 1200}}, + }, + { + name: "Add 
merges before existing segment", + initial: []Range{{1100, 1200}, {1000, 1100}}, + final: []Range{{1000, 1200}}, + }, + { + name: "Add merges between existing segments", + initial: []Range{{1000, 1100}, {1200, 1300}, {1100, 1200}}, + final: []Range{{1000, 1300}}, + }, + { + name: "SplitAt does nothing at a free address", + initial: []Range{{100, 200}}, + split: true, + splitAddr: 300, + final: []Range{{100, 200}}, + }, + { + name: "SplitAt does nothing at the beginning of a segment", + initial: []Range{{100, 200}}, + split: true, + splitAddr: 100, + final: []Range{{100, 200}}, + }, + { + name: "SplitAt does nothing at the end of a segment", + initial: []Range{{100, 200}}, + split: true, + splitAddr: 200, + final: []Range{{100, 200}}, + }, + { + name: "SplitAt splits in the middle of a segment", + initial: []Range{{100, 200}}, + split: true, + splitAddr: 150, + final: []Range{{100, 150}, {150, 200}}, + }, + } +Tests: + for _, test := range tests { + var s Set + for _, r := range test.initial { + if !s.Add(r, 0) { + t.Errorf("%s: Add(%v) failed; set contents:\n%v", test.name, r, &s) + continue Tests + } + } + if test.split { + s.SplitAt(test.splitAddr) + } + var i int + for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + if i > len(test.final) { + t.Errorf("%s: Incorrect number of segments: got %d, wanted %d; set contents:\n%v", test.name, countSegmentsIn(&s), len(test.final), &s) + continue Tests + } + if got, want := seg.Range(), test.final[i]; got != want { + t.Errorf("%s: Segment %d mismatch: got %v, wanted %v; set contents:\n%v", test.name, i, got, want, &s) + continue Tests + } + i++ + } + if i < len(test.final) { + t.Errorf("%s: Incorrect number of segments: got %d, wanted %d; set contents:\n%v", test.name, i, len(test.final), &s) + } + } +} + +func TestIsolate(t *testing.T) { + tests := []struct { + name string + initial Range + bounds Range + final []Range + }{ + { + name: "Isolate does not split a segment that falls inside bounds", + initial: 
Range{100, 200}, + bounds: Range{100, 200}, + final: []Range{{100, 200}}, + }, + { + name: "Isolate splits at beginning of segment", + initial: Range{50, 200}, + bounds: Range{100, 200}, + final: []Range{{50, 100}, {100, 200}}, + }, + { + name: "Isolate splits at end of segment", + initial: Range{100, 250}, + bounds: Range{100, 200}, + final: []Range{{100, 200}, {200, 250}}, + }, + { + name: "Isolate splits at beginning and end of segment", + initial: Range{50, 250}, + bounds: Range{100, 200}, + final: []Range{{50, 100}, {100, 200}, {200, 250}}, + }, + } +Tests: + for _, test := range tests { + var s Set + seg := s.Insert(s.FirstGap(), test.initial, 0) + seg = s.Isolate(seg, test.bounds) + if !test.bounds.IsSupersetOf(seg.Range()) { + t.Errorf("%s: Isolated segment %v lies outside bounds %v; set contents:\n%v", test.name, seg.Range(), test.bounds, &s) + } + var i int + for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + if i > len(test.final) { + t.Errorf("%s: Incorrect number of segments: got %d, wanted %d; set contents:\n%v", test.name, countSegmentsIn(&s), len(test.final), &s) + continue Tests + } + if got, want := seg.Range(), test.final[i]; got != want { + t.Errorf("%s: Segment %d mismatch: got %v, wanted %v; set contents:\n%v", test.name, i, got, want, &s) + continue Tests + } + i++ + } + if i < len(test.final) { + t.Errorf("%s: Incorrect number of segments: got %d, wanted %d; set contents:\n%v", test.name, i, len(test.final), &s) + } + } +} + +func benchmarkAddSequential(b *testing.B, size int) { + for n := 0; n < b.N; n++ { + var s Set + for i := 0; i < size; i++ { + if !s.AddWithoutMerging(Range{i, i + 1}, i) { + b.Fatalf("Failed to insert segment %d", i) + } + } + } +} + +func benchmarkAddRandom(b *testing.B, size int) { + order := randPermutation(size) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + var s Set + for _, i := range order { + if !s.AddWithoutMerging(Range{i, i + 1}, i) { + b.Fatalf("Failed to insert segment %d", i) + } + } + 
} +} + +func benchmarkFindSequential(b *testing.B, size int) { + var s Set + for i := 0; i < size; i++ { + if !s.AddWithoutMerging(Range{i, i + 1}, i) { + b.Fatalf("Failed to insert segment %d", i) + } + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + for i := 0; i < size; i++ { + if seg := s.FindSegment(i); !seg.Ok() { + b.Fatalf("Failed to find segment %d", i) + } + } + } +} + +func benchmarkFindRandom(b *testing.B, size int) { + var s Set + for i := 0; i < size; i++ { + if !s.AddWithoutMerging(Range{i, i + 1}, i) { + b.Fatalf("Failed to insert segment %d", i) + } + } + order := randPermutation(size) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + for _, i := range order { + if si := s.FindSegment(i); !si.Ok() { + b.Fatalf("Failed to find segment %d", i) + } + } + } +} + +func benchmarkIteration(b *testing.B, size int) { + var s Set + for i := 0; i < size; i++ { + if !s.AddWithoutMerging(Range{i, i + 1}, i) { + b.Fatalf("Failed to insert segment %d", i) + } + } + + b.ResetTimer() + var count uint64 + for n := 0; n < b.N; n++ { + for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + count++ + } + } + if got, want := count, uint64(size)*uint64(b.N); got != want { + b.Fatalf("Iterated wrong number of segments: got %d, wanted %d", got, want) + } +} + +func benchmarkAddFindRemoveSequential(b *testing.B, size int) { + for n := 0; n < b.N; n++ { + var s Set + for i := 0; i < size; i++ { + if !s.AddWithoutMerging(Range{i, i + 1}, i) { + b.Fatalf("Failed to insert segment %d", i) + } + } + for i := 0; i < size; i++ { + seg := s.FindSegment(i) + if !seg.Ok() { + b.Fatalf("Failed to find segment %d", i) + } + s.Remove(seg) + } + if !s.IsEmpty() { + b.Fatalf("Set not empty after all removals:\n%v", &s) + } + } +} + +func benchmarkAddFindRemoveRandom(b *testing.B, size int) { + order := randPermutation(size) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + var s Set + for _, i := range order { + if !s.AddWithoutMerging(Range{i, i + 1}, i) { + 
b.Fatalf("Failed to insert segment %d", i) + } + } + for _, i := range order { + seg := s.FindSegment(i) + if !seg.Ok() { + b.Fatalf("Failed to find segment %d", i) + } + s.Remove(seg) + } + if !s.IsEmpty() { + b.Fatalf("Set not empty after all removals:\n%v", &s) + } + } +} + +// Although we don't generally expect our segment sets to get this big, they're +// useful for emulating the effect of cache pressure. +var testSizes = []struct { + desc string + size int +}{ + {"64", 1 << 6}, + {"256", 1 << 8}, + {"1K", 1 << 10}, + {"4K", 1 << 12}, + {"16K", 1 << 14}, + {"64K", 1 << 16}, +} + +func BenchmarkAddSequential(b *testing.B) { + for _, test := range testSizes { + b.Run(test.desc, func(b *testing.B) { + benchmarkAddSequential(b, test.size) + }) + } +} + +func BenchmarkAddRandom(b *testing.B) { + for _, test := range testSizes { + b.Run(test.desc, func(b *testing.B) { + benchmarkAddRandom(b, test.size) + }) + } +} + +func BenchmarkFindSequential(b *testing.B) { + for _, test := range testSizes { + b.Run(test.desc, func(b *testing.B) { + benchmarkFindSequential(b, test.size) + }) + } +} + +func BenchmarkFindRandom(b *testing.B) { + for _, test := range testSizes { + b.Run(test.desc, func(b *testing.B) { + benchmarkFindRandom(b, test.size) + }) + } +} + +func BenchmarkIteration(b *testing.B) { + for _, test := range testSizes { + b.Run(test.desc, func(b *testing.B) { + benchmarkIteration(b, test.size) + }) + } +} + +func BenchmarkAddFindRemoveSequential(b *testing.B) { + for _, test := range testSizes { + b.Run(test.desc, func(b *testing.B) { + benchmarkAddFindRemoveSequential(b, test.size) + }) + } +} + +func BenchmarkAddFindRemoveRandom(b *testing.B) { + for _, test := range testSizes { + b.Run(test.desc, func(b *testing.B) { + benchmarkAddFindRemoveRandom(b, test.size) + }) + } +} diff --git a/pkg/segment/test/set_functions.go b/pkg/segment/test/set_functions.go new file mode 100644 index 000000000..37c196ea1 --- /dev/null +++ b/pkg/segment/test/set_functions.go @@ 
-0,0 +1,42 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package segment + +// Basic numeric constants that we define because the math package doesn't. +// TODO: These should be Math.MaxInt64/MinInt64? +const ( + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +type setFunctions struct{} + +func (setFunctions) MinKey() int { + return minInt +} + +func (setFunctions) MaxKey() int { + return maxInt +} + +func (setFunctions) ClearValue(*int) {} + +func (setFunctions) Merge(_ Range, val1 int, _ Range, _ int) (int, bool) { + return val1, true +} + +func (setFunctions) Split(_ Range, val int, _ int) (int, int) { + return val, val +} diff --git a/pkg/sentry/BUILD b/pkg/sentry/BUILD new file mode 100644 index 000000000..d18cf3555 --- /dev/null +++ b/pkg/sentry/BUILD @@ -0,0 +1,12 @@ +# This BUILD file defines a package_group that allows for interdependencies for +# sentry-internal packages. 
+ +package(licenses = ["notice"]) # Apache 2.0 + +package_group( + name = "internal", + packages = [ + "//pkg/sentry/...", + "//runsc/...", + ], +) diff --git a/pkg/sentry/arch/BUILD b/pkg/sentry/arch/BUILD new file mode 100644 index 000000000..a88f57ac7 --- /dev/null +++ b/pkg/sentry/arch/BUILD @@ -0,0 +1,66 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "arch_state", + srcs = [ + "arch.go", + "arch_amd64.go", + "arch_state_x86.go", + "arch_x86.go", + "auxv.go", + "signal_amd64.go", + ], + out = "arch_state.go", + package = "arch", +) + +go_library( + name = "arch", + srcs = [ + "aligned.go", + "arch.go", + "arch_amd64.go", + "arch_amd64.s", + "arch_state.go", + "arch_state_x86.go", + "arch_x86.go", + "auxv.go", + "signal_act.go", + "signal_amd64.go", + "signal_info.go", + "signal_stack.go", + "stack.go", + "syscalls_amd64.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/arch", + visibility = ["//:sandbox"], + deps = [ + ":registers_go_proto", + "//pkg/abi/linux", + "//pkg/binary", + "//pkg/cpuid", + "//pkg/log", + "//pkg/sentry/context", + "//pkg/sentry/limits", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + ], +) + +proto_library( + name = "registers_proto", + srcs = ["registers.proto"], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "registers_go_proto", + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/arch/registers_go_proto", + proto = ":registers_proto", + visibility = ["//visibility:public"], +) diff --git a/pkg/sentry/arch/aligned.go b/pkg/sentry/arch/aligned.go new file mode 100644 index 000000000..193232e27 --- /dev/null +++ b/pkg/sentry/arch/aligned.go @@ -0,0 +1,31 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arch + +import ( + "reflect" +) + +// alignedBytes returns a slice of size bytes, aligned in memory to the given +// alignment. This is used because we require certain structures to be aligned +// in a specific way (for example, the X86 floating point data). +func alignedBytes(size, alignment uint) []byte { + data := make([]byte, size+alignment-1) + offset := uint(reflect.ValueOf(data).Index(0).Addr().Pointer() % uintptr(alignment)) + if offset == 0 { + return data[:size:size] + } + return data[alignment-offset:][:size:size] +} diff --git a/pkg/sentry/arch/arch.go b/pkg/sentry/arch/arch.go new file mode 100644 index 000000000..021789e4b --- /dev/null +++ b/pkg/sentry/arch/arch.go @@ -0,0 +1,351 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package arch provides abstractions around architecture-dependent details, +// such as syscall calling conventions, native types, etc. +package arch + +import ( + "fmt" + "io" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/cpuid" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// Arch describes an architecture. +type Arch int + +const ( + // AMD64 is the x86-64 architecture. + AMD64 Arch = iota +) + +// String implements fmt.Stringer. +func (a Arch) String() string { + switch a { + case AMD64: + return "amd64" + default: + return fmt.Sprintf("Arch(%d)", a) + } +} + +// FloatingPointData is a generic type, and will always be passed as a pointer. +// We rely on the individual arch implementations to meet all the necessary +// requirements. For example, on x86 the region must be 16-byte aligned and 512 +// bytes in size. +type FloatingPointData byte + +// Context provides architecture-dependent information for a specific thread. +// +// NOTE: Currently we use uintptr here to refer to a generic native +// register value. While this will work for the foreseeable future, it isn't +// strictly correct. We may want to create some abstraction that makes this +// more clear or enables us to store values of arbitrary widths. This is +// particularly true for RegisterMap(). +type Context interface { + // Arch returns the architecture for this Context. + Arch() Arch + + // Native converts a generic type to a native value. + // + // Because the architecture is not specified here, we may be dealing + // with return values of varying sizes (for example ARCH_GETFS). This + // is a simple utility function to convert to the native size in these + // cases, and then we can CopyOut. + Native(val uintptr) interface{} + + // Value converts a native type back to a generic value. 
+ // Once a value has been converted to native via the above call -- it + // can be converted back here. + Value(val interface{}) uintptr + + // Width returns the number of bytes for a native value. + Width() uint + + // Fork creates a clone of the context. + Fork() Context + + // SyscallNo returns the syscall number. + SyscallNo() uintptr + + // SyscallArgs returns the syscall arguments in an array. + SyscallArgs() SyscallArguments + + // Return returns the return value for a system call. + Return() uintptr + + // SetReturn sets the return value for a system call. + SetReturn(value uintptr) + + // RestartSyscall reverses over the current syscall instruction, such that + // when the application resumes execution the syscall will be re-attempted. + RestartSyscall() + + // RestartSyscallWithRestartBlock reverses over the current syscall + // instraction and overwrites the current syscall number with that of + // restart_syscall(2). This causes the application to restart the current + // syscall with a custom function when execution resumes. + RestartSyscallWithRestartBlock() + + // IP returns the current instruction pointer. + IP() uintptr + + // SetIP sets the current instruction pointer. + SetIP(value uintptr) + + // Stack returns the current stack pointer. + Stack() uintptr + + // SetStack sets the current stack pointer. + SetStack(value uintptr) + + // SetRSEQInterruptedIP sets the register that contains the old IP when a + // restartable sequence is interrupted. + SetRSEQInterruptedIP(value uintptr) + + // StateData returns a pointer to underlying architecture state. + StateData() *State + + // RegisterMap returns a map of all registers. + RegisterMap() (map[string]uintptr, error) + + // NewSignalAct returns a new object that is equivalent to struct sigaction + // in the guest architecture. + NewSignalAct() NativeSignalAct + + // NewSignalStack returns a new object that is equivalent to stack_t in the + // guest architecture. 
+ NewSignalStack() NativeSignalStack + + // SignalSetup modifies the context in preparation for handling the + // given signal. + // + // st is the stack where the signal handler frame should be + // constructed. + // + // act is the SignalAct that specifies how this signal is being + // handled. + // + // info is the SignalInfo of the signal being delivered. + // + // alt is the alternate signal stack (even if the alternate signal + // stack is not going to be used). + // + // sigset is the signal mask before entering the signal handler. + SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt *SignalStack, sigset linux.SignalSet) error + + // SignalRestore restores context after returning from a signal + // handler. + // + // st is the current thread stack. + // + // rt is true if SignalRestore is being entered from rt_sigreturn and + // false if SignalRestore is being entered from sigreturn. + // SignalRestore returns the thread's new signal mask. + SignalRestore(st *Stack, rt bool) (linux.SignalSet, error) + + // CPUIDEmulate emulates a CPUID instruction according to current register state. + CPUIDEmulate(l log.Logger) + + // SingleStep returns true if single stepping is enabled. + SingleStep() bool + + // SetSingleStep enables single stepping. + SetSingleStep() + + // ClearSingleStep disables single stepping. + ClearSingleStep() + + // FloatingPointData will be passed to underlying save routines. + FloatingPointData() *FloatingPointData + + // NewMmapLayout returns a layout for a new MM, where MinAddr for the + // returned layout must be no lower than min, and MaxAddr for the returned + // layout must be no higher than max. Repeated calls to NewMmapLayout may + // return different layouts. + NewMmapLayout(min, max usermem.Addr, limits *limits.LimitSet) (MmapLayout, error) + + // PIELoadAddress returns a preferred load address for a + // position-independent executable within l. 
+ PIELoadAddress(l MmapLayout) usermem.Addr + + // FeatureSet returns the FeatureSet in use in this context. + FeatureSet() *cpuid.FeatureSet + + // Hack around our package dependences being too broken to support the + // equivalent of arch_ptrace(): + + // PtracePeekUser implements ptrace(PTRACE_PEEKUSR). + PtracePeekUser(addr uintptr) (interface{}, error) + + // PtracePokeUser implements ptrace(PTRACE_POKEUSR). + PtracePokeUser(addr, data uintptr) error + + // PtraceGetRegs implements ptrace(PTRACE_GETREGS) by writing the + // general-purpose registers represented by this Context to dst and + // returning the number of bytes written. + PtraceGetRegs(dst io.Writer) (int, error) + + // PtraceSetRegs implements ptrace(PTRACE_SETREGS) by reading + // general-purpose registers from src into this Context and returning the + // number of bytes read. + PtraceSetRegs(src io.Reader) (int, error) + + // PtraceGetFPRegs implements ptrace(PTRACE_GETFPREGS) by writing the + // floating-point registers represented by this Context to addr in dst and + // returning the number of bytes written. + PtraceGetFPRegs(dst io.Writer) (int, error) + + // PtraceSetFPRegs implements ptrace(PTRACE_SETFPREGS) by reading + // floating-point registers from src into this Context and returning the + // number of bytes read. + PtraceSetFPRegs(src io.Reader) (int, error) + + // PtraceGetRegSet implements ptrace(PTRACE_GETREGSET) by writing the + // register set given by architecture-defined value regset from this + // Context to dst and returning the number of bytes written, which must be + // less than or equal to maxlen. + PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int, error) + + // PtraceSetRegSet implements ptrace(PTRACE_SETREGSET) by reading the + // register set given by architecture-defined value regset from src and + // returning the number of bytes read, which must be less than or equal to + // maxlen. 
+ PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int, error) + + // FullRestore returns 'true' if all CPU registers must be restored + // when switching to the untrusted application. Typically a task enters + // and leaves the kernel via a system call. Platform.Switch() may + // optimize for this by not saving/restoring all registers if allowed + // by the ABI. For e.g. the amd64 ABI specifies that syscall clobbers + // %rcx and %r11. If FullRestore returns true then these optimizations + // must be disabled and all registers restored. + FullRestore() bool +} + +// MmapDirection is a search direction for mmaps. +type MmapDirection int + +const ( + // MmapBottomUp instructs mmap to prefer lower addresses. + MmapBottomUp MmapDirection = iota + + // MmapTopDown instructs mmap to prefer higher addresses. + MmapTopDown +) + +// MmapLayout defines the layout of the user address space for a particular +// MemoryManager. +// +// Note that "highest address" below is always exclusive. +type MmapLayout struct { + // MinAddr is the lowest mappable address. + MinAddr usermem.Addr + + // MaxAddr is the highest mappable address. + MaxAddr usermem.Addr + + // BottomUpBase is the lowest address that may be returned for a + // MmapBottomUp mmap. + BottomUpBase usermem.Addr + + // TopDownBase is the highest address that may be returned for a + // MmapTopDown mmap. + TopDownBase usermem.Addr + + // DefaultDirection is the direction for most non-fixed mmaps in this + // layout. + DefaultDirection MmapDirection + + // MaxStackRand is the maximum randomization to apply to stack + // allocations to maintain a proper gap between the stack and + // TopDownBase. + MaxStackRand uint64 +} + +// Valid returns true if this layout is valid. 
+func (m *MmapLayout) Valid() bool { + if m.MinAddr > m.MaxAddr { + return false + } + if m.BottomUpBase < m.MinAddr { + return false + } + if m.BottomUpBase > m.MaxAddr { + return false + } + if m.TopDownBase < m.MinAddr { + return false + } + if m.TopDownBase > m.MaxAddr { + return false + } + return true +} + +// SyscallArgument is an argument supplied to a syscall implementation. The +// methods used to access the arguments are named after the ***C type name*** and +// they convert to the closest Go type available. For example, Int() refers to a +// 32-bit signed integer argument represented in Go as an int32. +// +// Using the accessor methods guarantees that the conversion between types is +// correct, taking into account size and signedness (i.e., zero-extension vs +// signed-extension). +type SyscallArgument struct { + // Prefer to use accessor methods instead of 'Value' directly. + Value uintptr +} + +// SyscallArguments represents the set of arguments passed to a syscall. +type SyscallArguments [6]SyscallArgument + +// Pointer returns the usermem.Addr representation of a pointer argument. +func (a SyscallArgument) Pointer() usermem.Addr { + return usermem.Addr(a.Value) +} + +// Int returns the int32 representation of a 32-bit signed integer argument. +func (a SyscallArgument) Int() int32 { + return int32(a.Value) +} + +// Uint returns the uint32 representation of a 32-bit unsigned integer argument. +func (a SyscallArgument) Uint() uint32 { + return uint32(a.Value) +} + +// Int64 returns the int64 representation of a 64-bit signed integer argument. +func (a SyscallArgument) Int64() int64 { + return int64(a.Value) +} + +// Uint64 returns the uint64 representation of a 64-bit unsigned integer argument. +func (a SyscallArgument) Uint64() uint64 { + return uint64(a.Value) +} + +// SizeT returns the uint representation of a size_t argument. 
+func (a SyscallArgument) SizeT() uint { + return uint(a.Value) +} + +// ModeT returns the int representation of a mode_t argument. +func (a SyscallArgument) ModeT() uint { + return uint(uint16(a.Value)) +} diff --git a/pkg/sentry/arch/arch_amd64.go b/pkg/sentry/arch/arch_amd64.go new file mode 100644 index 000000000..23526fe8e --- /dev/null +++ b/pkg/sentry/arch/arch_amd64.go @@ -0,0 +1,302 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arch + +import ( + "bytes" + "fmt" + "math/rand" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/cpuid" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// These constants come directly from Linux. +const ( + // maxAddr64 is the maximum userspace address. It is TASK_SIZE in Linux + // for a 64-bit process. + maxAddr64 usermem.Addr = (1 << 47) - usermem.PageSize + + // maxStackRand64 is the maximum randomization to apply to the stack. + // It is defined by arch/x86/mm/mmap.c:stack_maxrandom_size in Linux. + maxStackRand64 = 16 << 30 // 16 GB + + // maxMmapRand64 is the maximum randomization to apply to the mmap + // layout. It is defined by arch/x86/mm/mmap.c:arch_mmap_rnd in Linux. + maxMmapRand64 = (1 << 28) * usermem.PageSize + + // minGap64 is the minimum gap to leave at the top of the address space + // for the stack. 
It is defined by arch/x86/mm/mmap.c:MIN_GAP in Linux. + minGap64 = (128 << 20) + maxStackRand64 + + // preferredPIELoadAddr is the standard Linux position-independent + // executable base load address. It is ELF_ET_DYN_BASE in Linux. + // + // The Platform {Min,Max}UserAddress() may preclude loading at this + // address. See other preferredFoo comments below. + preferredPIELoadAddr usermem.Addr = maxAddr64 / 3 * 2 +) + +// These constants are selected as heuristics to help make the Platform's +// potentially limited address space conform as closely to Linux as possible. +const ( + // Select a preferred minimum TopDownBase address. + // + // Some applications (TSAN and other *SANs) are very particular about + // the way the Linux mmap allocator layouts out the address space. + // + // TSAN in particular expects top down allocations to be made in the + // range [0x7e8000000000, 0x800000000000). + // + // The minimum TopDownBase on Linux would be: + // 0x800000000000 - minGap64 - maxMmapRand64 = 0x7efbf8000000. + // + // (minGap64 because TSAN uses a small RLIMIT_STACK.) + // + // 0x7e8000000000 is selected arbitrarily by TSAN to leave room for + // allocations below TopDownBase. + // + // N.B. ASAN and MSAN are more forgiving; ASAN allows allocations all + // the way down to 0x10007fff8000, and MSAN down to 0x700000000000. + // + // Of course, there is no hard minimum to allocation; an allocator can + // search all the way from TopDownBase to Min. However, TSAN declared + // their range "good enough". + // + // We would like to pick a TopDownBase such that it is unlikely that an + // allocator will select an address below TSAN's minimum. We achieve + // this by trying to leave a sizable gap below TopDownBase. + // + // This is all "preferred" because the layout min/max address may not + // allow us to select such a TopDownBase, in which case we have to fall + // back to a layout that TSAN may not be happy with. 
+ preferredTopDownAllocMin usermem.Addr = 0x7e8000000000 + preferredAllocationGap = 128 << 30 // 128 GB + preferredTopDownBaseMin = preferredTopDownAllocMin + preferredAllocationGap + + // minMmapRand64 is the smallest we are willing to make the + // randomization to stay above preferredTopDownBaseMin. + minMmapRand64 = (1 << 26) * usermem.PageSize +) + +// context64 represents an AMD64 context. +type context64 struct { + State + sigFPState []x86FPState // fpstate to be restored on sigreturn. +} + +// Arch implements Context.Arch. +func (c *context64) Arch() Arch { + return AMD64 +} + +func (c *context64) copySigFPState() []x86FPState { + var sigfps []x86FPState + for _, s := range c.sigFPState { + sigfps = append(sigfps, s.fork()) + } + return sigfps +} + +// Fork returns an exact copy of this context. +func (c *context64) Fork() Context { + return &context64{ + State: c.State.Fork(), + sigFPState: c.copySigFPState(), + } +} + +// Return returns the current syscall return value. +func (c *context64) Return() uintptr { + return uintptr(c.Regs.Rax) +} + +// SetReturn sets the syscall return value. +func (c *context64) SetReturn(value uintptr) { + c.Regs.Rax = uint64(value) +} + +// IP returns the current instruction pointer. +func (c *context64) IP() uintptr { + return uintptr(c.Regs.Rip) +} + +// SetIP sets the current instruction pointer. +func (c *context64) SetIP(value uintptr) { + c.Regs.Rip = uint64(value) +} + +// Stack returns the current stack pointer. +func (c *context64) Stack() uintptr { + return uintptr(c.Regs.Rsp) +} + +// SetStack sets the current stack pointer. +func (c *context64) SetStack(value uintptr) { + c.Regs.Rsp = uint64(value) +} + +// SetRSEQInterruptedIP implements Context.SetRSEQInterruptedIP. +func (c *context64) SetRSEQInterruptedIP(value uintptr) { + c.Regs.R10 = uint64(value) +} + +// Native returns the native type for the given val. 
+func (c *context64) Native(val uintptr) interface{} { + v := uint64(val) + return &v +} + +// Value returns the generic val for the given native type. +func (c *context64) Value(val interface{}) uintptr { + return uintptr(*val.(*uint64)) +} + +// Width returns the byte width of this architecture. +func (c *context64) Width() uint { + return 8 +} + +// FeatureSet returns the FeatureSet in use. +func (c *context64) FeatureSet() *cpuid.FeatureSet { + return c.State.FeatureSet +} + +// mmapRand returns a random adjustment for randomizing an mmap layout. +func mmapRand(max uint64) usermem.Addr { + return usermem.Addr(rand.Int63n(int64(max))).RoundDown() +} + +// NewMmapLayout implements Context.NewMmapLayout consistently with Linux. +func (c *context64) NewMmapLayout(min, max usermem.Addr, r *limits.LimitSet) (MmapLayout, error) { + min, ok := min.RoundUp() + if !ok { + return MmapLayout{}, syscall.EINVAL + } + if max > maxAddr64 { + max = maxAddr64 + } + max = max.RoundDown() + + if min > max { + return MmapLayout{}, syscall.EINVAL + } + + stackSize := r.Get(limits.Stack) + + // MAX_GAP in Linux. + maxGap := (max / 6) * 5 + gap := usermem.Addr(stackSize.Cur) + if gap < minGap64 { + gap = minGap64 + } + if gap > maxGap { + gap = maxGap + } + defaultDir := MmapTopDown + if stackSize.Cur == limits.Infinity { + defaultDir = MmapBottomUp + } + + topDownMin := max - gap - maxMmapRand64 + maxRand := usermem.Addr(maxMmapRand64) + if topDownMin < preferredTopDownBaseMin { + // Try to keep TopDownBase above preferredTopDownBaseMin by + // shrinking maxRand. + maxAdjust := maxRand - minMmapRand64 + needAdjust := preferredTopDownBaseMin - topDownMin + if needAdjust <= maxAdjust { + maxRand -= needAdjust + } + } + + rnd := mmapRand(uint64(maxRand)) + l := MmapLayout{ + MinAddr: min, + MaxAddr: max, + // TASK_UNMAPPED_BASE in Linux. 
+ BottomUpBase: (max/3 + rnd).RoundDown(), + TopDownBase: (max - gap - rnd).RoundDown(), + DefaultDirection: defaultDir, + // We may have reduced the maximum randomization to keep + // TopDownBase above preferredTopDownBaseMin while maintaining + // our stack gap. Stack allocations must use that max + // randomization to avoiding eating into the gap. + MaxStackRand: uint64(maxRand), + } + + // Final sanity check on the layout. + if !l.Valid() { + panic(fmt.Sprintf("Invalid MmapLayout: %+v", l)) + } + + return l, nil +} + +// PIELoadAddress implements Context.PIELoadAddress. +func (c *context64) PIELoadAddress(l MmapLayout) usermem.Addr { + base := preferredPIELoadAddr + max, ok := base.AddLength(maxMmapRand64) + if !ok { + panic(fmt.Sprintf("preferredPIELoadAddr %#x too large", base)) + } + + if max > l.MaxAddr { + // preferredPIELoadAddr won't fit; fall back to the standard + // Linux behavior of 2/3 of TopDownBase. TSAN won't like this. + // + // Don't bother trying to shrink the randomization for now. + base = l.TopDownBase / 3 * 2 + } + + return base + mmapRand(maxMmapRand64) +} + +// userStructSize is the size in bytes of Linux's struct user on amd64. +const userStructSize = 928 + +// PtracePeekUser implements Context.PtracePeekUser. +func (c *context64) PtracePeekUser(addr uintptr) (interface{}, error) { + if addr&7 != 0 || addr >= userStructSize { + return nil, syscall.EIO + } + // PTRACE_PEEKUSER and PTRACE_POKEUSER are only effective on regs and + // u_debugreg, returning 0 or silently no-oping for other fields + // respectively. + if addr < uintptr(ptraceRegsSize) { + buf := binary.Marshal(nil, usermem.ByteOrder, c.ptraceGetRegs()) + return c.Native(uintptr(usermem.ByteOrder.Uint64(buf[addr:]))), nil + } + // TODO: debug registers + return c.Native(0), nil +} + +// PtracePokeUser implements Context.PtracePokeUser. 
+func (c *context64) PtracePokeUser(addr, data uintptr) error { + if addr&7 != 0 || addr >= userStructSize { + return syscall.EIO + } + if addr < uintptr(ptraceRegsSize) { + buf := binary.Marshal(nil, usermem.ByteOrder, c.ptraceGetRegs()) + usermem.ByteOrder.PutUint64(buf[addr:], uint64(data)) + _, err := c.PtraceSetRegs(bytes.NewBuffer(buf)) + return err + } + // TODO: debug registers + return nil +} diff --git a/pkg/sentry/arch/arch_amd64.s b/pkg/sentry/arch/arch_amd64.s new file mode 100644 index 000000000..10d621b6d --- /dev/null +++ b/pkg/sentry/arch/arch_amd64.s @@ -0,0 +1,135 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "textflag.h" + +// MXCSR_DEFAULT is the reset value of MXCSR (Intel SDM Vol. 2, Ch. 3.2 +// "LDMXCSR") +#define MXCSR_DEFAULT 0x1f80 + +// MXCSR_OFFSET is the offset in bytes of the MXCSR field from the start of the +// FXSAVE/XSAVE area. (Intel SDM Vol. 1, Table 10-2 "Format of an FXSAVE Area") +#define MXCSR_OFFSET 24 + +// initX86FPState initializes floating point state. +// +// func initX86FPState(data *FloatingPointData, useXsave bool) +// +// We need to clear out and initialize an empty fp state area since the sentry +// may have left sensitive information in the floating point registers. 
+// +// Preconditions: data is zeroed +TEXT ·initX86FPState(SB), $24-16 + // Save MXCSR (callee-save) + STMXCSR mxcsr-8(SP) + + // Save x87 CW (callee-save) + FSTCW cw-16(SP) + + MOVQ fpState+0(FP), DI + + // Do we use xsave? + MOVBQZX useXsave+8(FP), AX + TESTQ AX, AX + JZ no_xsave + + // Use XRSTOR to clear all FP state to an initial state. + // + // The fpState XSAVE area is zeroed on function entry, meaning + // XSTATE_BV is zero. + // + // "If RFBM[i] = 1 and bit i is clear in the XSTATE_BV field in the + // XSAVE header, XRSTOR initializes state component i." + // + // Initialization is defined in SDM Vol 1, Chapter 13.3. It puts all + // the registers in a reasonable initial state, except MXCSR: + // + // "The MXCSR register is part of state component 1, SSE state (see + // Section 13.5.2). However, the standard form of XRSTOR loads the + // MXCSR register from memory whenever the RFBM[1] (SSE) or RFBM[2] + // (AVX) is set, regardless of the values of XSTATE_BV[1] and + // XSTATE_BV[2]." + + // Set MXCSR to the default value. + MOVL $MXCSR_DEFAULT, MXCSR_OFFSET(DI) + + // Initialize registers with XRSTOR. + MOVL $0xffffffff, AX + MOVL $0xffffffff, DX + BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0x2f // XRSTOR64 0(DI) + + // Now that all the state has been reset, write it back out to the + // XSAVE area. + BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0x27 // XSAVE64 0(DI) + + JMP out + +no_xsave: + // Clear out existing X values. + PXOR X0, X0 + MOVO X0, X1 + MOVO X0, X2 + MOVO X0, X3 + MOVO X0, X4 + MOVO X0, X5 + MOVO X0, X6 + MOVO X0, X7 + MOVO X0, X8 + MOVO X0, X9 + MOVO X0, X10 + MOVO X0, X11 + MOVO X0, X12 + MOVO X0, X13 + MOVO X0, X14 + MOVO X0, X15 + + // Zero out %rax and store into MMX registers. MMX registers are + // an alias of 8x64 bits of the 8x80 bits used for the original + // x87 registers. Storing zero into them will reset the FPU registers + // to bits [63:0] = 0, [79:64] = 1. 
But the contents aren't too + // important, just the fact that we have reset them to a known value. + XORQ AX, AX + MOVQ AX, M0 + MOVQ AX, M1 + MOVQ AX, M2 + MOVQ AX, M3 + MOVQ AX, M4 + MOVQ AX, M5 + MOVQ AX, M6 + MOVQ AX, M7 + + // The Go assembler doesn't support FNINIT, so we use BYTE. + // This will: + // - Reset FPU control word to 0x037f + // - Clear FPU status word + // - Reset FPU tag word to 0xffff + // - Clear FPU data pointer + // - Clear FPU instruction pointer + BYTE $0xDB; BYTE $0xE3; // FNINIT + + // Reset MXCSR. + MOVL $MXCSR_DEFAULT, tmpmxcsr-24(SP) + LDMXCSR tmpmxcsr-24(SP) + + // Save the floating point state with fxsave. + FXSAVE64 0(DI) + +out: + // Restore MXCSR. + LDMXCSR mxcsr-8(SP) + + // Restore x87 CW. + FLDCW cw-16(SP) + + RET diff --git a/pkg/sentry/arch/arch_state_x86.go b/pkg/sentry/arch/arch_state_x86.go new file mode 100644 index 000000000..cb38d098a --- /dev/null +++ b/pkg/sentry/arch/arch_state_x86.go @@ -0,0 +1,97 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arch + +import ( + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/log" +) + +// warnOnce is used to warn about truncated state only once. +var warnOnce sync.Once + +// afterLoad is invoked by stateify. +func (s *State) afterLoad() { + old := s.x86FPState + + // Recreate the slice. 
This is done to ensure that it is aligned + // appropriately in memory, and large enough to accommodate any new + // state that may be saved by the new CPU. Even if extraneous new state + // is saved, the state we care about is guaranteed to be a subset of + // new state. Later optimizations can use less space when using a + // smaller state component bitmap. Intel SDM section 13 has more info. + s.x86FPState = newX86FPState() + + // x86FPState always contains all the FP state supported by the host. + // We may have come from a newer machine that supports additional state + // which we cannot restore. + // + // The x86 FP state areas are backwards compatible, so we can simply + // truncate the additional floating point state. Applications should + // not depend on the truncated state because it should relate only to + // features that were not exposed in the app FeatureSet. + if len(s.x86FPState) < len(old) { + warnOnce.Do(func() { + // This will occur on every instance of state, don't + // bother warning more than once. + log.Infof("dropping %d bytes of floating point state; the application should not depend on this state", len(old)-len(s.x86FPState)) + }) + } + + // Copy to the new, aligned location. + copy(s.x86FPState, old) +} + +type syscallPtraceRegs struct { + R15 uint64 + R14 uint64 + R13 uint64 + R12 uint64 + Rbp uint64 + Rbx uint64 + R11 uint64 + R10 uint64 + R9 uint64 + R8 uint64 + Rax uint64 + Rcx uint64 + Rdx uint64 + Rsi uint64 + Rdi uint64 + Orig_rax uint64 + Rip uint64 + Cs uint64 + Eflags uint64 + Rsp uint64 + Ss uint64 + Fs_base uint64 + Gs_base uint64 + Ds uint64 + Es uint64 + Fs uint64 + Gs uint64 +} + +// saveRegs is invoked by stateify. +func (s *State) saveRegs() syscallPtraceRegs { + return syscallPtraceRegs(s.Regs) +} + +// loadRegs is invoked by stateify. 
+func (s *State) loadRegs(r syscallPtraceRegs) { + s.Regs = syscall.PtraceRegs(r) +} diff --git a/pkg/sentry/arch/arch_x86.go b/pkg/sentry/arch/arch_x86.go new file mode 100644 index 000000000..5cc4f8377 --- /dev/null +++ b/pkg/sentry/arch/arch_x86.go @@ -0,0 +1,613 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 i386 + +package arch + +import ( + "fmt" + "io" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/cpuid" + "gvisor.googlesource.com/gvisor/pkg/log" + rpb "gvisor.googlesource.com/gvisor/pkg/sentry/arch/registers_go_proto" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// System-related constants for x86. +const ( + // SyscallWidth is the width of syscall, sysenter, and int 80 insturctions. + SyscallWidth = 2 +) + +// EFLAGS register bits. +const ( + // eflagsCF is the mask for the carry flag. + eflagsCF = uint64(1) << 0 + // eflagsPF is the mask for the parity flag. + eflagsPF = uint64(1) << 2 + // eflagsAF is the mask for the auxiliary carry flag. + eflagsAF = uint64(1) << 4 + // eflagsZF is the mask for the zero flag. + eflagsZF = uint64(1) << 6 + // eflagsSF is the mask for the sign flag. + eflagsSF = uint64(1) << 7 + // eflagsTF is the mask for the trap flag. + eflagsTF = uint64(1) << 8 + // eflagsIF is the mask for the interrupt flag. 
+ eflagsIF = uint64(1) << 9 + // eflagsDF is the mask for the direction flag. + eflagsDF = uint64(1) << 10 + // eflagsOF is the mask for the overflow flag. + eflagsOF = uint64(1) << 11 + // eflagsIOPL is the mask for the I/O privilege level. + eflagsIOPL = uint64(3) << 12 + // eflagsNT is the mask for the nested task bit. + eflagsNT = uint64(1) << 14 + // eflagsRF is the mask for the resume flag. + eflagsRF = uint64(1) << 16 + // eflagsVM is the mask for the virtual mode bit. + eflagsVM = uint64(1) << 17 + // eflagsAC is the mask for the alignment check / access control bit. + eflagsAC = uint64(1) << 18 + // eflagsVIF is the mask for the virtual interrupt flag. + eflagsVIF = uint64(1) << 19 + // eflagsVIP is the mask for the virtual interrupt pending bit. + eflagsVIP = uint64(1) << 20 + // eflagsID is the mask for the CPUID detection bit. + eflagsID = uint64(1) << 21 + + // eflagsPtraceMutable is the mask for the set of EFLAGS that may be + // changed by ptrace(PTRACE_SETREGS). eflagsPtraceMutable is analogous to + // Linux's FLAG_MASK. + eflagsPtraceMutable = eflagsCF | eflagsPF | eflagsAF | eflagsZF | eflagsSF | eflagsTF | eflagsDF | eflagsOF | eflagsRF | eflagsAC | eflagsNT + + // eflagsRestorable is the mask for the set of EFLAGS that may be changed by + // SignalReturn. eflagsRestorable is analogous to Linux's FIX_EFLAGS. + eflagsRestorable = eflagsAC | eflagsOF | eflagsDF | eflagsTF | eflagsSF | eflagsZF | eflagsAF | eflagsPF | eflagsCF | eflagsRF +) + +// Segment selectors. See arch/x86/include/asm/segment.h. +const ( + userCS = 0x33 // guest ring 3 code selector + user32CS = 0x23 // guest ring 3 32 bit code selector + userDS = 0x2b // guest ring 3 data selector + + _FS_TLS_SEL = 0x63 // Linux FS thread-local storage selector + _GS_TLS_SEL = 0x6b // Linux GS thread-local storage selector +) + +var ( + // TrapInstruction is the x86 trap instruction. + TrapInstruction = [1]byte{0xcc} + + // CPUIDInstruction is the x86 CPUID instruction. 
+ CPUIDInstruction = [2]byte{0xf, 0xa2} + + // X86TrapFlag is an exported const for use by other packages. + X86TrapFlag uint64 = (1 << 8) +) + +// x86FPState is x86 floating point state. +type x86FPState []byte + +// initX86FPState (defined in asm files) sets up initial state. +func initX86FPState(data *FloatingPointData, useXsave bool) + +func newX86FPStateSlice() []byte { + size, align := cpuid.HostFeatureSet().ExtendedStateSize() + capacity := size + // Always use at least 4096 bytes. + if capacity < 4096 { + capacity = 4096 + } + return alignedBytes(capacity, align)[:size] +} + +// newX86FPState returns an initialized floating point state. +// +// The returned state is large enough to store all floating point state +// supported by host, even if the app won't use much of it due to a restricted +// FeatureSet. Since they may still be able to see state not advertised by +// CPUID we must ensure it does not contain any sentry state. +func newX86FPState() x86FPState { + f := x86FPState(newX86FPStateSlice()) + initX86FPState(f.FloatingPointData(), cpuid.HostFeatureSet().UseXsave()) + return f +} + +// fork creates and returns an identical copy of the x86 floating point state. +func (f x86FPState) fork() x86FPState { + n := x86FPState(newX86FPStateSlice()) + copy(n, f) + return n +} + +// FloatingPointData returns the raw data pointer. +func (f x86FPState) FloatingPointData() *FloatingPointData { + return (*FloatingPointData)(&f[0]) +} + +// NewFloatingPointData returns a new floating point data blob. +// +// This is primarily for use in tests. +func NewFloatingPointData() *FloatingPointData { + return (*FloatingPointData)(&(newX86FPState()[0])) +} + +// State contains the common architecture bits for X86 (the build tag of this +// file ensures it's only built on x86). +type State struct { + // The system registers. + Regs syscall.PtraceRegs `state:".(syscallPtraceRegs)"` + + // Our floating point state. 
+ x86FPState `state:"wait"` + + // FeatureSet is a pointer to the currently active feature set. + FeatureSet *cpuid.FeatureSet +} + +// Proto returns a protobuf representation of the system registers in State. +func (s State) Proto() *rpb.Registers { + regs := &rpb.AMD64Registers{ + Rax: s.Regs.Rax, + Rbx: s.Regs.Rbx, + Rcx: s.Regs.Rcx, + Rdx: s.Regs.Rdx, + Rsi: s.Regs.Rsi, + Rdi: s.Regs.Rdi, + Rsp: s.Regs.Rsp, + Rbp: s.Regs.Rbp, + R8: s.Regs.R8, + R9: s.Regs.R9, + R10: s.Regs.R10, + R11: s.Regs.R11, + R12: s.Regs.R12, + R13: s.Regs.R13, + R14: s.Regs.R14, + R15: s.Regs.R15, + Rip: s.Regs.Rip, + Rflags: s.Regs.Eflags, + OrigRax: s.Regs.Orig_rax, + Cs: s.Regs.Cs, + Ds: s.Regs.Ds, + Es: s.Regs.Es, + Fs: s.Regs.Fs, + Gs: s.Regs.Gs, + Ss: s.Regs.Ss, + FsBase: s.Regs.Fs_base, + GsBase: s.Regs.Gs_base, + } + return &rpb.Registers{Arch: &rpb.Registers_Amd64{Amd64: regs}} +} + +// Fork creates and returns an identical copy of the state. +func (s *State) Fork() State { + return State{ + Regs: s.Regs, + x86FPState: s.x86FPState.fork(), + FeatureSet: s.FeatureSet, + } +} + +// StateData implements Context.StateData. +func (s *State) StateData() *State { + return s +} + +// CPUIDEmulate emulates a cpuid instruction. +func (s *State) CPUIDEmulate(l log.Logger) { + argax := uint32(s.Regs.Rax) + argcx := uint32(s.Regs.Rcx) + ax, bx, cx, dx := s.FeatureSet.EmulateID(argax, argcx) + s.Regs.Rax = uint64(ax) + s.Regs.Rbx = uint64(bx) + s.Regs.Rcx = uint64(cx) + s.Regs.Rdx = uint64(dx) + l.Debugf("CPUID(%x,%x): %x %x %x %x", argax, argcx, ax, bx, cx, dx) +} + +// SingleStep implements Context.SingleStep. +func (s *State) SingleStep() bool { + return s.Regs.Eflags&X86TrapFlag != 0 +} + +// SetSingleStep enables single stepping. +func (s *State) SetSingleStep() { + // Set the trap flag. + s.Regs.Eflags |= X86TrapFlag +} + +// ClearSingleStep enables single stepping. +func (s *State) ClearSingleStep() { + // Clear the trap flag. 
+ s.Regs.Eflags &= ^X86TrapFlag +} + +// RegisterMap returns a map of all registers. +func (s *State) RegisterMap() (map[string]uintptr, error) { + return map[string]uintptr{ + "R15": uintptr(s.Regs.R15), + "R14": uintptr(s.Regs.R14), + "R13": uintptr(s.Regs.R13), + "R12": uintptr(s.Regs.R12), + "Rbp": uintptr(s.Regs.Rbp), + "Rbx": uintptr(s.Regs.Rbx), + "R11": uintptr(s.Regs.R11), + "R10": uintptr(s.Regs.R10), + "R9": uintptr(s.Regs.R9), + "R8": uintptr(s.Regs.R8), + "Rax": uintptr(s.Regs.Rax), + "Rcx": uintptr(s.Regs.Rcx), + "Rdx": uintptr(s.Regs.Rdx), + "Rsi": uintptr(s.Regs.Rsi), + "Rdi": uintptr(s.Regs.Rdi), + "Orig_rax": uintptr(s.Regs.Orig_rax), + "Rip": uintptr(s.Regs.Rip), + "Cs": uintptr(s.Regs.Cs), + "Eflags": uintptr(s.Regs.Eflags), + "Rsp": uintptr(s.Regs.Rsp), + "Ss": uintptr(s.Regs.Ss), + "Fs_base": uintptr(s.Regs.Fs_base), + "Gs_base": uintptr(s.Regs.Gs_base), + "Ds": uintptr(s.Regs.Ds), + "Es": uintptr(s.Regs.Es), + "Fs": uintptr(s.Regs.Fs), + "Gs": uintptr(s.Regs.Gs), + }, nil +} + +// PtraceGetRegs implements Context.PtraceGetRegs. +func (s *State) PtraceGetRegs(dst io.Writer) (int, error) { + return dst.Write(binary.Marshal(nil, usermem.ByteOrder, s.ptraceGetRegs())) +} + +func (s *State) ptraceGetRegs() syscall.PtraceRegs { + regs := s.Regs + // These may not be initialized. + if regs.Cs == 0 || regs.Ss == 0 || regs.Eflags == 0 { + regs.Eflags = eflagsIF + regs.Cs = userCS + regs.Ss = userDS + } + // As an optimization, Linux <4.7 implements 32-bit fs_base/gs_base + // addresses using reserved descriptors in the GDT instead of the MSRs, + // with selector values FS_TLS_SEL and GS_TLS_SEL respectively. These + // values are actually visible in struct user_regs_struct::fs/gs; + // arch/x86/kernel/ptrace.c:getreg() doesn't attempt to sanitize struct + // thread_struct::fsindex/gsindex. + // + // We always use fs == gs == 0 when fs_base/gs_base is in use, for + // simplicity. 
+ // + // Luckily, Linux <4.7 silently ignores setting fs/gs to 0 via + // arch/x86/kernel/ptrace.c:set_segment_reg() when fs_base/gs_base is a + // 32-bit value and fsindex/gsindex indicates that this optimization is + // in use, as well as the reverse case of setting fs/gs to + // FS/GS_TLS_SEL when fs_base/gs_base is a 64-bit value. (We do the + // same in PtraceSetRegs.) + // + // TODO: Remove this fixup since newer Linux doesn't have + // this behavior anymore. + if regs.Fs == 0 && regs.Fs_base <= 0xffffffff { + regs.Fs = _FS_TLS_SEL + } + if regs.Gs == 0 && regs.Gs_base <= 0xffffffff { + regs.Gs = _GS_TLS_SEL + } + return regs +} + +var ptraceRegsSize = int(binary.Size(syscall.PtraceRegs{})) + +// PtraceSetRegs implements Context.PtraceSetRegs. +func (s *State) PtraceSetRegs(src io.Reader) (int, error) { + var regs syscall.PtraceRegs + buf := make([]byte, ptraceRegsSize) + if _, err := io.ReadFull(src, buf); err != nil { + return 0, err + } + binary.Unmarshal(buf, usermem.ByteOrder, ®s) + // Truncate segment registers to 16 bits. + regs.Cs = uint64(uint16(regs.Cs)) + regs.Ds = uint64(uint16(regs.Ds)) + regs.Es = uint64(uint16(regs.Es)) + regs.Fs = uint64(uint16(regs.Fs)) + regs.Gs = uint64(uint16(regs.Gs)) + regs.Ss = uint64(uint16(regs.Ss)) + // In Linux this validation is via arch/x86/kernel/ptrace.c:putreg(). 
+ if !isUserSegmentSelector(regs.Cs) { + return 0, syscall.EIO + } + if regs.Ds != 0 && !isUserSegmentSelector(regs.Ds) { + return 0, syscall.EIO + } + if regs.Es != 0 && !isUserSegmentSelector(regs.Es) { + return 0, syscall.EIO + } + if regs.Fs != 0 && !isUserSegmentSelector(regs.Fs) { + return 0, syscall.EIO + } + if regs.Gs != 0 && !isUserSegmentSelector(regs.Gs) { + return 0, syscall.EIO + } + if !isUserSegmentSelector(regs.Ss) { + return 0, syscall.EIO + } + if regs.Fs_base >= uint64(maxAddr64) { + return 0, syscall.EIO + } + if regs.Gs_base >= uint64(maxAddr64) { + return 0, syscall.EIO + } + // CS and SS are validated, but changes to them are otherwise silently + // ignored on amd64. + regs.Cs = s.Regs.Cs + regs.Ss = s.Regs.Ss + // fs_base/gs_base changes reset fs/gs via do_arch_prctl() on Linux. + if regs.Fs_base != s.Regs.Fs_base { + regs.Fs = 0 + } + if regs.Gs_base != s.Regs.Gs_base { + regs.Gs = 0 + } + // Ignore "stale" TLS segment selectors for FS and GS. See comment in + // ptraceGetRegs. + if regs.Fs == _FS_TLS_SEL && regs.Fs_base != 0 { + regs.Fs = 0 + } + if regs.Gs == _GS_TLS_SEL && regs.Gs_base != 0 { + regs.Gs = 0 + } + regs.Eflags = (s.Regs.Eflags &^ eflagsPtraceMutable) | (regs.Eflags & eflagsPtraceMutable) + s.Regs = regs + return ptraceRegsSize, nil +} + +// isUserSegmentSelector returns true if the given segment selector specifies a +// privilege level of 3 (USER_RPL). +func isUserSegmentSelector(reg uint64) bool { + return reg&3 == 3 +} + +// ptraceFPRegsSize is the size in bytes of Linux's user_i387_struct, the type +// manipulated by PTRACE_GETFPREGS and PTRACE_SETFPREGS on x86. Equivalently, +// ptraceFPRegsSize is the size in bytes of the x86 FXSAVE area. +const ptraceFPRegsSize = 512 + +// PtraceGetFPRegs implements Context.PtraceGetFPRegs. +func (s *State) PtraceGetFPRegs(dst io.Writer) (int, error) { + return dst.Write(s.x86FPState[:ptraceFPRegsSize]) +} + +// PtraceSetFPRegs implements Context.PtraceSetFPRegs. 
+func (s *State) PtraceSetFPRegs(src io.Reader) (int, error) { + var f [ptraceFPRegsSize]byte + n, err := io.ReadFull(src, f[:]) + if err != nil { + return 0, err + } + // Force reserved bits in MXCSR to 0. This is consistent with Linux. + sanitizeMXCSR(x86FPState(f[:])) + // N.B. this only copies the beginning of the FP state, which + // corresponds to the FXSAVE area. + copy(s.x86FPState, f[:]) + return n, nil +} + +const ( + // mxcsrOffset is the offset in bytes of the MXCSR field from the start of + // the FXSAVE area. (Intel SDM Vol. 1, Table 10-2 "Format of an FXSAVE + // Area") + mxcsrOffset = 24 + + // mxcsrMaskOffset is the offset in bytes of the MXCSR_MASK field from the + // start of the FXSAVE area. + mxcsrMaskOffset = 28 +) + +var ( + mxcsrMask uint32 + initMXCSRMask sync.Once +) + +// sanitizeMXCSR coerces reserved bits in the MXCSR field of f to 0. ("FXRSTOR +// generates a general-protection fault (#GP) in response to an attempt to set +// any of the reserved bits of the MXCSR register." - Intel SDM Vol. 1, Section +// 10.5.1.2 "SSE State") +func sanitizeMXCSR(f x86FPState) { + mxcsr := usermem.ByteOrder.Uint32(f[mxcsrOffset:]) + initMXCSRMask.Do(func() { + temp := x86FPState(alignedBytes(uint(ptraceFPRegsSize), 16)) + initX86FPState(temp.FloatingPointData(), false /* useXsave */) + mxcsrMask = usermem.ByteOrder.Uint32(temp[mxcsrMaskOffset:]) + if mxcsrMask == 0 { + // "If the value of the MXCSR_MASK field is 00000000H, then the + // MXCSR_MASK value is the default value of 0000FFBFH." - Intel SDM + // Vol. 1, Section 11.6.6 "Guidelines for Writing to the MXCSR + // Register" + mxcsrMask = 0xffbf + } + }) + mxcsr &= mxcsrMask + usermem.ByteOrder.PutUint32(f[mxcsrOffset:], mxcsr) +} + +const ( + // minXstateBytes is the minimum size in bytes of an x86 XSAVE area, equal + // to the size of the XSAVE legacy area (512 bytes) plus the size of the + // XSAVE header (64 bytes). Equivalently, minXstateBytes is GDB's + // X86_XSTATE_SSE_SIZE. 
+ minXstateBytes = 512 + 64 + + // userXstateXCR0Offset is the offset in bytes of the USER_XSTATE_XCR0_WORD + // field in Linux's struct user_xstateregs, which is the type manipulated + // by ptrace(PTRACE_GET/SETREGSET, NT_X86_XSTATE). Equivalently, + // userXstateXCR0Offset is GDB's I386_LINUX_XSAVE_XCR0_OFFSET. + userXstateXCR0Offset = 464 + + // xstateBVOffset is the offset in bytes of the XSTATE_BV field in an x86 + // XSAVE area. + xstateBVOffset = 512 + + // xsaveHeaderZeroedOffset and xsaveHeaderZeroedBytes indicate parts of the + // XSAVE header that we coerce to zero: "Bytes 15:8 of the XSAVE header is + // a state-component bitmap called XCOMP_BV. ... Bytes 63:16 of the XSAVE + // header are reserved." - Intel SDM Vol. 1, Section 13.4.2 "XSAVE Header". + // Linux ignores XCOMP_BV, but it's able to recover from XRSTOR #GP + // exceptions resulting from invalid values; we aren't. Linux also never + // uses the compacted format when doing XSAVE and doesn't even define the + // compaction extensions to XSAVE as a CPU feature, so for simplicity we + // assume no one is using them. + xsaveHeaderZeroedOffset = 512 + 8 + xsaveHeaderZeroedBytes = 64 - 8 +) + +func (s *State) ptraceGetXstateRegs(dst io.Writer, maxlen int) (int, error) { + // N.B. s.x86FPState may contain more state than the application + // expects. We only copy the subset that would be in their XSAVE area. + ess, _ := s.FeatureSet.ExtendedStateSize() + f := make([]byte, ess) + copy(f, s.x86FPState) + // "The XSAVE feature set does not use bytes 511:416; bytes 463:416 are + // reserved." - Intel SDM Vol 1., Section 13.4.1 "Legacy Region of an XSAVE + // Area". Linux uses the first 8 bytes of this area to store the OS XSTATE + // mask. GDB relies on this: see + // gdb/x86-linux-nat.c:x86_linux_read_description(). 
+ usermem.ByteOrder.PutUint64(f[userXstateXCR0Offset:], s.FeatureSet.ValidXCR0Mask()) + if len(f) > maxlen { + f = f[:maxlen] + } + return dst.Write(f) +} + +func (s *State) ptraceSetXstateRegs(src io.Reader, maxlen int) (int, error) { + // Allow users to pass an xstate register set smaller than ours (they can + // mask bits out of XSTATE_BV), as long as it's at least minXstateBytes. + // Also allow users to pass a register set larger than ours; anything after + // their ExtendedStateSize will be ignored. (I think Linux technically + // permits setting a register set smaller than minXstateBytes, but it has + // the same silent truncation behavior in kernel/ptrace.c:ptrace_regset().) + if maxlen < minXstateBytes { + return 0, syscall.EFAULT + } + ess, _ := s.FeatureSet.ExtendedStateSize() + if maxlen > int(ess) { + maxlen = int(ess) + } + f := make([]byte, maxlen) + if _, err := io.ReadFull(src, f); err != nil { + return 0, err + } + // Force reserved bits in MXCSR to 0. This is consistent with Linux. + sanitizeMXCSR(x86FPState(f)) + // Users can't enable *more* XCR0 bits than what we, and the CPU, support. + xstateBV := usermem.ByteOrder.Uint64(f[xstateBVOffset:]) + xstateBV &= s.FeatureSet.ValidXCR0Mask() + usermem.ByteOrder.PutUint64(f[xstateBVOffset:], xstateBV) + // Force XCOMP_BV and reserved bytes in the XSAVE header to 0. + reserved := f[xsaveHeaderZeroedOffset : xsaveHeaderZeroedOffset+xsaveHeaderZeroedBytes] + for i := range reserved { + reserved[i] = 0 + } + return copy(s.x86FPState, f), nil +} + +// Register sets defined in include/uapi/linux/elf.h. +const ( + _NT_PRSTATUS = 1 + _NT_PRFPREG = 2 + _NT_X86_XSTATE = 0x202 +) + +// PtraceGetRegSet implements Context.PtraceGetRegSet. 
+func (s *State) PtraceGetRegSet(regset uintptr, dst io.Writer, maxlen int) (int, error) { + switch regset { + case _NT_PRSTATUS: + if maxlen < ptraceRegsSize { + return 0, syserror.EFAULT + } + return s.PtraceGetRegs(dst) + case _NT_PRFPREG: + if maxlen < ptraceFPRegsSize { + return 0, syserror.EFAULT + } + return s.PtraceGetFPRegs(dst) + case _NT_X86_XSTATE: + return s.ptraceGetXstateRegs(dst, maxlen) + default: + return 0, syserror.EINVAL + } +} + +// PtraceSetRegSet implements Context.PtraceSetRegSet. +func (s *State) PtraceSetRegSet(regset uintptr, src io.Reader, maxlen int) (int, error) { + switch regset { + case _NT_PRSTATUS: + if maxlen < ptraceRegsSize { + return 0, syserror.EFAULT + } + return s.PtraceSetRegs(src) + case _NT_PRFPREG: + if maxlen < ptraceFPRegsSize { + return 0, syserror.EFAULT + } + return s.PtraceSetFPRegs(src) + case _NT_X86_XSTATE: + return s.ptraceSetXstateRegs(src, maxlen) + default: + return 0, syserror.EINVAL + } +} + +// FullRestore indicates whether a full restore is required. +func (s *State) FullRestore() bool { + // A fast system call return is possible only if + // + // * RCX matches the instruction pointer. + // * R11 matches our flags value. + // * Usermode does not expect to set either the resume flag or the + // virtual mode flags (unlikely.) + // * CS and SS are set to the standard selectors. + // + // That is, SYSRET results in the correct final state. + fastRestore := s.Regs.Rcx == s.Regs.Rip && + s.Regs.Eflags == s.Regs.R11 && + (s.Regs.Eflags&eflagsRF == 0) && + (s.Regs.Eflags&eflagsVM == 0) && + s.Regs.Cs == userCS && + s.Regs.Ss == userDS + return !fastRestore +} + +// New returns a new architecture context. 
+func New(arch Arch, fs *cpuid.FeatureSet) Context { + switch arch { + case AMD64: + return &context64{ + State{ + x86FPState: newX86FPState(), + FeatureSet: fs, + }, + []x86FPState(nil), + } + } + panic(fmt.Sprintf("unknown architecture %v", arch)) +} diff --git a/pkg/sentry/arch/auxv.go b/pkg/sentry/arch/auxv.go new file mode 100644 index 000000000..70e0e35b7 --- /dev/null +++ b/pkg/sentry/arch/auxv.go @@ -0,0 +1,28 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arch + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// An AuxEntry represents an entry in an ELF auxiliary vector. +type AuxEntry struct { + Key uint64 + Value usermem.Addr +} + +// An Auxv represents an ELF auxiliary vector. +type Auxv []AuxEntry diff --git a/pkg/sentry/arch/registers.proto b/pkg/sentry/arch/registers.proto new file mode 100644 index 000000000..437ff44ca --- /dev/null +++ b/pkg/sentry/arch/registers.proto @@ -0,0 +1,55 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package gvisor; + +message AMD64Registers { + uint64 rax = 1; + uint64 rbx = 2; + uint64 rcx = 3; + uint64 rdx = 4; + uint64 rsi = 5; + uint64 rdi = 6; + uint64 rsp = 7; + uint64 rbp = 8; + + uint64 r8 = 9; + uint64 r9 = 10; + uint64 r10 = 11; + uint64 r11 = 12; + uint64 r12 = 13; + uint64 r13 = 14; + uint64 r14 = 15; + uint64 r15 = 16; + + uint64 rip = 17; + uint64 rflags = 18; + uint64 orig_rax = 19; + uint64 cs = 20; + uint64 ds = 21; + uint64 es = 22; + uint64 fs = 23; + uint64 gs = 24; + uint64 ss = 25; + uint64 fs_base = 26; + uint64 gs_base = 27; +} + +message Registers { + oneof arch { + AMD64Registers amd64 = 1; + } +} diff --git a/pkg/sentry/arch/signal_act.go b/pkg/sentry/arch/signal_act.go new file mode 100644 index 000000000..36437b965 --- /dev/null +++ b/pkg/sentry/arch/signal_act.go @@ -0,0 +1,79 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arch + +// Special values for SignalAct.Handler. +const ( + // SignalActDefault is SIG_DFL and specifies that the default behavior for + // a signal should be taken. + SignalActDefault = 0 + + // SignalActIgnore is SIG_IGN and specifies that a signal should be + // ignored. + SignalActIgnore = 1 +) + +// Available signal flags. 
+const ( + SignalFlagNoCldStop = 0x00000001 + SignalFlagNoCldWait = 0x00000002 + SignalFlagSigInfo = 0x00000004 + SignalFlagRestorer = 0x04000000 + SignalFlagOnStack = 0x08000000 + SignalFlagRestart = 0x10000000 + SignalFlagInterrupt = 0x20000000 + SignalFlagNoDefer = 0x40000000 + SignalFlagResetHandler = 0x80000000 +) + +// IsSigInfo returns true iff this handle expects siginfo. +func (s SignalAct) IsSigInfo() bool { + return s.Flags&SignalFlagSigInfo != 0 +} + +// IsNoDefer returns true iff this SignalAct has the NoDefer flag set. +func (s SignalAct) IsNoDefer() bool { + return s.Flags&SignalFlagNoDefer != 0 +} + +// IsRestart returns true iff this SignalAct has the Restart flag set. +func (s SignalAct) IsRestart() bool { + return s.Flags&SignalFlagRestart != 0 +} + +// IsResetHandler returns true iff this SignalAct has the ResetHandler flag set. +func (s SignalAct) IsResetHandler() bool { + return s.Flags&SignalFlagResetHandler != 0 +} + +// IsOnStack returns true iff this SignalAct has the OnStack flag set. +func (s SignalAct) IsOnStack() bool { + return s.Flags&SignalFlagOnStack != 0 +} + +// HasRestorer returns true iff this SignalAct has the Restorer flag set. +func (s SignalAct) HasRestorer() bool { + return s.Flags&SignalFlagRestorer != 0 +} + +// NativeSignalAct is a type that is equivalent to struct sigaction in the +// guest architecture. +type NativeSignalAct interface { + // SerializeFrom copies the data in the host SignalAct s into this object. + SerializeFrom(s *SignalAct) + + // DeserializeTo copies the data in this object into the host SignalAct s. + DeserializeTo(s *SignalAct) +} diff --git a/pkg/sentry/arch/signal_amd64.go b/pkg/sentry/arch/signal_amd64.go new file mode 100644 index 000000000..4040b530f --- /dev/null +++ b/pkg/sentry/arch/signal_amd64.go @@ -0,0 +1,476 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package arch + +import ( + "encoding/binary" + "math" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// SignalAct represents the action that should be taken when a signal is +// delivered, and is equivalent to struct sigaction on 64-bit x86. +type SignalAct struct { + Handler uint64 + Flags uint64 + Restorer uint64 + Mask linux.SignalSet +} + +// SerializeFrom implements NativeSignalAct.SerializeFrom. +func (s *SignalAct) SerializeFrom(other *SignalAct) { + *s = *other +} + +// DeserializeTo implements NativeSignalAct.DeserializeTo. +func (s *SignalAct) DeserializeTo(other *SignalAct) { + *other = *s +} + +// SignalStack represents information about a user stack, and is equivalent to +// stack_t on 64-bit x86. +type SignalStack struct { + Addr uint64 + Flags uint32 + _ uint32 + Size uint64 +} + +// SerializeFrom implements NativeSignalStack.SerializeFrom. +func (s *SignalStack) SerializeFrom(other *SignalStack) { + *s = *other +} + +// DeserializeTo implements NativeSignalStack.DeserializeTo. +func (s *SignalStack) DeserializeTo(other *SignalStack) { + *other = *s +} + +// SignalInfo represents information about a signal being delivered, and is +// equivalent to struct siginfo on 64-bit x86. +type SignalInfo struct { + Signo int32 // Signal number + Errno int32 // Errno value + Code int32 // Signal code + _ uint32 + + // struct siginfo::_sifields is a union. 
In SignalInfo, fields in the union + // are accessed through methods. + // + // For reference, here is the definition of _sifields: (_sigfault._trapno, + // which does not exist on x86, omitted for clarity) + // + // union { + // int _pad[SI_PAD_SIZE]; + // + // /* kill() */ + // struct { + // __kernel_pid_t _pid; /* sender's pid */ + // __ARCH_SI_UID_T _uid; /* sender's uid */ + // } _kill; + // + // /* POSIX.1b timers */ + // struct { + // __kernel_timer_t _tid; /* timer id */ + // int _overrun; /* overrun count */ + // char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; + // sigval_t _sigval; /* same as below */ + // int _sys_private; /* not to be passed to user */ + // } _timer; + // + // /* POSIX.1b signals */ + // struct { + // __kernel_pid_t _pid; /* sender's pid */ + // __ARCH_SI_UID_T _uid; /* sender's uid */ + // sigval_t _sigval; + // } _rt; + // + // /* SIGCHLD */ + // struct { + // __kernel_pid_t _pid; /* which child */ + // __ARCH_SI_UID_T _uid; /* sender's uid */ + // int _status; /* exit code */ + // __ARCH_SI_CLOCK_T _utime; + // __ARCH_SI_CLOCK_T _stime; + // } _sigchld; + // + // /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ + // struct { + // void *_addr; /* faulting insn/memory ref. */ + // short _addr_lsb; /* LSB of the reported address */ + // } _sigfault; + // + // /* SIGPOLL */ + // struct { + // __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + // int _fd; + // } _sigpoll; + // + // /* SIGSYS */ + // struct { + // void *_call_addr; /* calling user insn */ + // int _syscall; /* triggering system call number */ + // unsigned int _arch; /* AUDIT_ARCH_* of syscall */ + // } _sigsys; + // } _sifields; + // + // _sifields is padded so that the size of siginfo is SI_MAX_SIZE = 128 + // bytes. + Fields [128 - 16]byte +} + +// FixSignalCodeForUser fixes up si_code. +// +// The si_code we get from Linux may contain the kernel-specific code in the +// top 16 bits if it's positive (e.g., from ptrace). 
Linux's +// copy_siginfo_to_user does +// err |= __put_user((short)from->si_code, &to->si_code); +// to mask out those bits and we need to do the same. +func (s *SignalInfo) FixSignalCodeForUser() { + if s.Code > 0 { + s.Code &= 0x0000ffff + } +} + +// Pid returns the si_pid field. +func (s *SignalInfo) Pid() int32 { + return int32(usermem.ByteOrder.Uint32(s.Fields[0:4])) +} + +// SetPid mutates the si_pid field. +func (s *SignalInfo) SetPid(val int32) { + usermem.ByteOrder.PutUint32(s.Fields[0:4], uint32(val)) +} + +// Uid returns the si_uid field. +func (s *SignalInfo) Uid() int32 { + return int32(usermem.ByteOrder.Uint32(s.Fields[4:8])) +} + +// SetUid mutates the si_uid field. +func (s *SignalInfo) SetUid(val int32) { + usermem.ByteOrder.PutUint32(s.Fields[4:8], uint32(val)) +} + +// Addr returns the si_addr field. +func (s *SignalInfo) Addr() uint64 { + return usermem.ByteOrder.Uint64(s.Fields[0:8]) +} + +// SetAddr sets the si_addr field. +func (s *SignalInfo) SetAddr(val uint64) { + usermem.ByteOrder.PutUint64(s.Fields[0:8], val) +} + +// Status returns the si_status field. +func (s *SignalInfo) Status() int32 { + return int32(usermem.ByteOrder.Uint32(s.Fields[8:12])) +} + +// SetStatus mutates the si_status field. +func (s *SignalInfo) SetStatus(val int32) { + usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val)) +} + +// CallAddr returns the si_call_addr field. +func (s *SignalInfo) CallAddr() uint64 { + return usermem.ByteOrder.Uint64(s.Fields[0:8]) +} + +// SetCallAddr mutates the si_call_addr field. +func (s *SignalInfo) SetCallAddr(val uint64) { + usermem.ByteOrder.PutUint64(s.Fields[0:8], val) +} + +// Syscall returns the si_syscall field. +func (s *SignalInfo) Syscall() int32 { + return int32(usermem.ByteOrder.Uint32(s.Fields[8:12])) +} + +// SetSyscall mutates the si_syscall field. +func (s *SignalInfo) SetSyscall(val int32) { + usermem.ByteOrder.PutUint32(s.Fields[8:12], uint32(val)) +} + +// Arch returns the si_arch field. 
+func (s *SignalInfo) Arch() uint32 { + return usermem.ByteOrder.Uint32(s.Fields[12:16]) +} + +// SetArch mutates the si_arch field. +func (s *SignalInfo) SetArch(val uint32) { + usermem.ByteOrder.PutUint32(s.Fields[12:16], val) +} + +// SignalContext64 is equivalent to struct sigcontext, the type passed as the +// second argument to signal handlers set by signal(2). +type SignalContext64 struct { + R8 uint64 + R9 uint64 + R10 uint64 + R11 uint64 + R12 uint64 + R13 uint64 + R14 uint64 + R15 uint64 + Rdi uint64 + Rsi uint64 + Rbp uint64 + Rbx uint64 + Rdx uint64 + Rax uint64 + Rcx uint64 + Rsp uint64 + Rip uint64 + Eflags uint64 + Cs uint16 + Gs uint16 // always 0 on amd64. + Fs uint16 // always 0 on amd64. + Ss uint16 // only restored if _UC_STRICT_RESTORE_SS (unsupported). + Err uint64 + Trapno uint64 + Oldmask linux.SignalSet + Cr2 uint64 + // Pointer to a struct _fpstate. + Fpstate uint64 + Reserved [8]uint64 +} + +// Flags for UContext64.Flags. +const ( + _UC_FP_XSTATE = 1 + _UC_SIGCONTEXT_SS = 2 + _UC_STRICT_RESTORE_SS = 4 +) + +// UContext64 is equivalent to ucontext_t on 64-bit x86. +type UContext64 struct { + Flags uint64 + Link uint64 + Stack SignalStack + MContext SignalContext64 + Sigset linux.SignalSet +} + +// NewSignalAct implements Context.NewSignalAct. +func (c *context64) NewSignalAct() NativeSignalAct { + return &SignalAct{} +} + +// NewSignalStack implements Context.NewSignalStack. +func (c *context64) NewSignalStack() NativeSignalStack { + return &SignalStack{} +} + +// From Linux 'arch/x86/include/uapi/asm/sigcontext.h' the following is the +// size of the magic cookie at the end of the xsave frame. +// +// NOTE: Currently we don't actually populate the fpstate +// on the signal stack. +const _FP_XSTATE_MAGIC2_SIZE = 4 + +func (c *context64) fpuFrameSize() (size int, useXsave bool) { + size = len(c.x86FPState) + if size > 512 { + // Make room for the magic cookie at the end of the xsave frame. 
+ size += _FP_XSTATE_MAGIC2_SIZE + useXsave = true + } + return size, useXsave +} + +// SignalSetup implements Context.SignalSetup. (Compare to Linux's +// arch/x86/kernel/signal.c:__setup_rt_frame().) +func (c *context64) SignalSetup(st *Stack, act *SignalAct, info *SignalInfo, alt *SignalStack, sigset linux.SignalSet) error { + sp := st.Bottom + + // "The 128-byte area beyond the location pointed to by %rsp is considered + // to be reserved and shall not be modified by signal or interrupt + // handlers. ... leaf functions may use this area for their entire stack + // frame, rather than adjusting the stack pointer in the prologue and + // epilogue." - AMD64 ABI + // + // (But this doesn't apply if we're starting at the top of the signal + // stack, in which case there is no following stack frame.) + if !(alt.IsEnabled() && sp == alt.Top()) { + sp -= 128 + } + + // Allocate space for floating point state on the stack. + // + // This isn't strictly necessary because we don't actually populate + // the fpstate. However we do store the floating point state of the + // interrupted thread inside the sentry. Simply accounting for this + // space on the user stack naturally caps the amount of memory the + // sentry will allocate for this purpose. + fpSize, _ := c.fpuFrameSize() + sp = (sp - usermem.Addr(fpSize)) & ^usermem.Addr(63) + + // Construct the UContext64 now since we need its size. + uc := &UContext64{ + // No _UC_FP_XSTATE: see Fpstate above. + // No _UC_STRICT_RESTORE_SS: we don't allow SS changes. 
+ Flags: _UC_SIGCONTEXT_SS, + Stack: *alt, + MContext: SignalContext64{ + R8: c.Regs.R8, + R9: c.Regs.R9, + R10: c.Regs.R10, + R11: c.Regs.R11, + R12: c.Regs.R12, + R13: c.Regs.R13, + R14: c.Regs.R14, + R15: c.Regs.R15, + Rdi: c.Regs.Rdi, + Rsi: c.Regs.Rsi, + Rbp: c.Regs.Rbp, + Rbx: c.Regs.Rbx, + Rdx: c.Regs.Rdx, + Rax: c.Regs.Rax, + Rcx: c.Regs.Rcx, + Rsp: c.Regs.Rsp, + Rip: c.Regs.Rip, + Eflags: c.Regs.Eflags, + Cs: uint16(c.Regs.Cs), + Ss: uint16(c.Regs.Ss), + Oldmask: sigset, + }, + Sigset: sigset, + } + + // TODO: Set SignalContext64.Err, Trapno, and Cr2 based on + // the fault that caused the signal. For now, leave Err and Trapno + // unset and assume CR2 == info.Addr() for SIGSEGVs and SIGBUSes. + if linux.Signal(info.Signo) == linux.SIGSEGV || linux.Signal(info.Signo) == linux.SIGBUS { + uc.MContext.Cr2 = info.Addr() + } + + // "... the value (%rsp+8) is always a multiple of 16 (...) when control is + // transferred to the function entry point." - AMD64 ABI + ucSize := binary.Size(uc) + if ucSize < 0 { + // This can only happen if we've screwed up the definition of + // UContext64. + panic("can't get size of UContext64") + } + // st.Arch.Width() is for the restorer address. sizeof(siginfo) == 128. + frameSize := int(st.Arch.Width()) + ucSize + 128 + frameBottom := (sp-usermem.Addr(frameSize)) & ^usermem.Addr(15) - 8 + sp = frameBottom + usermem.Addr(frameSize) + st.Bottom = sp + + info.FixSignalCodeForUser() + + // Set up the stack frame. + infoAddr, err := st.Push(info) + if err != nil { + return err + } + ucAddr, err := st.Push(uc) + if err != nil { + return err + } + if act.HasRestorer() { + // Push the restorer return address. + // Note that this doesn't need to be popped. + if _, err := st.Push(usermem.Addr(act.Restorer)); err != nil { + return err + } + } else { + // amd64 requires a restorer. + return syscall.EFAULT + } + + // Set up registers. 
+ c.Regs.Rip = act.Handler + c.Regs.Rsp = uint64(st.Bottom) + c.Regs.Rdi = uint64(info.Signo) + c.Regs.Rsi = uint64(infoAddr) + c.Regs.Rdx = uint64(ucAddr) + c.Regs.Rax = 0 + c.Regs.Ds = userDS + c.Regs.Es = userDS + c.Regs.Cs = userCS + c.Regs.Ss = userDS + + // Save the thread's floating point state. + c.sigFPState = append(c.sigFPState, c.x86FPState) + + // Signal handler gets a clean floating point state. + c.x86FPState = newX86FPState() + + return nil +} + +// SignalRestore implements Context.SignalRestore. (Compare to Linux's +// arch/x86/kernel/signal.c:sys_rt_sigreturn().) +func (c *context64) SignalRestore(st *Stack, rt bool) (linux.SignalSet, error) { + // Copy out the stack frame. + var uc UContext64 + if _, err := st.Pop(&uc); err != nil { + return 0, err + } + var info SignalInfo + if _, err := st.Pop(&info); err != nil { + return 0, err + } + + // Restore registers. + c.Regs.R8 = uc.MContext.R8 + c.Regs.R9 = uc.MContext.R9 + c.Regs.R10 = uc.MContext.R10 + c.Regs.R11 = uc.MContext.R11 + c.Regs.R12 = uc.MContext.R12 + c.Regs.R13 = uc.MContext.R13 + c.Regs.R14 = uc.MContext.R14 + c.Regs.R15 = uc.MContext.R15 + c.Regs.Rdi = uc.MContext.Rdi + c.Regs.Rsi = uc.MContext.Rsi + c.Regs.Rbp = uc.MContext.Rbp + c.Regs.Rbx = uc.MContext.Rbx + c.Regs.Rdx = uc.MContext.Rdx + c.Regs.Rax = uc.MContext.Rax + c.Regs.Rcx = uc.MContext.Rcx + c.Regs.Rsp = uc.MContext.Rsp + c.Regs.Rip = uc.MContext.Rip + c.Regs.Eflags = (c.Regs.Eflags & ^eflagsRestorable) | (uc.MContext.Eflags & eflagsRestorable) + c.Regs.Cs = uint64(uc.MContext.Cs) | 3 + // N.B. _UC_STRICT_RESTORE_SS not supported. + c.Regs.Orig_rax = math.MaxUint64 + + // Restore floating point state. + l := len(c.sigFPState) + if l > 0 { + c.x86FPState = c.sigFPState[l-1] + // NOTE: State save requires that any slice + // elements from '[len:cap]' to be zero value. 
+ c.sigFPState[l-1] = nil + c.sigFPState = c.sigFPState[0 : l-1] + } else { + // This might happen if sigreturn(2) calls are unbalanced with + // respect to signal handler entries. This is not expected so + // don't bother to do anything fancy with the floating point + // state. + log.Infof("sigreturn unable to restore application fpstate") + } + + return uc.Sigset, nil +} diff --git a/pkg/sentry/arch/signal_info.go b/pkg/sentry/arch/signal_info.go new file mode 100644 index 000000000..ec004ae75 --- /dev/null +++ b/pkg/sentry/arch/signal_info.go @@ -0,0 +1,66 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arch + +// Possible values for SignalInfo.Code. These values originate from the Linux +// kernel's include/uapi/asm-generic/siginfo.h. +const ( + // SignalInfoUser (properly SI_USER) indicates that a signal was sent from + // a kill() or raise() syscall. + SignalInfoUser = 0 + + // SignalInfoKernel (properly SI_KERNEL) indicates that the signal was sent + // by the kernel. + SignalInfoKernel = 0x80 + + // SignalInfoTimer (properly SI_TIMER) indicates that the signal was sent + // by an expired timer. + SignalInfoTimer = -2 + + // SignalInfoTkill (properly SI_TKILL) indicates that the signal was sent + // from a tkill() or tgkill() syscall. + SignalInfoTkill = -6 + + // CLD_* codes are only meaningful for SIGCHLD. + + // CLD_EXITED indicates that a task exited. 
+ CLD_EXITED = 1 + + // CLD_KILLED indicates that a task was killed by a signal. + CLD_KILLED = 2 + + // CLD_DUMPED indicates that a task was killed by a signal and then dumped + // core. + CLD_DUMPED = 3 + + // CLD_TRAPPED indicates that a task was stopped by ptrace. + CLD_TRAPPED = 4 + + // CLD_STOPPED indicates that a thread group completed a group stop. + CLD_STOPPED = 5 + + // CLD_CONTINUED indicates that a group-stopped thread group was continued. + CLD_CONTINUED = 6 + + // SYS_* codes are only meaningful for SIGSYS. + + // SYS_SECCOMP indicates that a signal originates from seccomp. + SYS_SECCOMP = 1 + + // TRAP_* codes are only meaningful for SIGTRAP. + + // TRAP_BRKPT indicates a breakpoint trap. + TRAP_BRKPT = 1 +) diff --git a/pkg/sentry/arch/signal_stack.go b/pkg/sentry/arch/signal_stack.go new file mode 100644 index 000000000..7c6531d79 --- /dev/null +++ b/pkg/sentry/arch/signal_stack.go @@ -0,0 +1,58 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build i386 amd64 + +package arch + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +const ( + // SignalStackFlagOnStack is possible set on return from getaltstack, + // in order to indicate that the thread is currently on the alt stack. + SignalStackFlagOnStack = 1 + + // SignalStackFlagDisable is a flag to indicate the stack is disabled. 
+ SignalStackFlagDisable = 2 +) + +// IsEnabled returns true iff this signal stack is marked as enabled. +func (s SignalStack) IsEnabled() bool { + return s.Flags&SignalStackFlagDisable == 0 +} + +// Top returns the stack's top address. +func (s SignalStack) Top() usermem.Addr { + return usermem.Addr(s.Addr + s.Size) +} + +// SetOnStack marks this signal stack as in use. (This is only called on copies +// sent to user applications, so there's no corresponding ClearOnStack.) +func (s *SignalStack) SetOnStack() { + s.Flags |= SignalStackFlagOnStack +} + +// NativeSignalStack is a type that is equivalent to stack_t in the guest +// architecture. +type NativeSignalStack interface { + // SerializeFrom copies the data in the host SignalStack s into this + // object. + SerializeFrom(s *SignalStack) + + // DeserializeTo copies the data in this object into the host SignalStack + // s. + DeserializeTo(s *SignalStack) +} diff --git a/pkg/sentry/arch/stack.go b/pkg/sentry/arch/stack.go new file mode 100644 index 000000000..6c1b9be82 --- /dev/null +++ b/pkg/sentry/arch/stack.go @@ -0,0 +1,246 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arch + +import ( + "encoding/binary" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// Stack is a simple wrapper around a usermem.IO and an address. +type Stack struct { + // Our arch info. 
+ // We use this for automatic Native conversion of usermem.Addrs during + // Push() and Pop(). + Arch Context + + // The interface used to actually copy user memory. + IO usermem.IO + + // Our current stack bottom. + Bottom usermem.Addr +} + +// Push pushes the given values on to the stack. +// +// (This method supports Addrs and treats them as native types.) +func (s *Stack) Push(vals ...interface{}) (usermem.Addr, error) { + for _, v := range vals { + + // We convert some types to well-known serializable quanities. + var norm interface{} + + // For array types, we will automatically add an appropriate + // terminal value. This is done simply to make the interface + // easier to use. + var term interface{} + + switch v.(type) { + case string: + norm = []byte(v.(string)) + term = byte(0) + case []int8, []uint8: + norm = v + term = byte(0) + case []int16, []uint16: + norm = v + term = uint16(0) + case []int32, []uint32: + norm = v + term = uint32(0) + case []int64, []uint64: + norm = v + term = uint64(0) + case []usermem.Addr: + // Special case: simply push recursively. + _, err := s.Push(s.Arch.Native(uintptr(0))) + if err != nil { + return 0, err + } + varr := v.([]usermem.Addr) + for i := len(varr) - 1; i >= 0; i-- { + _, err := s.Push(varr[i]) + if err != nil { + return 0, err + } + } + continue + case usermem.Addr: + norm = s.Arch.Native(uintptr(v.(usermem.Addr))) + default: + norm = v + } + + if term != nil { + _, err := s.Push(term) + if err != nil { + return 0, err + } + } + + c := binary.Size(norm) + if c < 0 { + return 0, fmt.Errorf("bad binary.Size for %T", v) + } + // TODO: Use a real context.Context. + n, err := usermem.CopyObjectOut(context.Background(), s.IO, s.Bottom-usermem.Addr(c), norm, usermem.IOOpts{}) + if err != nil || c != n { + return 0, err + } + + s.Bottom -= usermem.Addr(n) + } + + return s.Bottom, nil +} + +// Pop pops the given values off the stack. +// +// (This method supports Addrs and treats them as native types.) 
+func (s *Stack) Pop(vals ...interface{}) (usermem.Addr, error) { + for _, v := range vals { + + vaddr, isVaddr := v.(*usermem.Addr) + + var n int + var err error + if isVaddr { + value := s.Arch.Native(uintptr(0)) + // TODO: Use a real context.Context. + n, err = usermem.CopyObjectIn(context.Background(), s.IO, s.Bottom, value, usermem.IOOpts{}) + *vaddr = usermem.Addr(s.Arch.Value(value)) + } else { + // TODO: Use a real context.Context. + n, err = usermem.CopyObjectIn(context.Background(), s.IO, s.Bottom, v, usermem.IOOpts{}) + } + if err != nil { + return 0, err + } + + s.Bottom += usermem.Addr(n) + } + + return s.Bottom, nil +} + +// Align aligns the stack to the given offset. +func (s *Stack) Align(offset int) { + if s.Bottom%usermem.Addr(offset) != 0 { + s.Bottom -= (s.Bottom % usermem.Addr(offset)) + } +} + +// StackLayout describes the location of the arguments and environment on the +// stack. +type StackLayout struct { + // ArgvStart is the beginning of the argument vector. + ArgvStart usermem.Addr + + // ArgvEnd is the end of the argument vector. + ArgvEnd usermem.Addr + + // EnvvStart is the beginning of the environment vector. + EnvvStart usermem.Addr + + // EnvvEnd is the end of the environment vector. + EnvvEnd usermem.Addr +} + +// Load pushes the given args, env and aux vector to the stack using the +// well-known format for a new executable. It returns the start and end +// of the argument and environment vectors. +func (s *Stack) Load(args []string, env []string, aux Auxv) (StackLayout, error) { + l := StackLayout{} + + // Make sure we start with a 16-byte alignment. + s.Align(16) + + // Push our strings. + l.ArgvEnd = s.Bottom + argAddrs := make([]usermem.Addr, len(args)) + for i := len(args) - 1; i >= 0; i-- { + addr, err := s.Push(args[i]) + if err != nil { + return StackLayout{}, err + } + argAddrs[i] = addr + } + l.ArgvStart = s.Bottom + + // Push our environment. 
+ l.EnvvEnd = s.Bottom + envAddrs := make([]usermem.Addr, len(env)) + for i := len(env) - 1; i >= 0; i-- { + addr, err := s.Push(env[i]) + if err != nil { + return StackLayout{}, err + } + envAddrs[i] = addr + } + l.EnvvStart = s.Bottom + + // We need to align the arguments appropriately. + // + // We must finish on a 16-byte alignment, but we'll play it + // conservatively and finish at 32-bytes. It would be nice to be able + // to call Align here, but unfortunately we need to align the stack + // with all the variable sized arrays pushed. So we just need to do + // some calculations. + argvSize := s.Arch.Width() * uint(len(args)+1) + envvSize := s.Arch.Width() * uint(len(env)+1) + auxvSize := s.Arch.Width() * 2 * uint(len(aux)+1) + total := usermem.Addr(argvSize) + usermem.Addr(envvSize) + usermem.Addr(auxvSize) + usermem.Addr(s.Arch.Width()) + expectedBottom := s.Bottom - total + if expectedBottom%32 != 0 { + s.Bottom -= expectedBottom % 32 + } + + // Push our auxvec. + // NOTE: We need an extra zero here per spec. + // The Push function will automatically terminate + // strings and arrays with a single null value. + auxv := make([]usermem.Addr, 0, len(aux)) + for _, a := range aux { + auxv = append(auxv, usermem.Addr(a.Key), a.Value) + } + auxv = append(auxv, usermem.Addr(0)) + _, err := s.Push(auxv) + if err != nil { + return StackLayout{}, err + } + + // Push environment. + _, err = s.Push(envAddrs) + if err != nil { + return StackLayout{}, err + } + + // Push args. + _, err = s.Push(argAddrs) + if err != nil { + return StackLayout{}, err + } + + // Push arg count. + _, err = s.Push(usermem.Addr(len(args))) + if err != nil { + return StackLayout{}, err + } + + return l, nil +} diff --git a/pkg/sentry/arch/syscalls_amd64.go b/pkg/sentry/arch/syscalls_amd64.go new file mode 100644 index 000000000..41d8ba0d1 --- /dev/null +++ b/pkg/sentry/arch/syscalls_amd64.go @@ -0,0 +1,52 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package arch + +const restartSyscallNr = uintptr(219) + +// SyscallNo returns the syscall number according to the 64-bit convention. +func (c *context64) SyscallNo() uintptr { + return uintptr(c.Regs.Orig_rax) +} + +// SyscallArgs provides syscall arguments according to the 64-bit convention. +// +// Due to the way addresses are mapped for the sentry this binary *must* be +// built in 64-bit mode. So we can just assume the syscall numbers that come +// back match the expected host system call numbers. +func (c *context64) SyscallArgs() SyscallArguments { + return SyscallArguments{ + SyscallArgument{Value: uintptr(c.Regs.Rdi)}, + SyscallArgument{Value: uintptr(c.Regs.Rsi)}, + SyscallArgument{Value: uintptr(c.Regs.Rdx)}, + SyscallArgument{Value: uintptr(c.Regs.R10)}, + SyscallArgument{Value: uintptr(c.Regs.R8)}, + SyscallArgument{Value: uintptr(c.Regs.R9)}, + } +} + +// RestartSyscall implements Context.RestartSyscall. +func (c *context64) RestartSyscall() { + c.Regs.Rip -= SyscallWidth + c.Regs.Rax = c.Regs.Orig_rax +} + +// RestartSyscallWithRestartBlock implements Context.RestartSyscallWithRestartBlock. 
+func (c *context64) RestartSyscallWithRestartBlock() { + c.Regs.Rip -= SyscallWidth + c.Regs.Rax = uint64(restartSyscallNr) +} diff --git a/pkg/sentry/context/BUILD b/pkg/sentry/context/BUILD new file mode 100644 index 000000000..ff39f94ba --- /dev/null +++ b/pkg/sentry/context/BUILD @@ -0,0 +1,14 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "context", + srcs = ["context.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/context", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/amutex", + "//pkg/log", + ], +) diff --git a/pkg/sentry/context/context.go b/pkg/sentry/context/context.go new file mode 100644 index 000000000..e0dffafba --- /dev/null +++ b/pkg/sentry/context/context.go @@ -0,0 +1,103 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package context defines the sentry's Context type. +package context + +import ( + "gvisor.googlesource.com/gvisor/pkg/amutex" + "gvisor.googlesource.com/gvisor/pkg/log" +) + +// A Context represents a thread of execution (hereafter "goroutine" to reflect +// Go idiosyncrasy). It carries state associated with the goroutine across API +// boundaries. +// +// While Context exists for essentially the same reasons as Go's standard +// context.Context, the standard type represents the state of an operation +// rather than that of a goroutine. 
This is a critical distinction: +// +// - Unlike context.Context, which "may be passed to functions running in +// different goroutines", it is *not safe* to use the same Context in multiple +// concurrent goroutines. +// +// - It is *not safe* to retain a Context passed to a function beyond the scope +// of that function call. +// +// In both cases, values extracted from the Context should be used instead. +type Context interface { + log.Logger + amutex.Sleeper + + // UninterruptibleSleepStart indicates the beginning of an uninterruptible + // sleep state (equivalent to Linux's TASK_UNINTERRUPTIBLE). If deactivate + // is true and the Context represents a Task, the Task's AddressSpace is + // deactivated. + UninterruptibleSleepStart(deactivate bool) + + // UninterruptibleSleepFinish indicates the end of an uninterruptible sleep + // state that was begun by a previous call to UninterruptibleSleepStart. If + // activate is true and the Context represents a Task, the Task's + // AddressSpace is activated. Normally activate is the same value as the + // deactivate parameter passed to UninterruptibleSleepStart. + UninterruptibleSleepFinish(activate bool) + + // Value returns the value associated with this Context for key, or nil if + // no value is associated with key. Successive calls to Value with the same + // key returns the same result. + // + // A key identifies a specific value in a Context. Functions that wish to + // retrieve values from Context typically allocate a key in a global + // variable then use that key as the argument to Context.Value. A key can + // be any type that supports equality; packages should define keys as an + // unexported type to avoid collisions. + Value(key interface{}) interface{} +} + +type logContext struct { + log.Logger + NoopSleeper +} + +// Value implements Context.Value. 
+func (logContext) Value(key interface{}) interface{} {
+	// A logContext carries no values; every lookup misses and returns nil.
+	return nil
+}
+
+// NoopSleeper is a noop implementation of amutex.Sleeper and
+// Context.UninterruptibleSleep* methods for anonymous embedding in other types
+// that do not want to notify kernel.Task about sleeps.
+type NoopSleeper struct {
+	amutex.NoopSleeper
+}
+
+// UninterruptibleSleepStart does nothing.
+func (NoopSleeper) UninterruptibleSleepStart(bool) {}
+
+// UninterruptibleSleepFinish does nothing.
+func (NoopSleeper) UninterruptibleSleepFinish(bool) {}
+
+// Background returns an empty context using the default logger.
+//
+// Users should be wary of using a Background context. Please tag any use with
+// FIXME and a note to remove this use.
+//
+// Generally, one should use the Task as their context when available, or avoid
+// having to use a context in places where a Task is unavailable.
+//
+// Using a Background context for tests is fine, as long as no values are
+// needed from the context in the tested code paths.
+func Background() Context { + return logContext{Logger: log.Log()} +} diff --git a/pkg/sentry/context/contexttest/BUILD b/pkg/sentry/context/contexttest/BUILD new file mode 100644 index 000000000..5977344de --- /dev/null +++ b/pkg/sentry/context/contexttest/BUILD @@ -0,0 +1,34 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "contexttest_state", + srcs = [ + "contexttest.go", + ], + out = "contexttest_state.go", + package = "contexttest", +) + +go_library( + name = "contexttest", + testonly = 1, + srcs = [ + "contexttest.go", + "contexttest_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/sentry/context", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/time", + "//pkg/sentry/limits", + "//pkg/sentry/platform", + "//pkg/sentry/platform/ptrace", + "//pkg/sentry/uniqueid", + "//pkg/state", + ], +) diff --git a/pkg/sentry/context/contexttest/contexttest.go b/pkg/sentry/context/contexttest/contexttest.go new file mode 100644 index 000000000..193ce3440 --- /dev/null +++ b/pkg/sentry/context/contexttest/contexttest.go @@ -0,0 +1,133 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package contexttest builds a test context.Context. 
+package contexttest + +import ( + "sync/atomic" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ptrace" + "gvisor.googlesource.com/gvisor/pkg/sentry/uniqueid" +) + +// Context returns a Context that may be used in tests. Uses ptrace as the +// platform.Platform. +func Context(tb testing.TB) context.Context { + p, err := ptrace.New() + if err != nil { + tb.Fatal(err) + } + // Test usage of context.Background is fine. + return &testContext{ + Context: context.Background(), + l: limits.NewLimitSet(), + platform: p, + } +} + +type testContext struct { + context.Context + l *limits.LimitSet + platform platform.Platform +} + +// globalUniqueID tracks incremental unique identifiers for tests. +var globalUniqueID uint64 + +// lastInotifyCookie is a monotonically increasing counter for generating unique +// inotify cookies. Must be accessed using atomic ops. +var lastInotifyCookie uint32 + +// hostClock implements ktime.Clock. +type hostClock struct { + ktime.WallRateClock + ktime.NoClockEvents +} + +// Now implements ktime.Clock.Now. +func (hostClock) Now() ktime.Time { + return ktime.FromNanoseconds(time.Now().UnixNano()) +} + +// Value implements context.Context. 
+func (t *testContext) Value(key interface{}) interface{} { + switch key { + case limits.CtxLimits: + return t.l + case platform.CtxPlatform: + return t.platform + case uniqueid.CtxGlobalUniqueID: + return atomic.AddUint64(&globalUniqueID, 1) + case uniqueid.CtxInotifyCookie: + return atomic.AddUint32(&lastInotifyCookie, 1) + case ktime.CtxRealtimeClock: + return hostClock{} + default: + return t.Context.Value(key) + } +} + +// RootContext returns a Context that may be used in tests that need root +// credentials. Uses ptrace as the platform.Platform. +func RootContext(tb testing.TB) context.Context { + return WithCreds(Context(tb), auth.NewRootCredentials(auth.NewRootUserNamespace())) +} + +// WithCreds returns a copy of ctx carrying creds. +func WithCreds(ctx context.Context, creds *auth.Credentials) context.Context { + return &authContext{ctx, creds} +} + +type authContext struct { + context.Context + creds *auth.Credentials +} + +// Value implements context.Context. +func (ac *authContext) Value(key interface{}) interface{} { + switch key { + case auth.CtxCredentials: + return ac.creds + default: + return ac.Context.Value(key) + } +} + +// WithLimitSet returns a copy of ctx carrying l. +func WithLimitSet(ctx context.Context, l *limits.LimitSet) context.Context { + return limitContext{ctx, l} +} + +type limitContext struct { + context.Context + l *limits.LimitSet +} + +// Value implements context.Context. 
+func (lc limitContext) Value(key interface{}) interface{} { + switch key { + case limits.CtxLimits: + return lc.l + default: + return lc.Context.Value(key) + } +} diff --git a/pkg/sentry/control/BUILD b/pkg/sentry/control/BUILD new file mode 100644 index 000000000..4d1d0d019 --- /dev/null +++ b/pkg/sentry/control/BUILD @@ -0,0 +1,39 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "control", + srcs = [ + "control.go", + "proc.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/control", + visibility = [ + "//pkg/sentry:internal", + ], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/fs", + "//pkg/sentry/fs/host", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/limits", + "//pkg/sentry/usage", + "//pkg/urpc", + ], +) + +go_test( + name = "control_test", + size = "small", + srcs = ["proc_test.go"], + embed = [":control"], + deps = [ + "//pkg/log", + "//pkg/sentry/kernel/time", + "//pkg/sentry/usage", + ], +) diff --git a/pkg/sentry/control/control.go b/pkg/sentry/control/control.go new file mode 100644 index 000000000..a6ee6e649 --- /dev/null +++ b/pkg/sentry/control/control.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package control contains types that expose control server methods, and can +// be used to configure and interact with a running sandbox process. +package control diff --git a/pkg/sentry/control/proc.go b/pkg/sentry/control/proc.go new file mode 100644 index 000000000..7d06a1d04 --- /dev/null +++ b/pkg/sentry/control/proc.go @@ -0,0 +1,293 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package control + +import ( + "bytes" + "encoding/json" + "fmt" + "syscall" + "text/tabwriter" + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/host" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/urpc" +) + +// Proc includes task-related functions. +// +// At the moment, this is limited to exec support. +type Proc struct { + Kernel *kernel.Kernel +} + +// ExecArgs is the set of arguments to exec. +type ExecArgs struct { + // Filename is the filename to load. + // + // If this is provided as "", then the file will be guessed via Argv[0]. 
+ Filename string `json:"filename"` + + // Argv is a list of arguments. + Argv []string `json:"argv"` + + // Envv is a list of environment variables. + Envv []string `json:"envv"` + + // WorkingDirectory defines the working directory for the new process. + WorkingDirectory string `json:"wd"` + + // KUID is the UID to run with in the root user namespace. Defaults to + // root if not set explicitly. + KUID auth.KUID + + // KGID is the GID to run with in the root user namespace. Defaults to + // the root group if not set explicitly. + KGID auth.KGID + + // ExtraKGIDs is the list of additional groups to which the user + // belongs. + ExtraKGIDs []auth.KGID + + // Capabilities is the list of capabilities to give to the process. + Capabilities *auth.TaskCapabilities + + // Detach indicates whether Exec should detach once the process starts. + Detach bool + + // FilePayload determines the files to give to the new process. + urpc.FilePayload +} + +// Exec runs a new task. +func (proc *Proc) Exec(args *ExecArgs, waitStatus *uint32) error { + // Import file descriptors. + l := limits.NewLimitSet() + fdm := proc.Kernel.NewFDMap() + defer fdm.DecRef() + + creds := auth.NewUserCredentials( + args.KUID, + args.KGID, + args.ExtraKGIDs, + args.Capabilities, + proc.Kernel.RootUserNamespace()) + + initArgs := kernel.CreateProcessArgs{ + Filename: args.Filename, + Argv: args.Argv, + Envv: args.Envv, + WorkingDirectory: args.WorkingDirectory, + Credentials: creds, + FDMap: fdm, + Umask: 0022, + Limits: l, + MaxSymlinkTraversals: linux.MaxSymlinkTraversals, + UTSNamespace: proc.Kernel.RootUTSNamespace(), + IPCNamespace: proc.Kernel.RootIPCNamespace(), + } + ctx := initArgs.NewContext(proc.Kernel) + mounter := fs.FileOwnerFromContext(ctx) + + for appFD, f := range args.FilePayload.Files { + // Copy the underlying FD. + newFD, err := syscall.Dup(int(f.Fd())) + if err != nil { + return err + } + f.Close() + + // Install the given file as an FD. 
+ file, err := host.NewFile(ctx, newFD, mounter) + if err != nil { + syscall.Close(newFD) + return err + } + defer file.DecRef() + if err := fdm.NewFDAt(kdefs.FD(appFD), file, kernel.FDFlags{}, l); err != nil { + return err + } + } + + // Start the new task. + newTG, err := proc.Kernel.CreateProcess(initArgs) + if err != nil { + return err + } + + // If we're supposed to detach, don't wait for the process to exit. + if args.Detach { + *waitStatus = 0 + return nil + } + + // Wait for completion. + newTG.WaitExited() + *waitStatus = newTG.ExitStatus().Status() + return nil +} + +// PsArgs is the set of arguments to ps. +type PsArgs struct { + // JSON will force calls to Ps to return the result as a JSON payload. + JSON bool +} + +// Ps provides a process listing for the running kernel. +func (proc *Proc) Ps(args *PsArgs, out *string) error { + var p []*Process + if e := Processes(proc.Kernel, &p); e != nil { + return e + } + if !args.JSON { + *out = ProcessListToTable(p) + } else { + s, e := ProcessListToJSON(p) + if e != nil { + return e + } + *out = s + } + return nil +} + +// Process contains information about a single process in a Sandbox. +// TODO: Implement TTY field. +type Process struct { + UID auth.KUID `json:"uid"` + PID kernel.ThreadID `json:"pid"` + // Parent PID + PPID kernel.ThreadID `json:"ppid"` + // Processor utilization + C int32 `json:"c"` + // Start time + STime string `json:"stime"` + // CPU time + Time string `json:"time"` + // Executable shortname (e.g. 
"sh" for /bin/sh) + Cmd string `json:"cmd"` +} + +// ProcessListToTable prints a table with the following format: +// UID PID PPID C STIME TIME CMD +// 0 1 0 0 14:04 505262ns tail +func ProcessListToTable(pl []*Process) string { + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 10, 1, 3, ' ', 0) + fmt.Fprint(tw, "UID\tPID\tPPID\tC\tSTIME\tTIME\tCMD") + for _, d := range pl { + fmt.Fprintf(tw, "\n%d\t%d\t%d\t%d\t%s\t%s\t%s", + d.UID, + d.PID, + d.PPID, + d.C, + d.STime, + d.Time, + d.Cmd) + } + tw.Flush() + return buf.String() +} + +// ProcessListToJSON will return the JSON representation of ps. +func ProcessListToJSON(pl []*Process) (string, error) { + b, err := json.Marshal(pl) + if err != nil { + return "", fmt.Errorf("couldn't marshal process list %v: %v", pl, err) + } + return string(b), nil +} + +// PrintPIDsJSON prints a JSON object containing only the PIDs in pl. This +// behavior is the same as runc's. +func PrintPIDsJSON(pl []*Process) (string, error) { + pids := make([]kernel.ThreadID, 0, len(pl)) + for _, d := range pl { + pids = append(pids, d.PID) + } + b, err := json.Marshal(pids) + if err != nil { + return "", fmt.Errorf("couldn't marshal PIDs %v: %v", pids, err) + } + return string(b), nil +} + +// Processes retrieves information about processes running in the sandbox. +func Processes(k *kernel.Kernel, out *[]*Process) error { + ts := k.TaskSet() + now := k.RealtimeClock().Now() + for _, tg := range ts.Root.ThreadGroups() { + pid := ts.Root.IDOfThreadGroup(tg) + // If tg has already been reaped ignore it. + if pid == 0 { + continue + } + + *out = append(*out, &Process{ + UID: tg.Leader().Credentials().EffectiveKUID, + PID: pid, + // If Parent is null (i.e. tg is the init process), PPID will be 0. 
+ PPID: ts.Root.IDOfTask(tg.Leader().Parent()), + STime: formatStartTime(now, tg.Leader().StartTime()), + C: percentCPU(tg.CPUStats(), tg.Leader().StartTime(), now), + Time: tg.CPUStats().SysTime.String(), + Cmd: tg.Leader().Name(), + }) + } + return nil +} + +// formatStartTime formats startTime depending on the current time: +// - If startTime was today, HH:MM is used. +// - If startTime was not today but was this year, MonDD is used (e.g. Jan02) +// - If startTime was not this year, the year is used. +func formatStartTime(now, startTime ktime.Time) string { + nowS, nowNs := now.Unix() + n := time.Unix(nowS, nowNs) + startTimeS, startTimeNs := startTime.Unix() + st := time.Unix(startTimeS, startTimeNs) + format := "15:04" + if st.YearDay() != n.YearDay() { + format = "Jan02" + } + if st.Year() != n.Year() { + format = "2006" + } + return st.Format(format) +} + +func percentCPU(stats usage.CPUStats, startTime, now ktime.Time) int32 { + // Note: In procps, there is an option to include child CPU stats. As + // it is disabled by default, we do not include them. + total := stats.UserTime + stats.SysTime + lifetime := now.Sub(startTime) + if lifetime <= 0 { + return 0 + } + percentCPU := total * 100 / lifetime + // Cap at 99% since procps does the same. + if percentCPU > 99 { + percentCPU = 99 + } + return int32(percentCPU) +} diff --git a/pkg/sentry/control/proc_test.go b/pkg/sentry/control/proc_test.go new file mode 100644 index 000000000..18286496f --- /dev/null +++ b/pkg/sentry/control/proc_test.go @@ -0,0 +1,164 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package control + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/log" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" +) + +func init() { + log.SetLevel(log.Debug) +} + +// Tests that ProcessData.Table() prints with the correct format. +func TestProcessListTable(t *testing.T) { + testCases := []struct { + pl []*Process + expected string + }{ + { + pl: []*Process{}, + expected: "UID PID PPID C STIME TIME CMD", + }, + { + pl: []*Process{ + &Process{ + UID: 0, + PID: 0, + PPID: 0, + C: 0, + STime: "0", + Time: "0", + Cmd: "zero", + }, + &Process{ + UID: 1, + PID: 1, + PPID: 1, + C: 1, + STime: "1", + Time: "1", + Cmd: "one", + }, + }, + expected: `UID PID PPID C STIME TIME CMD +0 0 0 0 0 0 zero +1 1 1 1 1 1 one`, + }, + } + + for _, tc := range testCases { + output := ProcessListToTable(tc.pl) + + if tc.expected != output { + t.Errorf("PrintTable(%v): got:\n%s\nwant:\n%s", tc.pl, output, tc.expected) + } + } +} + +func TestProcessListJSON(t *testing.T) { + testCases := []struct { + pl []*Process + expected string + }{ + { + pl: []*Process{}, + expected: "[]", + }, + { + pl: []*Process{ + &Process{ + UID: 0, + PID: 0, + PPID: 0, + C: 0, + STime: "0", + Time: "0", + Cmd: "zero", + }, + &Process{ + UID: 1, + PID: 1, + PPID: 1, + C: 1, + STime: "1", + Time: "1", + Cmd: "one", + }, + }, + expected: "[0,1]", + }, + } + + for _, tc := range testCases { + output, err := PrintPIDsJSON(tc.pl) + if err != nil { + t.Errorf("failed to generate JSON: %v", err) + } + + if 
tc.expected != output { + t.Errorf("PrintJSON(%v): got:\n%s\nwant:\n%s", tc.pl, output, tc.expected) + } + } +} + +func TestPercentCPU(t *testing.T) { + testCases := []struct { + stats usage.CPUStats + startTime ktime.Time + now ktime.Time + expected int32 + }{ + { + // Verify that 100% use is capped at 99. + stats: usage.CPUStats{UserTime: 1e9, SysTime: 1e9}, + startTime: ktime.FromNanoseconds(7e9), + now: ktime.FromNanoseconds(9e9), + expected: 99, + }, + { + // Verify that if usage > lifetime, we get at most 99% + // usage. + stats: usage.CPUStats{UserTime: 2e9, SysTime: 2e9}, + startTime: ktime.FromNanoseconds(7e9), + now: ktime.FromNanoseconds(9e9), + expected: 99, + }, + { + // Verify that 50% usage is reported correctly. + stats: usage.CPUStats{UserTime: 1e9, SysTime: 1e9}, + startTime: ktime.FromNanoseconds(12e9), + now: ktime.FromNanoseconds(16e9), + expected: 50, + }, + { + // Verify that 0% usage is reported correctly. + stats: usage.CPUStats{UserTime: 0, SysTime: 0}, + startTime: ktime.FromNanoseconds(12e9), + now: ktime.FromNanoseconds(14e9), + expected: 0, + }, + } + + for _, tc := range testCases { + if pcpu := percentCPU(tc.stats, tc.startTime, tc.now); pcpu != tc.expected { + t.Errorf("percentCPU(%v, %v, %v): got %d, want %d", tc.stats, tc.startTime, tc.now, pcpu, tc.expected) + } + } +} diff --git a/pkg/sentry/device/BUILD b/pkg/sentry/device/BUILD new file mode 100644 index 000000000..1a8b461ba --- /dev/null +++ b/pkg/sentry/device/BUILD @@ -0,0 +1,18 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "device", + srcs = ["device.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/device", + visibility = ["//pkg/sentry:internal"], + deps = ["//pkg/abi/linux"], +) + +go_test( + name = "device_test", + size = "small", + srcs = ["device_test.go"], + embed = [":device"], +) diff --git a/pkg/sentry/device/device.go b/pkg/sentry/device/device.go new file 
mode 100644 index 000000000..a5514c72f --- /dev/null +++ b/pkg/sentry/device/device.go @@ -0,0 +1,193 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package device defines reserved virtual kernel devices and structures +// for managing them. +// +// Saving and restoring devices is not necessary if the devices are initialized +// as package global variables. Package initialization happens in a single goroutine +// and in a deterministic order, so minor device numbers will be assigned in the +// same order as packages are loaded. +package device + +import ( + "bytes" + "fmt" + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +// ID identifies a device. +type ID struct { + Major uint64 + Minor uint64 +} + +// DeviceID formats a major and minor device number into a standard device number. +func (i *ID) DeviceID() uint64 { + return uint64(linux.MakeDeviceID(uint16(i.Major), uint32(i.Minor))) +} + +// nextAnonDeviceMinor is the next minor number for a new anonymous device. +// Must be accessed atomically. +var nextAnonDeviceMinor uint64 + +// NewAnonDevice creates a new anonymous device. 
Packages that require an anonymous +// device should initialize the device in a global variable in a file called device.go: +// +// var myDevice = device.NewAnonDevice() +func NewAnonDevice() *Device { + return &Device{ + ID: newAnonID(), + } +} + +// NewAnonMultiDevice creates a new multi-keyed anonymous device. Packages that require +// a multi-key anonymous device should initialize the device in a global variable in a +// file called device.go: +// +// var myDevice = device.NewAnonMultiDevice() +func NewAnonMultiDevice() *MultiDevice { + return &MultiDevice{ + ID: newAnonID(), + } +} + +// newAnonID assigns a major and minor number to an anonymous device ID. +func newAnonID() ID { + return ID{ + // Anon devices always have a major number of 0. + Major: 0, + // Use the next minor number. + Minor: atomic.AddUint64(&nextAnonDeviceMinor, 1), + } +} + +// Device is a simple virtual kernel device. +type Device struct { + ID + + // last is the last generated inode. + last uint64 +} + +// NextIno generates a new inode number +func (d *Device) NextIno() uint64 { + return atomic.AddUint64(&d.last, 1) +} + +// MultiDeviceKey provides a hashable key for a MultiDevice. The key consists +// of a raw device and inode for a resource, which must consistently identify +// the unique resource. It may optionally include a secondary device if +// appropriate. +// +// Note that using the path is not enough, because filesystems may rename a file +// to a different backing resource, at which point the path points to a different +// entity. Using only the inode is also not enough because the inode is assumed +// to be unique only within the device on which the resource exists. +type MultiDeviceKey struct { + Device uint64 + SecondaryDevice string + Inode uint64 +} + +// String stringifies the key. 
+func (m MultiDeviceKey) String() string { + return fmt.Sprintf("key{device: %d, sdevice: %s, inode: %d}", m.Device, m.SecondaryDevice, m.Inode) +} + +// MultiDevice allows for remapping resources that come from a variety of raw +// devices into a single device. The device ID should be one of the static +// Device IDs above and cannot be reused. +type MultiDevice struct { + ID + + mu sync.Mutex + last uint64 + cache map[MultiDeviceKey]uint64 + rcache map[uint64]MultiDeviceKey +} + +// String stringifies MultiDevice. +func (m *MultiDevice) String() string { + buf := bytes.NewBuffer(nil) + buf.WriteString("cache{") + for k, v := range m.cache { + buf.WriteString(fmt.Sprintf("%s -> %d, ", k, v)) + } + buf.WriteString("}") + return buf.String() +} + +// Map maps a raw device and inode into the inode space of MultiDevice, +// returning a virtualized inode. Raw devices and inodes can be reused; +// in this case, the same virtual inode will be returned. +func (m *MultiDevice) Map(key MultiDeviceKey) uint64 { + m.mu.Lock() + defer m.mu.Unlock() + + if m.cache == nil { + m.cache = make(map[MultiDeviceKey]uint64) + m.rcache = make(map[uint64]MultiDeviceKey) + } + + id, ok := m.cache[key] + if ok { + return id + } + // Step over reserved entries that may have been loaded. + idx := m.last + 1 + for { + if _, ok := m.rcache[idx]; !ok { + break + } + idx++ + } + // We found a non-reserved entry, use it. + m.last = idx + m.cache[key] = m.last + m.rcache[m.last] = key + return m.last +} + +// Load loads a raw device and inode into MultiDevice inode mappings +// with value as the virtual inode. +// +// By design, inodes start from 1 and continue until max uint64. This means +// that the zero value, which is often the uninitialized value, can be rejected +// as invalid. +func (m *MultiDevice) Load(key MultiDeviceKey, value uint64) bool { + // Reject the uninitialized value; see comment above. 
+ if value == 0 { + return false + } + + m.mu.Lock() + defer m.mu.Unlock() + + if m.cache == nil { + m.cache = make(map[MultiDeviceKey]uint64) + m.rcache = make(map[uint64]MultiDeviceKey) + } + + // Cache value at key. + m.cache[key] = value + + // Prevent value from being used by new inode mappings. + m.rcache[value] = key + + return true +} diff --git a/pkg/sentry/device/device_test.go b/pkg/sentry/device/device_test.go new file mode 100644 index 000000000..dfec45046 --- /dev/null +++ b/pkg/sentry/device/device_test.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package device + +import ( + "testing" +) + +func TestMultiDevice(t *testing.T) { + device := &MultiDevice{} + + // Check that Load fails to install virtual inodes that are + // uninitialized. + if device.Load(MultiDeviceKey{}, 0) { + t.Fatalf("got load of invalid virtual inode 0, want unsuccessful") + } + + inode := device.Map(MultiDeviceKey{}) + + // Assert that the same raw device and inode map to + // a consistent virtual inode. + if i := device.Map(MultiDeviceKey{}); i != inode { + t.Fatalf("got inode %d, want %d in %s", i, inode, device) + } + + // Assert that a new inode or new device does not conflict. 
+ if i := device.Map(MultiDeviceKey{Device: 0, Inode: 1}); i == inode { + t.Fatalf("got reused inode %d, want new distinct inode in %s", i, device) + } + last := device.Map(MultiDeviceKey{Device: 1, Inode: 0}) + if last == inode { + t.Fatalf("got reused inode %d, want new distinct inode in %s", last, device) + } + + // Virtual is the virtual inode we want to load. + virtual := last + 1 + + // Assert that we can load a virtual inode at a new place. + if !device.Load(MultiDeviceKey{Device: 0, Inode: 2}, virtual) { + t.Fatalf("got load of virtual inode %d failed, want success in %s", virtual, device) + } + + // Assert that the next inode skips over the loaded one. + if i := device.Map(MultiDeviceKey{Device: 0, Inode: 3}); i != virtual+1 { + t.Fatalf("got inode %d, want %d in %s", i, virtual+1, device) + } +} diff --git a/pkg/sentry/fs/BUILD b/pkg/sentry/fs/BUILD new file mode 100644 index 000000000..9b7264753 --- /dev/null +++ b/pkg/sentry/fs/BUILD @@ -0,0 +1,154 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "fs_state", + srcs = [ + "attr.go", + "dentry.go", + "dirent.go", + "dirent_cache.go", + "dirent_list.go", + "dirent_state.go", + "file.go", + "file_overlay.go", + "file_state.go", + "filesystems.go", + "flags.go", + "inode.go", + "inode_inotify.go", + "inode_operations.go", + "inode_overlay.go", + "inotify.go", + "inotify_event.go", + "inotify_watch.go", + "mock.go", + "mount.go", + "mount_overlay.go", + "mount_state.go", + "mounts.go", + "overlay.go", + "path.go", + ], + out = "fs_state.go", + package = "fs", +) + +go_library( + name = "fs", + srcs = [ + "attr.go", + "context.go", + "copy_up.go", + "dentry.go", + "dirent.go", + "dirent_cache.go", + "dirent_list.go", + "dirent_state.go", + "file.go", + "file_operations.go", + "file_overlay.go", + 
"file_state.go", + "filesystems.go", + "flags.go", + "fs.go", + "fs_state.go", + "inode.go", + "inode_inotify.go", + "inode_operations.go", + "inode_overlay.go", + "inotify.go", + "inotify_event.go", + "inotify_watch.go", + "mock.go", + "mount.go", + "mount_overlay.go", + "mount_state.go", + "mounts.go", + "offset.go", + "overlay.go", + "path.go", + "restore.go", + "save.go", + "seek.go", + "sync.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/amutex", + "//pkg/ilist", + "//pkg/log", + "//pkg/p9", + "//pkg/refs", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs/lock", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/time", + "//pkg/sentry/limits", + "//pkg/sentry/memmap", + "//pkg/sentry/platform", + "//pkg/sentry/uniqueid", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/tcpip", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) + +go_template_instance( + name = "dirent_list", + out = "dirent_list.go", + package = "fs", + prefix = "dirent", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*Dirent", + }, +) + +go_test( + name = "fs_x_test", + size = "small", + srcs = [ + "copy_up_test.go", + "file_overlay_test.go", + "inode_overlay_test.go", + "mounts_test.go", + ], + deps = [ + ":fs", + "//pkg/sentry/context", + "//pkg/sentry/context/contexttest", + "//pkg/sentry/fs/ramfs/test", + "//pkg/sentry/fs/tmpfs", + "//pkg/sentry/usermem", + "//pkg/syserror", + ], +) + +go_test( + name = "fs_test", + size = "small", + srcs = [ + "dirent_cache_test.go", + "dirent_refs_test.go", + "file_test.go", + "mount_test.go", + "path_test.go", + ], + embed = [":fs"], + deps = [ + "//pkg/sentry/context", + "//pkg/sentry/context/contexttest", + ], +) diff --git a/pkg/sentry/fs/README.md b/pkg/sentry/fs/README.md new file mode 100644 index 000000000..898271ee8 --- 
/dev/null +++ b/pkg/sentry/fs/README.md @@ -0,0 +1,217 @@ +This package provides an implementation of the Linux virtual filesystem. + +[TOC] + +## Overview + +- An `fs.Dirent` caches an `fs.Inode` in memory at a path in the VFS, giving + the `fs.Inode` a relative position with respect to other `fs.Inode`s. + +- If an `fs.Dirent` is referenced by two file descriptors, then those file + descriptors are coherent with each other: they depend on the same + `fs.Inode`. + +- A mount point is an `fs.Dirent` for which `fs.Dirent.mounted` is true. It + exposes the root of a mounted filesystem. + +- The `fs.Inode` produced by a registered filesystem on mount(2) owns an + `fs.MountedFilesystem` from which other `fs.Inode`s will be looked up. For a + remote filesystem, the `fs.MountedFilesystem` owns the connection to that + remote filesystem. + +- In general: + +``` +fs.Inode <------------------------------ +| | +| | +produced by | +exactly one | +| responsible for the +| virtual identity of +v | +fs.MountedFilesystem ------------------- +``` + +Glossary: + +- VFS: virtual filesystem. + +- inode: a virtual file object holding a cached view of a file on a backing + filesystem (includes metadata and page caches). + +- superblock: the virtual state of a mounted filesystem (e.g. the virtual + inode number set). + +- mount namespace: a view of the mounts under a root (during path traversal, + the VFS makes visible/follows the mount point that is in the current task's + mount namespace). + +## Save and restore + +An application's hard dependencies on filesystem state can be broken down into +two categories: + +- The state necessary to execute a traversal on or view the *virtual* + filesystem hierarchy, regardless of what files an application has open. + +- The state necessary to represent open files. + +The first is always necessary to save and restore. 
An application may never have +any open file descriptors, but across save and restore it should see a coherent +view of any mount namespace. NOTE: Currently only one "initial" +mount namespace is supported. + +The second is so that system calls across save and restore are coherent with +each other (e.g. so that unintended re-reads or overwrites do not occur). + +Specifically this state is: + +- An `fs.MountManager` containing mount points. + +- A `kernel.FDMap` containing pointers to open files. + +Anything else managed by the VFS that can be easily loaded into memory from a +filesystem is synced back to those filesystems and is no saved. Examples are +pages in page caches used for optimizations (i.e. readahead and writeback), and +directory entries used to accelerate path lookups. + +### Mount points + +Saving and restoring a mount point means saving and restoring: + +- The root of the mounted filesystem. + +- Mount flags, which control how the VFS interacts with the mounted + filesystem. + +- Any relevant metadata about the mounted filesystem. + +- All `fs.Inode`s referenced by the application that reside under the mount + point. + +`fs.MountedFilesystem` is metadata about a filesystem that is mounted. It is +referenced by every `fs.Inode` loaded into memory under the mount point +including the `fs.Inode` of the mount point itself. The `fs.MountedFilesystem` +maps file objects on the filesystem to a virtualized `fs.Inode` number and vice +versa. + +To restore all `fs.Inode`s under a given mount point, each `fs.Inode` leverages +its dependency on an `fs.MountedFilesystem`. Since the `fs.MountedFilesystem` +knows how an `fs.Inode` maps to a file object on a backing filesystem, this +mapping can be trivially consulted by each `fs.Inode` when the `fs.Inode` is +restored. 
+ +In detail, a mount point is saved in two steps: + +- First, after the kernel is paused but before state.Save, we walk all mount + namespaces and install a mapping from `fs.Inode` numbers to file paths + relative to the root of the mounted filesystem in each + `fs.MountedFilesystem`. This is subsequently called the set of `fs.Inode` + mappings. + +- Second, during state.Save, each `fs.MountedFilesystem` decides whether to + save the set of `fs.Inode` mappings. In-memory filesystems, like tmpfs, have + no need to save a set of `fs.Inode` mappings, since the `fs.Inode`s can be + entirely encoded in state file. Each `fs.MountedFilesystem` also optionally + saves the device name from when the filesystem was originally mounted. Each + `fs.Inode` saves its virtual identifier and a reference to a + `fs.MountedFilesystem`. + +A mount point is restored in two steps: + +- First, before state.Load, all mount configurations are stored in a global + `fs.RestoreEnvironment`. This tells us what mount points the user wants to + restore and how to re-establish pointers to backing filesystems. + +- Second, during state.Load, each `fs.MountedFilesystem` optionally searches + for a mount in the `fs.RestoreEnvironment` that matches its saved device + name. The `fs.MountedFilesystem` then restablishes a pointer to the root of + the mounted filesystem. For example, the mount specification provides the + network connection for a mounted remote filesystem client to communicate + with its remote file server. The `fs.MountedFilesystem` also trivially loads + its set of `fs.Inode` mappings. When an `fs.Inode` is encountered, the + `fs.Inode` loads its virtual identifier and its reference a + `fs.MountedFilesystem`. It uses the `fs.MountedFilesystem` to obtain the + root of the mounted filesystem and the `fs.Inode` mappings to obtain the + relative file path to its data. With these, the `fs.Inode` re-establishes a + pointer to its file object. 
+ +A mount point can trivially restore its `fs.Inode`s in parallel since +`fs.Inode`s have a restore dependency on their `fs.MountedFilesystem` and not on +each other. + +### Open files + +An `fs.File` references the following filesystem objects: + +```go +fs.File -> fs.Dirent -> fs.Inode -> fs.MountedFilesystem +``` + +The `fs.Inode` is restored using its `fs.MountedFilesystem`. The [Mount +points](#mount-points) section above describes how this happens in detail. The +`fs.Dirent` restores its pointer to an `fs.Inode`, pointers to parent and +children `fs.Dirents`, and the basename of the file. + +Otherwise an `fs.File` restores flags, an offset, and a unique identifier (only +used internally). + +It may use the `fs.Inode`, which it indirectly holds a reference on through the +`fs.Dirent`, to restablish an open file handle on the backing filesystem (e.g. +to continue reading and writing). + +## Overlay + +The overlay implementation in the fs package takes Linux overlayfs as a frame of +reference but corrects for several POSIX consistency errors. + +In Linux overlayfs, the `struct inode` used for reading and writing to the same +file may be different. This is because the `struct inode` is dissociated with +the process of copying up the file from the upper to the lower directory. Since +flock(2) and fcntl(2) locks, inotify(7) watches, page caches, and a file's +identity are all stored directly or indirectly off the `struct inode`, these +properties of the `struct inode` may be stale after the first modification. This +can lead to file locking bugs, missed inotify events, and inconsistent data in +shared memory mappings of files, to name a few problems. + +The fs package maintains a single `fs.Inode` to represent a directory entry in +an overlay and defines operations on this `fs.Inode` which synchronize with the +copy up process. This achieves several things: + ++ File locks, inotify watches, and the identity of the file need not be copied + at all. 
+ ++ Memory mappings of files coordinate with the copy up process so that if a + file in the lower directory is memory mapped, all references to it are + invalidated, forcing the application to re-fault on memory mappings of the + file under the upper directory. + +The `fs.Inode` holds metadata about files in the upper and/or lower directories +via an `fs.overlayEntry`. The `fs.overlayEntry` implements the `fs.Mappable` +interface. It multiplexes between upper and lower directory memory mappings and +stores a copy of memory references so they can be transferred to the upper +directory `fs.Mappable` when the file is copied up. + +The `fs.Inode` also holds a reference to a `fs.MountedFilesystem` that +normalizes across the mounted filesystem state of the upper and lower +directories. + +When a file is copied from the lower to the upper directory, attempts to +interact with the file block until the copy completes. All copying synchronizes +with rename(2). + +## Future Work + +### Overlay + +When a file is copied from a lower directory to an upper directory, several +locks are taken: the global renamuMu and the copyMu of the `fs.Inode` being +copied. This blocks operations on the file, including fault handling of memory +mappings. Performance could be improved by copying files into a temporary +directory that resides on the same filesystem as the upper directory and doing +an atomic rename, holding locks only during the rename operation. + +Additionally files are copied up synchronously. For large files, this causes a +noticeable latency. Performance could be improved by pipelining copies at +non-overlapping file offsets. 
diff --git a/pkg/sentry/fs/anon/BUILD b/pkg/sentry/fs/anon/BUILD new file mode 100644 index 000000000..6b18aee47 --- /dev/null +++ b/pkg/sentry/fs/anon/BUILD @@ -0,0 +1,21 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "anon", + srcs = [ + "anon.go", + "device.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/anon", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/usermem", + ], +) diff --git a/pkg/sentry/fs/anon/anon.go b/pkg/sentry/fs/anon/anon.go new file mode 100644 index 000000000..ddc2c0985 --- /dev/null +++ b/pkg/sentry/fs/anon/anon.go @@ -0,0 +1,46 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package anon implements an anonymous inode, useful for implementing +// inodes for pseudo filesystems. +package anon + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// NewInode constructs an anonymous Inode that is not associated +// with any real filesystem. Some types depend on completely pseudo +// "anon" inodes (eventfds, epollfds, etc). 
+func NewInode(ctx context.Context) *fs.Inode { + return fs.NewInode(fsutil.NewSimpleInodeOperations(fsutil.InodeSimpleAttributes{ + FSType: linux.ANON_INODE_FS_MAGIC, + UAttr: fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: fs.FileOwnerFromContext(ctx), + Perms: fs.FilePermissions{ + User: fs.PermMask{Read: true, Write: true}, + }, + Links: 1, + }), + }), fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}), fs.StableAttr{ + Type: fs.Anonymous, + DeviceID: PseudoDevice.DeviceID(), + InodeID: PseudoDevice.NextIno(), + BlockSize: usermem.PageSize, + }) +} diff --git a/pkg/sentry/fs/anon/device.go b/pkg/sentry/fs/anon/device.go new file mode 100644 index 000000000..1c666729c --- /dev/null +++ b/pkg/sentry/fs/anon/device.go @@ -0,0 +1,22 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package anon + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/device" +) + +// PseudoDevice is the device on which all anonymous inodes reside. 
+var PseudoDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/ashmem/BUILD b/pkg/sentry/fs/ashmem/BUILD new file mode 100644 index 000000000..e20e22a0f --- /dev/null +++ b/pkg/sentry/fs/ashmem/BUILD @@ -0,0 +1,83 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") +load("//tools/go_generics:defs.bzl", "go_template_instance") + +go_stateify( + name = "ashmem_state", + srcs = [ + "area.go", + "device.go", + "pin_board.go", + "uint64_range.go", + "uint64_set.go", + ], + out = "ashmem_state.go", + package = "ashmem", +) + +go_library( + name = "ashmem", + srcs = [ + "area.go", + "ashmem_state.go", + "device.go", + "pin_board.go", + "uint64_range.go", + "uint64_set.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ashmem", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/tmpfs", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/platform", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/tcpip/transport/unix", + ], +) + +go_test( + name = "ashmem_test", + size = "small", + srcs = ["pin_board_test.go"], + embed = [":ashmem"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/usermem", + ], +) + +go_template_instance( + name = "uint64_range", + out = "uint64_range.go", + package = "ashmem", + template = "//pkg/segment:generic_range", + types = { + "T": "uint64", + }, +) + +go_template_instance( + name = "uint64_set", + out = "uint64_set.go", + package = "ashmem", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "Range", + "Value": "noValue", + "Functions": "setFunctions", + }, +) diff --git a/pkg/sentry/fs/ashmem/area.go b/pkg/sentry/fs/ashmem/area.go new file mode 
100644 index 000000000..e4f76f0d0 --- /dev/null +++ b/pkg/sentry/fs/ashmem/area.go @@ -0,0 +1,313 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ashmem + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/tmpfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +const ( + // namePrefix is the name prefix assumed and forced by the Linux implementation. + namePrefix = "dev/ashmem" + + // nameLen is the maximum name length. + nameLen = 256 +) + +// Area implements fs.FileOperations. +type Area struct { + fsutil.NoFsync + fsutil.DeprecatedFileOperations + fsutil.NotDirReaddir + + ad *Device + + // mu protects fields below. + mu sync.Mutex `state:"nosave"` + tmpfsFile *fs.File + name string + size uint64 + perms usermem.AccessType + pb *PinBoard +} + +// Release implements fs.FileOperations.Release. 
+func (a *Area) Release() { + a.mu.Lock() + defer a.mu.Unlock() + if a.tmpfsFile != nil { + a.tmpfsFile.DecRef() + a.tmpfsFile = nil + } +} + +// Seek implements fs.FileOperations.Seek. +func (a *Area) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) { + a.mu.Lock() + defer a.mu.Unlock() + if a.size == 0 { + return 0, syserror.EINVAL + } + if a.tmpfsFile == nil { + return 0, syserror.EBADF + } + return a.tmpfsFile.FileOperations.Seek(ctx, file, whence, offset) +} + +// Read implements fs.FileOperations.Read. +func (a *Area) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + a.mu.Lock() + defer a.mu.Unlock() + if a.size == 0 { + return 0, nil + } + if a.tmpfsFile == nil { + return 0, syserror.EBADF + } + return a.tmpfsFile.FileOperations.Read(ctx, file, dst, offset) +} + +// Write implements fs.FileOperations.Write. +func (a *Area) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + return 0, syserror.ENOSYS +} + +// Flush implements fs.FileOperations.Flush. +func (a *Area) Flush(ctx context.Context, file *fs.File) error { + return nil +} + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (a *Area) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + a.mu.Lock() + defer a.mu.Unlock() + if a.size == 0 { + return syserror.EINVAL + } + + if !a.perms.SupersetOf(opts.Perms) { + return syserror.EPERM + } + opts.MaxPerms = opts.MaxPerms.Intersect(a.perms) + + if a.tmpfsFile == nil { + p := platform.FromContext(ctx) + if p == nil { + return syserror.ENOMEM + } + tmpfsInodeOps := tmpfs.NewInMemoryFile(ctx, usage.Tmpfs, fs.UnstableAttr{}, p) + // This is not backed by a real filesystem, so we pass in nil. 
+ tmpfsInode := fs.NewInode(tmpfsInodeOps, fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}), fs.StableAttr{}) + dirent := fs.NewDirent(tmpfsInode, namePrefix+"/"+a.name) + tmpfsFile, err := tmpfsInode.GetFile(ctx, dirent, fs.FileFlags{Read: true, Write: true}) + // Drop the extra reference on the Dirent. + dirent.DecRef() + + if err != nil { + return err + } + + // Truncate to the size set by ASHMEM_SET_SIZE ioctl. + err = tmpfsInodeOps.Truncate(ctx, tmpfsInode, int64(a.size)) + if err != nil { + return err + } + a.tmpfsFile = tmpfsFile + a.pb = NewPinBoard() + } + + return a.tmpfsFile.ConfigureMMap(ctx, opts) +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (a *Area) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + // Switch on ioctl request. + switch args[1].Uint() { + case linux.AshmemSetNameIoctl: + name, err := usermem.CopyStringIn(ctx, io, args[2].Pointer(), nameLen-1, usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, err + } + + a.mu.Lock() + defer a.mu.Unlock() + + // Cannot set name for already mapped ashmem. + if a.tmpfsFile != nil { + return 0, syserror.EINVAL + } + a.name = name + return 0, nil + + case linux.AshmemGetNameIoctl: + a.mu.Lock() + var local []byte + if a.name != "" { + nameLen := len([]byte(a.name)) + local = make([]byte, nameLen, nameLen+1) + copy(local, []byte(a.name)) + local = append(local, 0) + } else { + nameLen := len([]byte(namePrefix)) + local = make([]byte, nameLen, nameLen+1) + copy(local, []byte(namePrefix)) + local = append(local, 0) + } + a.mu.Unlock() + + if _, err := io.CopyOut(ctx, args[2].Pointer(), local, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return 0, syserror.EFAULT + } + return 0, nil + + case linux.AshmemSetSizeIoctl: + a.mu.Lock() + defer a.mu.Unlock() + + // Cannot set size for already mapped ashmem. 
+ if a.tmpfsFile != nil { + return 0, syserror.EINVAL + } + a.size = uint64(args[2].SizeT()) + return 0, nil + + case linux.AshmemGetSizeIoctl: + return uintptr(a.size), nil + + case linux.AshmemPinIoctl, linux.AshmemUnpinIoctl, linux.AshmemGetPinStatusIoctl: + // Locking and unlocking is ok since once tmpfsFile is set, it won't be nil again + // even after unmapping! Unlocking is needed in order to avoid a deadlock on + // usermem.CopyObjectIn. + + // Cannot execute pin-related ioctls before mapping. + a.mu.Lock() + if a.tmpfsFile == nil { + a.mu.Unlock() + return 0, syserror.EINVAL + } + a.mu.Unlock() + + var pin linux.AshmemPin + _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &pin, usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, syserror.EFAULT + } + + a.mu.Lock() + defer a.mu.Unlock() + return a.pinOperation(pin, args[1].Uint()) + + case linux.AshmemPurgeAllCachesIoctl: + return 0, nil + + case linux.AshmemSetProtMaskIoctl: + prot := uint64(args[2].ModeT()) + perms := usermem.AccessType{ + Read: prot&linux.PROT_READ != 0, + Write: prot&linux.PROT_WRITE != 0, + Execute: prot&linux.PROT_EXEC != 0, + } + + a.mu.Lock() + defer a.mu.Unlock() + + // Can only narrow prot mask. + if !a.perms.SupersetOf(perms) { + return 0, syserror.EINVAL + } + + // TODO: If personality flag + // READ_IMPLIES_EXEC is set, set PROT_EXEC if PORT_READ is set. + + a.perms = perms + return 0, nil + + case linux.AshmemGetProtMaskIoctl: + return uintptr(a.perms.Prot()), nil + default: + // Ioctls irrelevant to Ashmem. + return 0, syserror.EINVAL + } +} + +// pinOperation should only be called while holding a.mu. +func (a *Area) pinOperation(pin linux.AshmemPin, op uint32) (uintptr, error) { + // Page-align a.size for checks. + pageAlignedSize, ok := usermem.Addr(a.size).RoundUp() + if !ok { + return 0, syserror.EINVAL + } + // Len 0 means everything onward. 
+ if pin.Len == 0 { + pin.Len = uint32(pageAlignedSize) - pin.Offset + } + // Both Offset and Len have to be page-aligned. + if pin.Offset%uint32(usermem.PageSize) != 0 { + return 0, syserror.EINVAL + } + if pin.Len%uint32(usermem.PageSize) != 0 { + return 0, syserror.EINVAL + } + // Adding Offset and Len must not cause an uint32 overflow. + if end := pin.Offset + pin.Len; end < pin.Offset { + return 0, syserror.EINVAL + } + // Pin range must not exceed a's size. + if uint32(pageAlignedSize) < pin.Offset+pin.Len { + return 0, syserror.EINVAL + } + // Handle each operation. + r := RangeFromAshmemPin(pin) + switch op { + case linux.AshmemPinIoctl: + if a.pb.PinRange(r) { + return linux.AshmemWasPurged, nil + } + return linux.AshmemNotPurged, nil + + case linux.AshmemUnpinIoctl: + // TODO: Implement purge on unpin. + a.pb.UnpinRange(r) + return 0, nil + + case linux.AshmemGetPinStatusIoctl: + if a.pb.RangePinnedStatus(r) { + return linux.AshmemIsPinned, nil + } + return linux.AshmemIsUnpinned, nil + + default: + panic("unreachable") + } + +} diff --git a/pkg/sentry/fs/ashmem/device.go b/pkg/sentry/fs/ashmem/device.go new file mode 100644 index 000000000..c5b51d4a7 --- /dev/null +++ b/pkg/sentry/fs/ashmem/device.go @@ -0,0 +1,169 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ashmem implements Android ashmem module (Anonymus Shared Memory). 
+package ashmem + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// Device implements fs.InodeOperations. +type Device struct { + fsutil.DeprecatedFileOperations + fsutil.InodeNoExtendedAttributes + fsutil.InodeNotDirectory + fsutil.InodeNotRenameable + fsutil.InodeNotSocket + fsutil.InodeNotSymlink + fsutil.NoFsync + fsutil.NoMappable + fsutil.NoopWriteOut + fsutil.NotDirReaddir + + mu sync.Mutex `state:"nosave"` + unstable fs.UnstableAttr +} + +// NewDevice creates and intializes a Device structure. +func NewDevice(ctx context.Context, owner fs.FileOwner, fp fs.FilePermissions) *Device { + return &Device{ + unstable: fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: owner, + Perms: fp, + Links: 1, + }), + } +} + +// Release implements fs.InodeOperations.Release. +func (ad *Device) Release(context.Context) {} + +// GetFile implements fs.InodeOperations.GetFile. +func (ad *Device) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, d, flags, &Area{ + ad: ad, + tmpfsFile: nil, + perms: usermem.AnyAccess, + }), nil +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (ad *Device) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + ad.mu.Lock() + defer ad.mu.Unlock() + return ad.unstable, nil +} + +// Check implements fs.InodeOperations.Check. +func (ad *Device) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. 
+func (ad *Device) SetPermissions(ctx context.Context, inode *fs.Inode, fp fs.FilePermissions) bool { + ad.mu.Lock() + defer ad.mu.Unlock() + ad.unstable.Perms = fp + ad.unstable.StatusChangeTime = time.NowFromContext(ctx) + return true +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (ad *Device) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error { + ad.mu.Lock() + defer ad.mu.Unlock() + if owner.UID.Ok() { + ad.unstable.Owner.UID = owner.UID + } + if owner.GID.Ok() { + ad.unstable.Owner.GID = owner.GID + } + return nil +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (ad *Device) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error { + if ts.ATimeOmit && ts.MTimeOmit { + return nil + } + + ad.mu.Lock() + defer ad.mu.Unlock() + + now := time.NowFromContext(ctx) + if !ts.ATimeOmit { + if ts.ATimeSetSystemTime { + ad.unstable.AccessTime = now + } else { + ad.unstable.AccessTime = ts.ATime + } + } + if !ts.MTimeOmit { + if ts.MTimeSetSystemTime { + ad.unstable.ModificationTime = now + } else { + ad.unstable.ModificationTime = ts.MTime + } + } + ad.unstable.StatusChangeTime = now + return nil +} + +// Truncate implements fs.InodeOperations.WriteOut. +// +// Ignored by ashmem. +func (ad *Device) Truncate(ctx context.Context, inode *fs.Inode, size int64) error { + return nil +} + +// AddLink implements fs.InodeOperations.AddLink. +// +// Ashmem doesn't support links, no-op. +func (ad *Device) AddLink() {} + +// DropLink implements fs.InodeOperations.DropLink. +// +// Ashmem doesn't support links, no-op. +func (ad *Device) DropLink() {} + +// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange. +func (ad *Device) NotifyStatusChange(ctx context.Context) { + ad.mu.Lock() + defer ad.mu.Unlock() + now := time.NowFromContext(ctx) + ad.unstable.ModificationTime = now + ad.unstable.StatusChangeTime = now +} + +// IsVirtual implements fs.InodeOperations.IsVirtual. 
+// +// Ashmem is virtual. +func (ad *Device) IsVirtual() bool { + return true +} + +// StatFS implements fs.InodeOperations.StatFS. +// +// Ashmem doesn't support querying for filesystem info. +func (ad *Device) StatFS(context.Context) (fs.Info, error) { + return fs.Info{}, syserror.ENOSYS +} diff --git a/pkg/sentry/fs/ashmem/pin_board.go b/pkg/sentry/fs/ashmem/pin_board.go new file mode 100644 index 000000000..c7fb3822c --- /dev/null +++ b/pkg/sentry/fs/ashmem/pin_board.go @@ -0,0 +1,125 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ashmem + +import "gvisor.googlesource.com/gvisor/pkg/abi/linux" + +const maxUint64 = ^uint64(0) + +// setFunctions implements segment.Functions generated from segment.Functions for +// uint64 Key and noValue Value. For more information, see the build file and +// segment set implementation at pkg/segment/set.go. +type setFunctions struct{} + +// noValue is a type of range attached value, which is irrelevant here. +type noValue struct{} + +// MinKey implements segment.Functions.MinKey. +func (setFunctions) MinKey() uint64 { + return 0 +} + +// MaxKey implements segment.Functions.MaxKey. +func (setFunctions) MaxKey() uint64 { + return maxUint64 +} + +// ClearValue implements segment.Functions.ClearValue. +func (setFunctions) ClearValue(*noValue) { + return +} + +// Merge implements segment.Functions.Merge. 
+func (setFunctions) Merge(Range, noValue, Range, noValue) (noValue, bool) { + return noValue{}, true +} + +// Split implements segment.Functions.Split. +func (setFunctions) Split(Range, noValue, uint64) (noValue, noValue) { + return noValue{}, noValue{} +} + +// PinBoard represents a set of pinned ranges in ashmem. +// +// segment.Set is used for implementation where segments represent +// ranges of pinned bytes, while gaps represent ranges of unpinned +// bytes. All ranges are page-aligned. +type PinBoard struct { + Set +} + +// NewPinBoard creates a new pin board with all pages pinned. +func NewPinBoard() *PinBoard { + var pb PinBoard + pb.PinRange(Range{0, maxUint64}) + return &pb +} + +// PinRange pins all pages in the specified range and returns true +// if there are any newly pinned pages. +func (pb *PinBoard) PinRange(r Range) bool { + pinnedPages := false + for gap := pb.LowerBoundGap(r.Start); gap.Ok() && gap.Start() < r.End; { + common := gap.Range().Intersect(r) + if common.Length() == 0 { + gap = gap.NextGap() + continue + } + pinnedPages = true + gap = pb.Insert(gap, common, noValue{}).NextGap() + } + return pinnedPages +} + +// UnpinRange unpins all pages in the specified range. +func (pb *PinBoard) UnpinRange(r Range) { + for seg := pb.LowerBoundSegment(r.Start); seg.Ok() && seg.Start() < r.End; { + common := seg.Range().Intersect(r) + if common.Length() == 0 { + seg = seg.NextSegment() + continue + } + seg = pb.RemoveRange(common).NextSegment() + } +} + +// RangePinnedStatus returns false if there's at least one unpinned page in the +// specified range. +func (pb *PinBoard) RangePinnedStatus(r Range) bool { + for gap := pb.LowerBoundGap(r.Start); gap.Ok() && gap.Start() < r.End; { + common := gap.Range().Intersect(r) + if common.Length() == 0 { + gap = gap.NextGap() + continue + } + return false + } + return true +} + +// RangeFromAshmemPin converts ashmem's original pin structure +// to Range. 
+func RangeFromAshmemPin(ap linux.AshmemPin) Range { + if ap.Len == 0 { + return Range{ + uint64(ap.Offset), + maxUint64, + } + } + return Range{ + uint64(ap.Offset), + uint64(ap.Offset) + uint64(ap.Len), + } +} diff --git a/pkg/sentry/fs/ashmem/pin_board_test.go b/pkg/sentry/fs/ashmem/pin_board_test.go new file mode 100644 index 000000000..f4ea5de6d --- /dev/null +++ b/pkg/sentry/fs/ashmem/pin_board_test.go @@ -0,0 +1,130 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ashmem + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +func TestPinBoard(t *testing.T) { + pb := NewPinBoard() + + // Confirm that all pages are pinned. + if !pb.RangePinnedStatus(RangeFromAshmemPin(linux.AshmemPin{0, 0})) { + t.Errorf("RangePinnedStatus(all pages) returned false (unpinned) at start.") + } + + // Unpin pages [1, 11) (counting from 0) + pb.UnpinRange(RangeFromAshmemPin(linux.AshmemPin{ + usermem.PageSize, + usermem.PageSize * 10, + })) + + // Confirm that pages [1, 11) are unpinned and that page 0 and pages + // larger than 10 are pinned. 
+ pinned := []linux.AshmemPin{ + { + 0, + usermem.PageSize, + }, { + usermem.PageSize * 11, + 0, + }, + } + + for _, pin := range pinned { + if !pb.RangePinnedStatus(RangeFromAshmemPin(pin)) { + t.Errorf("RangePinnedStatus(AshmemPin{offset (pages): %v, len (pages): %v}) returned false (unpinned).", + pin.Offset, pin.Len) + } + } + + unpinned := []linux.AshmemPin{ + { + usermem.PageSize, + usermem.PageSize * 10, + }, + } + + for _, pin := range unpinned { + if pb.RangePinnedStatus(RangeFromAshmemPin(pin)) { + t.Errorf("RangePinnedStatus(AshmemPin{offset (pages): %v, len (pages): %v}) returned true (pinned).", + pin.Offset, pin.Len) + } + } + + // Pin pages [2, 6). + pb.PinRange(RangeFromAshmemPin(linux.AshmemPin{ + usermem.PageSize * 2, + usermem.PageSize * 4, + })) + + // Confirm that pages 0, [2, 6) and pages larger than 10 are pinned + // while others remain unpinned. + pinned = []linux.AshmemPin{ + { + 0, + usermem.PageSize, + }, + { + usermem.PageSize * 2, + usermem.PageSize * 4, + }, + { + usermem.PageSize * 11, + 0, + }, + } + + for _, pin := range pinned { + if !pb.RangePinnedStatus(RangeFromAshmemPin(pin)) { + t.Errorf("RangePinnedStatus(AshmemPin{offset (pages): %v, len (pages): %v}) returned false (unpinned).", + pin.Offset, pin.Len) + } + } + + unpinned = []linux.AshmemPin{ + { + usermem.PageSize, + usermem.PageSize, + }, { + usermem.PageSize * 6, + usermem.PageSize * 5, + }, + } + + for _, pin := range unpinned { + if pb.RangePinnedStatus(RangeFromAshmemPin(pin)) { + t.Errorf("RangePinnedStatus(AshmemPin{offset (pages): %v, len (pages): %v}) returned true (pinned).", + pin.Offset, pin.Len) + } + } + + // Status of a partially pinned range is unpinned. + if pb.RangePinnedStatus(RangeFromAshmemPin(linux.AshmemPin{0, 0})) { + t.Errorf("RangePinnedStatus(all pages) returned true (pinned).") + } + + // Pin the whole range again. + pb.PinRange(RangeFromAshmemPin(linux.AshmemPin{0, 0})) + + // Confirm that all pages are pinned. 
+ if !pb.RangePinnedStatus(RangeFromAshmemPin(linux.AshmemPin{0, 0})) { + t.Errorf("RangePinnedStatus(all pages) returned false (unpinned) at start.") + } +} diff --git a/pkg/sentry/fs/attr.go b/pkg/sentry/fs/attr.go new file mode 100644 index 000000000..56a2ad6f7 --- /dev/null +++ b/pkg/sentry/fs/attr.go @@ -0,0 +1,382 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "os" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" +) + +// InodeType enumerates types of Inodes. +type InodeType int + +const ( + // RegularFile is a regular file. + RegularFile InodeType = iota + + // SpecialFile is a file that doesn't support SeekEnd. It is used for + // things like proc files. + SpecialFile + + // Directory is a directory. + Directory + + // SpecialDirectory is a directory that *does* support SeekEnd. It's + // the opposite of the SpecialFile scenario above. It similarly + // supports proc files. + SpecialDirectory + + // Symlink is a symbolic link. + Symlink + + // Pipe is a pipe (named or regular). + Pipe + + // Socket is a socket. + Socket + + // CharacterDevice is a character device. + CharacterDevice + + // BlockDevice is a block device. 
+ BlockDevice + + // Anonymous is an anonymous type when none of the above apply. + // Epoll fds and event-driven fds fit this category. + Anonymous +) + +// String returns a human-readable representation of the InodeType. +func (n InodeType) String() string { + switch n { + case RegularFile, SpecialFile: + return "file" + case Directory, SpecialDirectory: + return "directory" + case Symlink: + return "symlink" + case Pipe: + return "pipe" + case Socket: + return "socket" + case CharacterDevice: + return "character-device" + case BlockDevice: + return "block-device" + case Anonymous: + return "anonymous" + default: + return "unknown" + } +} + +// StableAttr contains Inode attributes that will be stable throughout the +// lifetime of the Inode. +type StableAttr struct { + // Type is the InodeType of a InodeOperations. + Type InodeType + + // DeviceID is the device on which a InodeOperations resides. + DeviceID uint64 + + // InodeID uniquely identifies InodeOperations on its device. + InodeID uint64 + + // BlockSize is the block size of data backing this InodeOperations. + BlockSize int64 + + // DeviceFileMajor is the major device number of this Node, if it is a + // device file. + DeviceFileMajor uint16 + + // DeviceFileMinor is the minor device number of this Node, if it is a + // device file. + DeviceFileMinor uint32 +} + +// IsRegular returns true if StableAttr.Type matches a regular file. +func IsRegular(s StableAttr) bool { + return s.Type == RegularFile +} + +// IsFile returns true if StableAttr.Type matches any type of file. +func IsFile(s StableAttr) bool { + return s.Type == RegularFile || s.Type == SpecialFile +} + +// IsDir returns true if StableAttr.Type matches any type of directory. +func IsDir(s StableAttr) bool { + return s.Type == Directory || s.Type == SpecialDirectory +} + +// IsSymlink returns true if StableAttr.Type matches a symlink. 
+func IsSymlink(s StableAttr) bool { + return s.Type == Symlink +} + +// IsPipe returns true if StableAttr.Type matches any type of pipe. +func IsPipe(s StableAttr) bool { + return s.Type == Pipe +} + +// IsSocket returns true if StableAttr.Type matches any type of socket. +func IsSocket(s StableAttr) bool { + return s.Type == Socket +} + +// IsCharDevice returns true if StableAttr.Type matches a character device. +func IsCharDevice(s StableAttr) bool { + return s.Type == CharacterDevice +} + +// UnstableAttr contains Inode attributes that may change over the lifetime +// of the Inode. +type UnstableAttr struct { + // Size is the file size in bytes. + Size int64 + + // Usage is the actual data usage in bytes. + Usage int64 + + // Perms is the protection (read/write/execute for user/group/other). + Perms FilePermissions + + // Owner describes the ownership of this file. + Owner FileOwner + + // AccessTime is the time of last access + AccessTime ktime.Time + + // ModificationTime is the time of last modification. + ModificationTime ktime.Time + + // StatusChangeTime is the time of last attribute modification. + StatusChangeTime ktime.Time + + // Links is the number of hard links. + Links uint64 +} + +// WithCurrentTime returns u with AccessTime == ModificationTime == current time. +func WithCurrentTime(ctx context.Context, u UnstableAttr) UnstableAttr { + t := ktime.NowFromContext(ctx) + u.AccessTime = t + u.ModificationTime = t + u.StatusChangeTime = t + return u +} + +// AttrMask contains fields to mask StableAttr and UnstableAttr. +type AttrMask struct { + Type bool + DeviceID bool + InodeID bool + BlockSize bool + Size bool + Usage bool + Perms bool + UID bool + GID bool + AccessTime bool + ModificationTime bool + StatusChangeTime bool + Links bool +} + +// Empty returns true if all fields in AttrMask are false. +func (a AttrMask) Empty() bool { + return a == AttrMask{} +} + +// Union returns an AttrMask containing the inclusive disjunction of fields in a and b. 
+func (a AttrMask) Union(b AttrMask) AttrMask { + return AttrMask{ + Type: a.Type || b.Type, + DeviceID: a.DeviceID || b.DeviceID, + InodeID: a.InodeID || b.InodeID, + BlockSize: a.BlockSize || b.BlockSize, + Size: a.Size || b.Size, + Usage: a.Usage || b.Usage, + Perms: a.Perms || b.Perms, + UID: a.UID || b.UID, + GID: a.GID || b.GID, + AccessTime: a.AccessTime || b.AccessTime, + ModificationTime: a.ModificationTime || b.ModificationTime, + StatusChangeTime: a.StatusChangeTime || b.StatusChangeTime, + Links: a.Links || b.Links, + } +} + +// PermMask are file access permissions. +type PermMask struct { + // Read indicates reading is permitted. + Read bool + + // Write indicates writing is permitted. + Write bool + + // Execute indicates execution is permitted. + Execute bool +} + +// OnlyRead returns true when only the read bit is set. +func (p PermMask) OnlyRead() bool { + return p.Read && !p.Write && !p.Execute +} + +// String implements the fmt.Stringer interface for PermMask. +func (p PermMask) String() string { + return fmt.Sprintf("PermMask{Read: %v, Write: %v, Execute: %v}", p.Read, p.Write, p.Execute) +} + +// Mode returns the system mode (syscall.S_IXOTH, etc.) for these permissions +// in the "other" bits. +func (p PermMask) Mode() (mode os.FileMode) { + if p.Read { + mode |= syscall.S_IROTH + } + if p.Write { + mode |= syscall.S_IWOTH + } + if p.Execute { + mode |= syscall.S_IXOTH + } + return +} + +// SupersetOf returns true iff the permissions in p are a superset of the +// permissions in other. +func (p PermMask) SupersetOf(other PermMask) bool { + if !p.Read && other.Read { + return false + } + if !p.Write && other.Write { + return false + } + if !p.Execute && other.Execute { + return false + } + return true +} + +// FilePermissions represents the permissions of a file, with +// Read/Write/Execute bits for user, group, and other. 
+type FilePermissions struct { + User PermMask + Group PermMask + Other PermMask + + // Sticky, if set on directories, restricts renaming and deletion of + // files in those directories to the directory owner, file owner, or + // CAP_FOWNER. The sticky bit is ignored when set on other files. + Sticky bool + + // SetUID executables can call UID-setting syscalls without CAP_SETUID. + SetUID bool + + // SetGID executables can call GID-setting syscalls without CAP_SETGID. + SetGID bool +} + +// PermsFromMode takes the Other permissions (last 3 bits) of a FileMode and +// returns a set of PermMask. +func PermsFromMode(mode linux.FileMode) (perms PermMask) { + perms.Read = mode&linux.ModeOtherRead != 0 + perms.Write = mode&linux.ModeOtherWrite != 0 + perms.Execute = mode&linux.ModeOtherExec != 0 + return +} + +// FilePermsFromP9 converts a p9.FileMode to a FilePermissions struct. +func FilePermsFromP9(mode p9.FileMode) FilePermissions { + return FilePermsFromMode(linux.FileMode(mode)) +} + +// FilePermsFromMode converts a system file mode to a FilePermissions struct. +func FilePermsFromMode(mode linux.FileMode) (fp FilePermissions) { + perm := mode.Permissions() + fp.Other = PermsFromMode(perm) + fp.Group = PermsFromMode(perm >> 3) + fp.User = PermsFromMode(perm >> 6) + fp.Sticky = mode&linux.ModeSticky == linux.ModeSticky + fp.SetUID = mode&linux.ModeSetUID == linux.ModeSetUID + fp.SetGID = mode&linux.ModeSetGID == linux.ModeSetGID + return +} + +// LinuxMode returns the linux mode_t representation of these permissions. +func (f FilePermissions) LinuxMode() linux.FileMode { + m := linux.FileMode(f.User.Mode()<<6 | f.Group.Mode()<<3 | f.Other.Mode()) + if f.SetUID { + m |= linux.ModeSetUID + } + if f.SetGID { + m |= linux.ModeSetGID + } + if f.Sticky { + m |= linux.ModeSticky + } + return m +} + +// OSMode returns the Go runtime's OS independent os.FileMode representation of +// these permissions. 
+func (f FilePermissions) OSMode() os.FileMode { + m := os.FileMode(f.User.Mode()<<6 | f.Group.Mode()<<3 | f.Other.Mode()) + if f.SetUID { + m |= os.ModeSetuid + } + if f.SetGID { + m |= os.ModeSetgid + } + if f.Sticky { + m |= os.ModeSticky + } + return m +} + +// AnyExecute returns true if any of U/G/O have the execute bit set. +func (f FilePermissions) AnyExecute() bool { + return f.User.Execute || f.Group.Execute || f.Other.Execute +} + +// AnyWrite returns true if any of U/G/O have the write bit set. +func (f FilePermissions) AnyWrite() bool { + return f.User.Write || f.Group.Write || f.Other.Write +} + +// AnyRead returns true if any of U/G/O have the read bit set. +func (f FilePermissions) AnyRead() bool { + return f.User.Read || f.Group.Read || f.Other.Read +} + +// FileOwner represents ownership of a file. +type FileOwner struct { + UID auth.KUID + GID auth.KGID +} + +// RootOwner corresponds to KUID/KGID 0/0. +var RootOwner = FileOwner{ + UID: auth.RootKUID, + GID: auth.RootKGID, +} diff --git a/pkg/sentry/fs/binder/BUILD b/pkg/sentry/fs/binder/BUILD new file mode 100644 index 000000000..15f91699f --- /dev/null +++ b/pkg/sentry/fs/binder/BUILD @@ -0,0 +1,38 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "binder_state", + srcs = ["binder.go"], + out = "binder_state.go", + package = "binder", +) + +go_library( + name = "binder", + srcs = [ + "binder.go", + "binder_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/binder", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/log", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/platform", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + 
"//pkg/tcpip/transport/unix", + ], +) diff --git a/pkg/sentry/fs/binder/binder.go b/pkg/sentry/fs/binder/binder.go new file mode 100644 index 000000000..3f87b6b08 --- /dev/null +++ b/pkg/sentry/fs/binder/binder.go @@ -0,0 +1,358 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package binder implements Android Binder IPC module. +package binder + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +const ( + currentProtocolVersion = 8 + + // mmapSizeLimit is the upper limit for mapped memory size in Binder. + mmapSizeLimit = 4 * 1024 * 1024 // 4MB +) + +// Device implements fs.InodeOperations. 
+type Device struct {
+	fsutil.InodeNoExtendedAttributes
+	fsutil.InodeNotDirectory
+	fsutil.InodeNotRenameable
+	fsutil.InodeNotSocket
+	fsutil.InodeNotSymlink
+	fsutil.NoMappable
+	fsutil.NoopWriteOut
+	fsutil.DeprecatedFileOperations
+
+	// mu protects unstable.
+	mu       sync.Mutex `state:"nosave"`
+	unstable fs.UnstableAttr
+}
+
+// NewDevice creates and initializes a Device structure.
+func NewDevice(ctx context.Context, owner fs.FileOwner, fp fs.FilePermissions) *Device {
+	return &Device{
+		unstable: fs.WithCurrentTime(ctx, fs.UnstableAttr{
+			Owner: owner,
+			Perms: fp,
+			Links: 1,
+		}),
+	}
+}
+
+// Release implements fs.InodeOperations.Release.
+func (bd *Device) Release(context.Context) {}
+
+// GetFile implements fs.InodeOperations.GetFile.
+//
+// TODO: Add functionality to GetFile: Additional fields will be
+// needed in the Device structure, initialize them here. Also, Device will need
+// to keep track of the created Procs in order to implement BINDER_READ_WRITE
+// ioctl.
+func (bd *Device) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
+	return fs.NewFile(ctx, d, flags, &Proc{
+		bd:       bd,
+		task:     kernel.TaskFromContext(ctx),
+		platform: platform.FromContext(ctx),
+	}), nil
+}
+
+// UnstableAttr implements fs.InodeOperations.UnstableAttr.
+func (bd *Device) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
+	bd.mu.Lock()
+	defer bd.mu.Unlock()
+	return bd.unstable, nil
+}
+
+// Check implements fs.InodeOperations.Check.
+func (bd *Device) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {
+	return fs.ContextCanAccessFile(ctx, inode, p)
+}
+
+// SetPermissions implements fs.InodeOperations.SetPermissions.
+func (bd *Device) SetPermissions(ctx context.Context, inode *fs.Inode, fp fs.FilePermissions) bool {
+	bd.mu.Lock()
+	defer bd.mu.Unlock()
+	bd.unstable.Perms = fp
+	bd.unstable.StatusChangeTime = time.NowFromContext(ctx)
+	return true
+}
+
+// SetOwner implements fs.InodeOperations.SetOwner.
+func (bd *Device) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error {
+	bd.mu.Lock()
+	defer bd.mu.Unlock()
+	if owner.UID.Ok() {
+		bd.unstable.Owner.UID = owner.UID
+	}
+	if owner.GID.Ok() {
+		bd.unstable.Owner.GID = owner.GID
+	}
+	return nil
+}
+
+// SetTimestamps implements fs.InodeOperations.SetTimestamps.
+func (bd *Device) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error {
+	if ts.ATimeOmit && ts.MTimeOmit {
+		return nil
+	}
+
+	bd.mu.Lock()
+	defer bd.mu.Unlock()
+
+	now := time.NowFromContext(ctx)
+	if !ts.ATimeOmit {
+		if ts.ATimeSetSystemTime {
+			bd.unstable.AccessTime = now
+		} else {
+			bd.unstable.AccessTime = ts.ATime
+		}
+	}
+	if !ts.MTimeOmit {
+		if ts.MTimeSetSystemTime {
+			bd.unstable.ModificationTime = now
+		} else {
+			bd.unstable.ModificationTime = ts.MTime
+		}
+	}
+	bd.unstable.StatusChangeTime = now
+	return nil
+}
+
+// Truncate implements fs.InodeOperations.Truncate.
+//
+// Ignored for a character device, such as Binder.
+func (bd *Device) Truncate(ctx context.Context, inode *fs.Inode, size int64) error {
+	return nil
+}
+
+// AddLink implements fs.InodeOperations.AddLink.
+//
+// Binder doesn't support links, no-op.
+func (bd *Device) AddLink() {}
+
+// DropLink implements fs.InodeOperations.DropLink.
+//
+// Binder doesn't support links, no-op.
+func (bd *Device) DropLink() {}
+
+// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange.
+func (bd *Device) NotifyStatusChange(ctx context.Context) {
+	bd.mu.Lock()
+	defer bd.mu.Unlock()
+	now := time.NowFromContext(ctx)
+	bd.unstable.ModificationTime = now
+	bd.unstable.StatusChangeTime = now
+}
+
+// IsVirtual implements fs.InodeOperations.IsVirtual.
+//
+// Binder is virtual.
+func (bd *Device) IsVirtual() bool {
+	return true
+}
+
+// StatFS implements fs.InodeOperations.StatFS.
+//
+// Binder doesn't support querying for filesystem info.
+func (bd *Device) StatFS(context.Context) (fs.Info, error) {
+	return fs.Info{}, syserror.ENOSYS
+}
+
+// Proc implements fs.FileOperations and fs.IoctlGetter.
+type Proc struct {
+	fsutil.NoFsync
+	fsutil.DeprecatedFileOperations
+	fsutil.NotDirReaddir
+
+	bd       *Device
+	task     *kernel.Task
+	platform platform.Platform
+
+	// mu protects mapped.
+	mu sync.Mutex `state:"nosave"`
+
+	// mapped is memory allocated from platform.Memory() by AddMapping.
+	mapped platform.FileRange
+}
+
+// Release implements fs.FileOperations.Release.
+func (bp *Proc) Release() {
+	bp.mu.Lock()
+	defer bp.mu.Unlock()
+	if bp.mapped.Length() != 0 {
+		bp.platform.Memory().DecRef(bp.mapped)
+	}
+}
+
+// Seek implements fs.FileOperations.Seek.
+//
+// Binder doesn't support seek operation (unless in debug mode).
+func (bp *Proc) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) {
+	return offset, syserror.EOPNOTSUPP
+}
+
+// Read implements fs.FileOperations.Read.
+//
+// Binder doesn't support read operation (unless in debug mode).
+func (bp *Proc) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
+	return 0, syserror.EOPNOTSUPP
+}
+
+// Write implements fs.FileOperations.Write.
+//
+// Binder doesn't support write operation.
+func (bp *Proc) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) {
+	return 0, syserror.EOPNOTSUPP
+}
+
+// Flush implements fs.FileOperations.Flush.
+//
+// TODO: Implement.
+func (bp *Proc) Flush(ctx context.Context, file *fs.File) error { + return nil +} + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (bp *Proc) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + // Compare drivers/android/binder.c:binder_mmap(). + if caller := kernel.TaskFromContext(ctx); caller != bp.task { + return syserror.EINVAL + } + if opts.Length > mmapSizeLimit { + opts.Length = mmapSizeLimit + } + opts.MaxPerms.Write = false + + // TODO: Binder sets VM_DONTCOPY, preventing the created vma + // from being copied across fork(), but we don't support this yet. As + // a result, MMs containing a Binder mapping cannot be forked (MM.Fork will + // fail when AddMapping returns EBUSY). + + return fsutil.GenericConfigureMMap(file, bp, opts) +} + +// Ioctl implements fs.FileOperations.Ioctl. +// +// TODO: Implement. +func (bp *Proc) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + // Switch on ioctl request. + switch uint32(args[1].Int()) { + case linux.BinderVersionIoctl: + ver := &linux.BinderVersion{ + ProtocolVersion: currentProtocolVersion, + } + // Copy result to user-space. + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), ver, usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + case linux.BinderWriteReadIoctl: + // TODO: Implement. + fallthrough + case linux.BinderSetIdleTimeoutIoctl: + // TODO: Implement. + fallthrough + case linux.BinderSetMaxThreadsIoctl: + // TODO: Implement. + fallthrough + case linux.BinderSetIdlePriorityIoctl: + // TODO: Implement. + fallthrough + case linux.BinderSetContextMgrIoctl: + // TODO: Implement. + fallthrough + case linux.BinderThreadExitIoctl: + // TODO: Implement. + return 0, syserror.ENOSYS + default: + // Ioctls irrelevant to Binder. + return 0, syserror.EINVAL + } +} + +// AddMapping implements memmap.Mappable.AddMapping. 
+func (bp *Proc) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.mapped.Length() != 0 { + // mmap has been called before, which binder_mmap() doesn't like. + return syserror.EBUSY + } + // Binder only allocates and maps a single page up-front + // (drivers/android/binder.c:binder_mmap() => binder_update_page_range()). + fr, err := bp.platform.Memory().Allocate(usermem.PageSize, usage.Anonymous) + if err != nil { + return err + } + bp.mapped = fr + return nil +} + +// RemoveMapping implements memmap.Mappable.RemoveMapping. +func (bp *Proc) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) { + // Nothing to do. Notably, we don't free bp.mapped to allow another mmap. +} + +// CopyMapping implements memmap.Mappable.CopyMapping. +func (bp *Proc) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error { + // Nothing to do. Notably, this is one case where CopyMapping isn't + // equivalent to AddMapping, as AddMapping would return EBUSY. + return nil +} + +// Translate implements memmap.Mappable.Translate. +func (bp *Proc) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { + // TODO: In addition to the page initially allocated and mapped + // in AddMapping (Linux: binder_mmap), Binder allocates and maps pages for + // each transaction (Linux: binder_ioctl => binder_ioctl_write_read => + // binder_thread_write => binder_transaction => binder_alloc_buf => + // binder_update_page_range). Since we don't actually implement + // BinderWriteReadIoctl (Linux: BINDER_WRITE_READ), we only ever have the + // first page. 
+ var err error + if required.End > usermem.PageSize { + err = &memmap.BusError{syserror.EFAULT} + } + if required.Start == 0 { + return []memmap.Translation{ + { + Source: memmap.MappableRange{0, usermem.PageSize}, + File: bp.platform.Memory(), + Offset: bp.mapped.Start, + }, + }, err + } + return nil, err +} + +// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable. +func (bp *Proc) InvalidateUnsavable(ctx context.Context) error { + return nil +} diff --git a/pkg/sentry/fs/context.go b/pkg/sentry/fs/context.go new file mode 100644 index 000000000..b521bce75 --- /dev/null +++ b/pkg/sentry/fs/context.go @@ -0,0 +1,97 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" +) + +// contextID is the kernel package's type for context.Context.Value keys. +type contextID int + +const ( + // CtxRoot is a Context.Value key for a Dirent. + CtxRoot contextID = iota +) + +// ContextCanAccessFile determines whether `file` can be accessed in the requested way +// (for reading, writing, or execution) using the caller's credentials and user +// namespace, as does Linux's fs/namei.c:generic_permission. 
+func ContextCanAccessFile(ctx context.Context, inode *Inode, reqPerms PermMask) bool { + creds := auth.CredentialsFromContext(ctx) + uattr, err := inode.UnstableAttr(ctx) + if err != nil { + return false + } + + p := uattr.Perms.Other + // Are we owner or in group? + if uattr.Owner.UID == creds.EffectiveKUID { + p = uattr.Perms.User + } else if creds.InGroup(uattr.Owner.GID) { + p = uattr.Perms.Group + } + + // Are permissions satisfied without capability checks? + if p.SupersetOf(reqPerms) { + return true + } + + if IsDir(inode.StableAttr) { + // CAP_DAC_OVERRIDE can override any perms on directories. + if inode.CheckCapability(ctx, linux.CAP_DAC_OVERRIDE) { + return true + } + + // CAP_DAC_READ_SEARCH can normally only override Read perms, + // but for directories it can also override execution. + if !reqPerms.Write && inode.CheckCapability(ctx, linux.CAP_DAC_READ_SEARCH) { + return true + } + } + + // CAP_DAC_OVERRIDE can always override Read/Write. + // Can override executable only when at least one execute bit is set. + if !reqPerms.Execute || uattr.Perms.AnyExecute() { + if inode.CheckCapability(ctx, linux.CAP_DAC_OVERRIDE) { + return true + } + } + + // Read perms can be overridden by CAP_DAC_READ_SEARCH. + if reqPerms.OnlyRead() && inode.CheckCapability(ctx, linux.CAP_DAC_READ_SEARCH) { + return true + } + return false +} + +// FileOwnerFromContext returns a FileOwner using the effective user and group +// IDs used by ctx. +func FileOwnerFromContext(ctx context.Context) FileOwner { + creds := auth.CredentialsFromContext(ctx) + return FileOwner{creds.EffectiveKUID, creds.EffectiveKGID} +} + +// RootFromContext returns the root of the virtual filesystem observed by ctx, +// or nil if ctx is not associated with a virtual filesystem. If +// RootFromContext returns a non-nil fs.Dirent, a reference is taken on it. 
+func RootFromContext(ctx context.Context) *Dirent {
+	if v := ctx.Value(CtxRoot); v != nil {
+		return v.(*Dirent)
+	}
+	return nil
+}
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
new file mode 100644
index 000000000..ea74d0efd
--- /dev/null
+++ b/pkg/sentry/fs/copy_up.go
@@ -0,0 +1,414 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"fmt"
+	"io"
+	"sync"
+
+	"gvisor.googlesource.com/gvisor/pkg/log"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+// copyUp copies a file in an overlay from a lower filesystem to an
+// upper filesystem so that the file can be modified in the upper
+// filesystem. Copying a file involves several steps:
+//
+// - All parent directories of the file are created in the upper
+//   filesystem if they don't exist there. For instance:
+//
+//   upper /dir0
+//   lower /dir0/dir1/file
+//
+//   copyUp of /dir0/dir1/file creates /dir0/dir1 in order to create
+//   /dir0/dir1/file.
+//
+// - The file content is copied from the lower file to the upper
+//   file. For symlinks this is the symlink target. For directories,
+//   upper directory entries are merged with lower directory entries
+//   so there is no need to copy any entries.
+// +// - A subset of file attributes of the lower file are set on the +// upper file. These are the file owner, the file timestamps, +// and all non-overlay extended attributes. copyUp will fail if +// the upper filesystem does not support the setting of these +// attributes. +// +// The file's permissions are set when the file is created and its +// size will be brought up to date when its contents are copied. +// Notably no attempt is made to bring link count up to date because +// hard links are currently not preserved across overlay filesystems. +// +// - Memory mappings of the lower file are invalidated and memory +// references are transferred to the upper file. From this point on, +// memory mappings of the file will be backed by content in the upper +// filesystem. +// +// Synchronization: +// +// copyUp synchronizes with rename(2) using renameMu to ensure that +// parentage does not change while a file is being copied. In the context +// of rename(2), copyUpLockedForRename should be used to avoid deadlock on +// renameMu. +// +// The following operations synchronize with copyUp using copyMu: +// +// - InodeOperations, i.e. to ensure that looking up a directory takes +// into account new upper filesystem directories created by copy up, +// which subsequently can be modified. +// +// - FileOperations, i.e. to ensure that reading from a file does not +// continue using a stale, lower filesystem handle when the file is +// written to. +// +// Lock ordering: Dirent.mu -> Inode.overlay.copyMu -> Inode.mu. +// +// Caveats: +// +// If any step in copying up a file fails, copyUp cleans the upper +// filesystem of any partially up-to-date file. If this cleanup fails, +// the overlay may be in an unacceptable, inconsistent state, so copyUp +// panics. If copyUp fails because any step (above) fails, a generic +// error is returned. +// +// copyUp currently makes no attempt to optimize copying up file content. 
+// For large files, this means that copyUp blocks until the entire file +// is copied synchronously. +func copyUp(ctx context.Context, d *Dirent) error { + renameMu.RLock() + defer renameMu.RUnlock() + return copyUpLockedForRename(ctx, d) +} + +// copyUpLockedForRename is the same as copyUp except that it does not lock +// renameMu. +// +// It copies each component of d that does not yet exist in the upper +// filesystem. If d already exists in the upper filesystem, it is a no-op. +// +// Any error returned indicates a failure to copy all of d. This may +// leave the upper filesystem filled with any number of parent directories +// but the upper filesystem will never be in an inconsistent state. +// +// Preconditions: +// - d.Inode.overlay is non-nil. +func copyUpLockedForRename(ctx context.Context, d *Dirent) error { + for { + // Did we race with another copy up or does there + // already exist something in the upper filesystem + // for d? + d.Inode.overlay.copyMu.Lock() + if d.Inode.overlay.upper != nil { + d.Inode.overlay.copyMu.Unlock() + // Done, d is in the upper filesystem. + return nil + } + d.Inode.overlay.copyMu.Unlock() + + // Find the next component to copy up. We will work our way + // down to the last component of d and finally copy it. + next := findNextCopyUp(ctx, d) + + // Attempt to copy. + if err := doCopyUp(ctx, next); err != nil { + return err + } + } +} + +// findNextCopyUp finds the next component of d from root that does not +// yet exist in the upper filesystem. The parent of this component is +// also returned, which is the root of the overlay in the worst case. +func findNextCopyUp(ctx context.Context, d *Dirent) *Dirent { + next := d + for parent := next.parent; ; /* checked in-loop */ /* updated in-loop */ { + // Does this parent have a non-nil upper Inode? + parent.Inode.overlay.copyMu.RLock() + if parent.Inode.overlay.upper != nil { + parent.Inode.overlay.copyMu.RUnlock() + // Note that since we found an upper, it is stable. 
+ return next + } + parent.Inode.overlay.copyMu.RUnlock() + + // Continue searching for a parent with a non-nil + // upper Inode. + next = parent + parent = next.parent + } +} + +func doCopyUp(ctx context.Context, d *Dirent) error { + // Wait to get exclusive access to the upper Inode. + d.Inode.overlay.copyMu.Lock() + defer d.Inode.overlay.copyMu.Unlock() + if d.Inode.overlay.upper != nil { + // We raced with another doCopyUp, no problem. + return nil + } + + // Perform the copy. + return copyUpLocked(ctx, d.parent, d) +} + +// copyUpLocked creates a copy of next in the upper filesystem of parent. +// +// copyUpLocked must be called with d.Inode.overlay.copyMu locked. +// +// Returns a generic error on failure. +// +// Preconditions: +// - parent.Inode.overlay.upper must be non-nil. +// - next.Inode.overlay.copyMu must be locked writable. +// - next.Inode.overlay.lower must be non-nil. +// - upper filesystem must support setting file ownership and timestamps. +func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error { + // Extract the attributes of the file we wish to copy. + attrs, err := next.Inode.overlay.lower.UnstableAttr(ctx) + if err != nil { + log.Warningf("copy up failed to get lower attributes: %v", err) + return syserror.EIO + } + + var childUpperInode *Inode + parentUpper := parent.Inode.overlay.upper + + // Create the file in the upper filesystem and get an Inode for it. 
+ switch next.Inode.StableAttr.Type { + case RegularFile: + childFile, err := parentUpper.Create(ctx, RootFromContext(ctx), next.name, FileFlags{Read: true, Write: true}, attrs.Perms) + if err != nil { + log.Warningf("copy up failed to create file: %v", err) + return syserror.EIO + } + defer childFile.DecRef() + childUpperInode = childFile.Dirent.Inode + + case Directory: + if err := parentUpper.CreateDirectory(ctx, RootFromContext(ctx), next.name, attrs.Perms); err != nil { + log.Warningf("copy up failed to create directory: %v", err) + return syserror.EIO + } + childUpper, err := parentUpper.Lookup(ctx, next.name) + if err != nil { + log.Warningf("copy up failed to lookup directory: %v", err) + cleanupUpper(ctx, parentUpper, next.name) + return syserror.EIO + } + defer childUpper.DecRef() + childUpperInode = childUpper.Inode + + case Symlink: + childLower := next.Inode.overlay.lower + link, err := childLower.Readlink(ctx) + if err != nil { + log.Warningf("copy up failed to read symlink value: %v", err) + return syserror.EIO + } + if err := parentUpper.CreateLink(ctx, RootFromContext(ctx), link, next.name); err != nil { + log.Warningf("copy up failed to create symlink: %v", err) + return syserror.EIO + } + childUpper, err := parentUpper.Lookup(ctx, next.name) + if err != nil { + log.Warningf("copy up failed to lookup symlink: %v", err) + cleanupUpper(ctx, parentUpper, next.name) + return syserror.EIO + } + defer childUpper.DecRef() + childUpperInode = childUpper.Inode + + default: + return syserror.EINVAL + } + + // Bring file attributes up to date. This does not include size, which will be + // brought up to date with copyContentsLocked. + if err := copyAttributesLocked(ctx, childUpperInode, next.Inode.overlay.lower); err != nil { + log.Warningf("copy up failed to copy up attributes: %v", err) + cleanupUpper(ctx, parentUpper, next.name) + return syserror.EIO + } + + // Copy the entire file. 
+ if err := copyContentsLocked(ctx, childUpperInode, next.Inode.overlay.lower, attrs.Size); err != nil { + log.Warningf("copy up failed to copy up contents: %v", err) + cleanupUpper(ctx, parentUpper, next.name) + return syserror.EIO + } + + lowerMappable := next.Inode.overlay.lower.Mappable() + upperMappable := childUpperInode.Mappable() + if lowerMappable != nil && upperMappable == nil { + log.Warningf("copy up failed: cannot ensure memory mapping coherence") + cleanupUpper(ctx, parentUpper, next.name) + return syserror.EIO + } + + // Propagate memory mappings to the upper Inode. + next.Inode.overlay.mapsMu.Lock() + defer next.Inode.overlay.mapsMu.Unlock() + if upperMappable != nil { + // Remember which mappings we added so we can remove them on failure. + allAdded := make(map[memmap.MappableRange]memmap.MappingsOfRange) + for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + added := make(memmap.MappingsOfRange) + for m := range seg.Value() { + if err := upperMappable.AddMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start()); err != nil { + for m := range added { + upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start()) + } + for mr, mappings := range allAdded { + for m := range mappings { + upperMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, mr.Start) + } + } + return err + } + added[m] = struct{}{} + } + allAdded[seg.Range()] = added + } + } + + // Take a reference on the upper Inode (transferred to + // next.Inode.overlay.upper) and make new translations use it. + next.Inode.overlay.dataMu.Lock() + childUpperInode.IncRef() + next.Inode.overlay.upper = childUpperInode + next.Inode.overlay.dataMu.Unlock() + + // Invalidate existing translations through the lower Inode. + next.Inode.overlay.mappings.InvalidateAll(memmap.InvalidateOpts{}) + + // Remove existing memory mappings from the lower Inode. 
+ if lowerMappable != nil { + for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + for m := range seg.Value() { + lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start()) + } + } + } + + return nil +} + +// cleanupUpper removes name from parent, and panics if it is unsuccessful. +func cleanupUpper(ctx context.Context, parent *Inode, name string) { + if err := parent.InodeOperations.Remove(ctx, parent, name); err != nil { + // Unfortunately we don't have much choice. We shouldn't + // willingly give the caller access to a nonsense filesystem. + panic(fmt.Sprintf("overlay filesystem is in an inconsistent state: failed to remove %q from upper filesystem: %v", name, err)) + } +} + +// copyUpBuffers is a buffer pool for copying file content. The buffer +// size is the same used by io.Copy. +var copyUpBuffers = sync.Pool{New: func() interface{} { return make([]byte, 8*usermem.PageSize) }} + +// copyContentsLocked copies the contents of lower to upper. It panics if +// less than size bytes can be copied. +func copyContentsLocked(ctx context.Context, upper *Inode, lower *Inode, size int64) error { + // We don't support copying up for anything other than regular files. + if lower.StableAttr.Type != RegularFile { + return nil + } + + // Get a handle to the upper filesystem, which we will write to. + upperFile, err := overlayFile(ctx, upper, FileFlags{Write: true}) + if err != nil { + return err + } + defer upperFile.DecRef() + + // Get a handle to the lower filesystem, which we will read from. + lowerFile, err := overlayFile(ctx, lower, FileFlags{Read: true}) + if err != nil { + return err + } + defer lowerFile.DecRef() + + // Use a buffer pool to minimize allocations. + buf := copyUpBuffers.Get().([]byte) + defer copyUpBuffers.Put(buf) + + // Transfer the contents. + // + // One might be able to optimize this by doing parallel reads, parallel writes and reads, larger + // buffers, etc. 
But we really don't know anything about the underlying implementation, so these
+	// optimizations could be self-defeating. So we leave this as simple as possible.
+	var offset int64
+	for {
+		nr, err := lowerFile.FileOperations.Read(ctx, lowerFile, usermem.BytesIOSequence(buf), offset)
+		if err != nil && err != io.EOF {
+			return err
+		}
+		if nr == 0 {
+			if offset != size {
+				// Same as in cleanupUpper, we cannot live
+				// with ourselves if we do anything less.
+				panic(fmt.Sprintf("filesystem is in an inconsistent state: wrote only %d bytes of %d sized file", offset, size))
+			}
+			return nil
+		}
+		nw, err := upperFile.FileOperations.Write(ctx, upperFile, usermem.BytesIOSequence(buf[:nr]), offset)
+		if err != nil {
+			return err
+		}
+		offset += nw
+	}
+}
+
+// copyAttributesLocked copies a subset of lower's attributes to upper,
+// specifically owner, timestamps (except for status change time), and
+// extended attributes. Notably no attempt is made to copy link count.
+// Size and permissions are set on upper when the file content is copied
+// and when the file is created respectively.
+func copyAttributesLocked(ctx context.Context, upper *Inode, lower *Inode) error {
+	// Extract attributes from the lower filesystem.
+	lowerAttr, err := lower.UnstableAttr(ctx)
+	if err != nil {
+		return err
+	}
+	lowerXattr, err := lower.Listxattr()
+	if err != nil && err != syserror.EOPNOTSUPP {
+		return err
+	}
+
+	// Set the attributes on the upper filesystem.
+	if err := upper.InodeOperations.SetOwner(ctx, upper, lowerAttr.Owner); err != nil {
+		return err
+	}
+	if err := upper.InodeOperations.SetTimestamps(ctx, upper, TimeSpec{
+		ATime: lowerAttr.AccessTime,
+		MTime: lowerAttr.ModificationTime,
+	}); err != nil {
+		return err
+	}
+	for name := range lowerXattr {
+		value, err := lower.Getxattr(name)
+		if err != nil {
+			return err
+		}
+		if err := upper.InodeOperations.Setxattr(upper, name, value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/pkg/sentry/fs/copy_up_test.go b/pkg/sentry/fs/copy_up_test.go
new file mode 100644
index 000000000..c3c9d963d
--- /dev/null
+++ b/pkg/sentry/fs/copy_up_test.go
@@ -0,0 +1,182 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs_test
+
+import (
+	"bytes"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"sync"
+	"testing"
+
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+	_ "gvisor.googlesource.com/gvisor/pkg/sentry/fs/tmpfs"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+const (
+	// origFileSize is the original file size. This many bytes should be
+	// copied up before the test file is modified.
+	origFileSize = 4096
+
+	// truncateFileSize is the size to truncate all test files.
+	truncateFileSize = 10
+)
+
+// TestConcurrentCopyUp is a copy up stress test for an overlay.
+//
+// It creates a 64-level deep directory tree in the lower filesystem and
+// populates the last subdirectory with 64 files containing random content:
+//
+//  /lower
+//    /subdir0/.../subdir63/
+//                     /file0
+//                     ...
+//                     /file63
+//
+// The files are truncated concurrently by 4 goroutines per file.
+// These goroutines contend with copying up all parent 64 subdirectories
+// as well as the final file content.
+//
+// At the end of the test, we assert that the files respect the new truncated
+// size and contain the content we expect.
+func TestConcurrentCopyUp(t *testing.T) {
+	ctx := contexttest.Context(t)
+	files := makeOverlayTestFiles(t)
+
+	var wg sync.WaitGroup
+	for _, file := range files {
+		for i := 0; i < 4; i++ {
+			wg.Add(1)
+			go func(o *overlayTestFile) {
+				if err := o.File.Dirent.Inode.Truncate(ctx, o.File.Dirent, truncateFileSize); err != nil {
+					t.Fatalf("failed to copy up: %v", err)
+				}
+				wg.Done()
+			}(file)
+		}
+	}
+	wg.Wait()
+
+	for _, file := range files {
+		got := make([]byte, origFileSize)
+		n, err := file.File.Readv(ctx, usermem.BytesIOSequence(got))
+		if int(n) != truncateFileSize {
+			t.Fatalf("read %d bytes from file, want %d", n, truncateFileSize)
+		}
+		if err != nil && err != io.EOF {
+			t.Fatalf("read got error %v, want nil", err)
+		}
+		if !bytes.Equal(got[:n], file.content[:truncateFileSize]) {
+			t.Fatalf("file content is %v, want %v", got[:n], file.content[:truncateFileSize])
+		}
+	}
+}
+
+type overlayTestFile struct {
+	File    *fs.File
+	name    string
+	content []byte
+}
+
+func makeOverlayTestFiles(t *testing.T) []*overlayTestFile {
+	ctx := contexttest.Context(t)
+
+	// Create a lower tmpfs mount.
+	fsys, _ := fs.FindFilesystem("tmpfs")
+	lower, err := fsys.Mount(contexttest.Context(t), "", fs.MountSourceFlags{}, "")
+	if err != nil {
+		t.Fatalf("failed to mount tmpfs: %v", err)
+	}
+	lowerRoot := fs.NewDirent(lower, "")
+
+	// Make a deep set of subdirectories that everyone shares.
+ next := lowerRoot + for i := 0; i < 64; i++ { + name := fmt.Sprintf("subdir%d", i) + err := next.CreateDirectory(ctx, lowerRoot, name, fs.FilePermsFromMode(0777)) + if err != nil { + t.Fatalf("failed to create dir %q: %v", name, err) + } + next, err = next.Walk(ctx, lowerRoot, name) + if err != nil { + t.Fatalf("failed to walk to %q: %v", name, err) + } + } + + // Make a bunch of files in the last directory. + var files []*overlayTestFile + for i := 0; i < 64; i++ { + name := fmt.Sprintf("file%d", i) + f, err := next.Create(ctx, next, name, fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0666)) + if err != nil { + t.Fatalf("failed to create file %q: %v", name, err) + } + defer f.DecRef() + + relname, _ := f.Dirent.FullName(lowerRoot) + + o := &overlayTestFile{ + name: relname, + content: make([]byte, origFileSize), + } + + if _, err := rand.Read(o.content); err != nil { + t.Fatalf("failed to read from /dev/urandom: %v", err) + } + + if _, err := f.Writev(ctx, usermem.BytesIOSequence(o.content)); err != nil { + t.Fatalf("failed to write content to file %q: %v", name, err) + } + + files = append(files, o) + } + + // Create an empty upper tmpfs mount which we will copy up into. + upper, err := fsys.Mount(ctx, "", fs.MountSourceFlags{}, "") + if err != nil { + t.Fatalf("failed to mount tmpfs: %v", err) + } + + // Construct an overlay root. + overlay, err := fs.NewOverlayRoot(ctx, upper, lower, fs.MountSourceFlags{}) + if err != nil { + t.Fatalf("failed to construct overlay root: %v", err) + } + + // Create a MountNamespace to traverse the file system. + mns, err := fs.NewMountNamespace(ctx, overlay) + if err != nil { + t.Fatalf("failed to construct mount manager: %v", err) + } + + // Walk to all of the files in the overlay, open them readable. 
+ for _, f := range files { + d, err := mns.FindInode(ctx, mns.Root(), mns.Root(), f.name, 0) + if err != nil { + t.Fatalf("failed to find %q: %v", f.name, err) + } + defer d.DecRef() + + f.File, err = d.Inode.GetFile(ctx, d, fs.FileFlags{Read: true}) + if err != nil { + t.Fatalf("failed to open file %q readable: %v", f.name, err) + } + } + + return files +} diff --git a/pkg/sentry/fs/dentry.go b/pkg/sentry/fs/dentry.go new file mode 100644 index 000000000..d42e8da81 --- /dev/null +++ b/pkg/sentry/fs/dentry.go @@ -0,0 +1,232 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "sort" + + "gvisor.googlesource.com/gvisor/pkg/sentry/device" +) + +// DentAttr is the metadata of a directory entry. It is a subset of StableAttr. +type DentAttr struct { + // Type is the InodeType of an Inode. + Type InodeType + + // InodeID uniquely identifies an Inode on a device. + InodeID uint64 +} + +// GenericDentAttr returns a generic DentAttr where: +// +// Type == nt +// InodeID == the inode id of a new inode on device. +func GenericDentAttr(nt InodeType, device *device.Device) DentAttr { + return DentAttr{ + Type: nt, + InodeID: device.NextIno(), + } +} + +// DentrySerializer serializes a directory entry. +type DentrySerializer interface { + // CopyOut serializes a directory entry based on its name and attributes. + CopyOut(name string, attributes DentAttr) error + + // Written returns the number of bytes written. 
+ Written() int +} + +// CollectEntriesSerializer copies DentAttrs to Entries. The order in +// which entries are encountered is preserved in Order. +type CollectEntriesSerializer struct { + Entries map[string]DentAttr + Order []string +} + +// CopyOut implements DentrySerializer.CopyOut. +func (c *CollectEntriesSerializer) CopyOut(name string, attr DentAttr) error { + if c.Entries == nil { + c.Entries = make(map[string]DentAttr) + } + c.Entries[name] = attr + c.Order = append(c.Order, name) + return nil +} + +// Written implements DentrySerializer.Written. +func (c *CollectEntriesSerializer) Written() int { + return len(c.Entries) +} + +// DirCtx is used by node.Readdir to emit directory entries. It is not +// thread-safe. +type DirCtx struct { + // Serializer is used to serialize the node attributes. + Serializer DentrySerializer + + // attrs are DentAttrs + attrs map[string]DentAttr + + // DirCursor is the directory cursor. + // TODO: Once Handles are removed this can just live in the + // respective FileOperations implementations and not need to get + // plumbed everywhere. + DirCursor *string +} + +// DirEmit is called for each directory entry. +func (c *DirCtx) DirEmit(name string, attr DentAttr) error { + if c.Serializer != nil { + if err := c.Serializer.CopyOut(name, attr); err != nil { + return err + } + } + if c.attrs == nil { + c.attrs = make(map[string]DentAttr) + } + c.attrs[name] = attr + return nil +} + +// DentAttrs returns a map of DentAttrs corresponding to the emitted directory +// entries. +func (c *DirCtx) DentAttrs() map[string]DentAttr { + if c.attrs == nil { + c.attrs = make(map[string]DentAttr) + } + return c.attrs +} + +// GenericReaddir serializes DentAttrs based on a SortedDentryMap that must +// contain _all_ up-to-date DentAttrs under a directory. If ctx.DirCursor is +// not nil, it is updated to the name of the last DentAttr that was +// successfully serialized. +// +// Returns the number of entries serialized. 
+func GenericReaddir(ctx *DirCtx, s *SortedDentryMap) (int, error) {
+	// Retrieve the next directory entries.
+	var names []string
+	var entries map[string]DentAttr
+	if ctx.DirCursor != nil {
+		names, entries = s.GetNext(*ctx.DirCursor)
+	} else {
+		names, entries = s.GetAll()
+	}
+
+	// Try to serialize each entry.
+	var serialized int
+	for _, name := range names {
+		// Skip "" per POSIX. Skip "." and ".." which will be added by Dirent.Readdir.
+		if name == "" || name == "." || name == ".." {
+			continue
+		}
+
+		// Emit the directory entry.
+		if err := ctx.DirEmit(name, entries[name]); err != nil {
+			// Return potentially a partial serialized count.
+			return serialized, err
+		}
+
+		// We successfully serialized this entry.
+		serialized++
+
+		// Update the cursor with the name of the entry last serialized.
+		if ctx.DirCursor != nil {
+			*ctx.DirCursor = name
+		}
+	}
+
+	// Everything was serialized.
+	return serialized, nil
+}
+
+// SortedDentryMap is a sorted map of names and fs.DentAttr entries.
+type SortedDentryMap struct {
+	// names is always kept in sorted-order.
+	names []string
+
+	// entries maps names to fs.DentAttrs.
+	entries map[string]DentAttr
+}
+
+// NewSortedDentryMap maintains entries in name sorted order.
+func NewSortedDentryMap(entries map[string]DentAttr) *SortedDentryMap {
+	s := &SortedDentryMap{
+		names:   make([]string, 0, len(entries)),
+		entries: entries,
+	}
+	// Don't allow s.entries to be nil, because nil maps aren't Saveable.
+	if s.entries == nil {
+		s.entries = make(map[string]DentAttr)
+	}
+
+	// Collect names from entries and sort them.
+	for name := range s.entries {
+		s.names = append(s.names, name)
+	}
+	sort.Strings(s.names)
+	return s
+}
+
+// GetAll returns all names and entries in s.
+func (s *SortedDentryMap) GetAll() ([]string, map[string]DentAttr) {
+	return s.names, s.entries
+}
+
+// GetNext returns names after cursor in s and all entries.
+func (s *SortedDentryMap) GetNext(cursor string) ([]string, map[string]DentAttr) { + i := sort.SearchStrings(s.names, cursor) + if i == len(s.names) { + return nil, s.entries + } + + // Return everything strictly after the cursor. + if s.names[i] == cursor { + i++ + } + return s.names[i:], s.entries +} + +// Add adds an entry with the given name to the map, preserving sort order. If +// name already exists in the map, its entry will be overwritten. +func (s *SortedDentryMap) Add(name string, entry DentAttr) { + if _, ok := s.entries[name]; !ok { + // Map does not yet contain an entry with this name. We must + // insert it in s.names at the appropriate spot. + i := sort.SearchStrings(s.names, name) + s.names = append(s.names, "") + copy(s.names[i+1:], s.names[i:]) + s.names[i] = name + } + s.entries[name] = entry +} + +// Remove removes an entry with the given name from the map, preserving sort order. +func (s *SortedDentryMap) Remove(name string) { + if _, ok := s.entries[name]; !ok { + return + } + i := sort.SearchStrings(s.names, name) + copy(s.names[i:], s.names[i+1:]) + s.names = s.names[:len(s.names)-1] + delete(s.entries, name) +} + +// Contains reports whether the map contains an entry with the given name. 
+func (s *SortedDentryMap) Contains(name string) bool { + _, ok := s.entries[name] + return ok +} diff --git a/pkg/sentry/fs/dev/BUILD b/pkg/sentry/fs/dev/BUILD new file mode 100644 index 000000000..42049ecb5 --- /dev/null +++ b/pkg/sentry/fs/dev/BUILD @@ -0,0 +1,53 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "dev_state", + srcs = [ + "dev.go", + "fs.go", + "full.go", + "null.go", + "random.go", + ], + out = "dev_state.go", + package = "dev", +) + +go_library( + name = "dev", + srcs = [ + "dev.go", + "dev_state.go", + "device.go", + "fs.go", + "full.go", + "null.go", + "random.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/dev", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/amutex", + "//pkg/log", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/ashmem", + "//pkg/sentry/fs/binder", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/fs/tmpfs", + "//pkg/sentry/memmap", + "//pkg/sentry/mm", + "//pkg/sentry/platform", + "//pkg/sentry/safemem", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + ], +) diff --git a/pkg/sentry/fs/dev/dev.go b/pkg/sentry/fs/dev/dev.go new file mode 100644 index 000000000..36c61bfc2 --- /dev/null +++ b/pkg/sentry/fs/dev/dev.go @@ -0,0 +1,122 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dev provides a filesystem with simple devices. +package dev + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ashmem" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/binder" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/tmpfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// Dev is the root node. +type Dev struct { + ramfs.Dir +} + +func newCharacterDevice(iops fs.InodeOperations, msrc *fs.MountSource) *fs.Inode { + return fs.NewInode(iops, msrc, fs.StableAttr{ + DeviceID: devDevice.DeviceID(), + InodeID: devDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.CharacterDevice, + }) +} + +func newDirectory(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + iops := &ramfs.Dir{} + iops.InitDir(ctx, map[string]*fs.Inode{}, fs.RootOwner, fs.FilePermsFromMode(0555)) + return fs.NewInode(iops, msrc, fs.StableAttr{ + DeviceID: devDevice.DeviceID(), + InodeID: devDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} + +func newSymlink(ctx context.Context, target string, msrc *fs.MountSource) *fs.Inode { + iops := &ramfs.Symlink{} + iops.InitSymlink(ctx, fs.RootOwner, target) + return fs.NewInode(iops, msrc, fs.StableAttr{ + DeviceID: devDevice.DeviceID(), + InodeID: devDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Symlink, + }) +} + +// New returns the root node of a device filesystem. 
+func New(ctx context.Context, msrc *fs.MountSource, binderEnabled bool, ashmemEnabled bool) *fs.Inode { + d := &Dev{} + + contents := map[string]*fs.Inode{ + "fd": newSymlink(ctx, "/proc/self/fd", msrc), + "stdin": newSymlink(ctx, "/proc/self/fd/0", msrc), + "stdout": newSymlink(ctx, "/proc/self/fd/1", msrc), + "stderr": newSymlink(ctx, "/proc/self/fd/2", msrc), + + "null": newCharacterDevice(newNullDevice(ctx, fs.RootOwner, 0666), msrc), + "zero": newCharacterDevice(newZeroDevice(ctx, fs.RootOwner, 0666), msrc), + "full": newCharacterDevice(newFullDevice(ctx, fs.RootOwner, 0666), msrc), + + // This is not as good as /dev/random in linux because go + // runtime uses sys_random and /dev/urandom internally. + // According to 'man 4 random', this will be sufficient unless + // application uses this to generate long-lived GPG/SSL/SSH + // keys. + "random": newCharacterDevice(newRandomDevice(ctx, fs.RootOwner, 0444), msrc), + "urandom": newCharacterDevice(newRandomDevice(ctx, fs.RootOwner, 0444), msrc), + + "shm": tmpfs.NewDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0777), msrc, platform.FromContext(ctx)), + + // A devpts is typically mounted at /dev/pts to provide + // pseudoterminal support. Place an empty directory there for + // the devpts to be mounted over. + "pts": newDirectory(ctx, msrc), + // Similarly, applications expect a ptmx device at /dev/ptmx + // connected to the terminals provided by /dev/pts/. Rather + // than creating a device directly (which requires a hairy + // lookup on open to determine if a devpts exists), just create + // a symlink to the ptmx provided by devpts. (The Linux devpts + // documentation recommends this). + // + // If no devpts is mounted, this will simply be a dangling + // symlink, which is fine. 
+ "ptmx": newSymlink(ctx, "pts/ptmx", msrc), + } + + if binderEnabled { + binder := binder.NewDevice(ctx, fs.RootOwner, fs.FilePermsFromMode(0666)) + contents["binder"] = newCharacterDevice(binder, msrc) + } + + if ashmemEnabled { + ashmem := ashmem.NewDevice(ctx, fs.RootOwner, fs.FilePermsFromMode(0666)) + contents["ashmem"] = newCharacterDevice(ashmem, msrc) + } + + d.InitDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555)) + return fs.NewInode(d, msrc, fs.StableAttr{ + DeviceID: devDevice.DeviceID(), + InodeID: devDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} diff --git a/pkg/sentry/fs/dev/device.go b/pkg/sentry/fs/dev/device.go new file mode 100644 index 000000000..9d935e008 --- /dev/null +++ b/pkg/sentry/fs/dev/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dev + +import "gvisor.googlesource.com/gvisor/pkg/sentry/device" + +// devDevice is the pseudo-filesystem device. +var devDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/dev/fs.go b/pkg/sentry/fs/dev/fs.go new file mode 100644 index 000000000..4945ac962 --- /dev/null +++ b/pkg/sentry/fs/dev/fs.go @@ -0,0 +1,90 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dev
+
+import (
+	"strconv"
+
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+// Optional key containing boolean flag which specifies if Android Binder IPC should be enabled.
+const binderEnabledKey = "binder_enabled"
+
+// Optional key containing boolean flag which specifies if Android ashmem should be enabled.
+const ashmemEnabledKey = "ashmem_enabled"
+
+// filesystem is a devtmpfs.
+type filesystem struct{}
+
+func init() {
+	fs.RegisterFilesystem(&filesystem{})
+}
+
+// FilesystemName is the name under which the filesystem is registered.
+// Name matches drivers/base/devtmpfs.c:dev_fs_type.name.
+const FilesystemName = "devtmpfs"
+
+// Name is the name of the file system.
+func (*filesystem) Name() string {
+	return FilesystemName
+}
+
+// AllowUserMount allows users to mount(2) this file system.
+func (*filesystem) AllowUserMount() bool {
+	return true
+}
+
+// Flags returns that there is nothing special about this file system.
+//
+// In Linux, devtmpfs does the same thing.
+func (*filesystem) Flags() fs.FilesystemFlags {
+	return 0
+}
+
+// Mount returns a devtmpfs root that can be positioned in the vfs.
+func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string) (*fs.Inode, error) {
+	// device is always ignored.
+	// devtmpfs backed by ramfs ignores bad options. See fs/ramfs/inode.c:ramfs_parse_options.
+	// -> we should consider parsing the mode and backing devtmpfs by this.
+
+	// Parse generic comma-separated key=value options.
+	options := fs.GenericMountSourceOptions(data)
+
+	// binderEnabledKey is optional and binder is disabled by default.
+	binderEnabled := false
+	if beStr, exists := options[binderEnabledKey]; exists {
+		var err error
+		binderEnabled, err = strconv.ParseBool(beStr)
+		if err != nil {
+			return nil, syserror.EINVAL
+		}
+	}
+
+	// ashmemEnabledKey is optional and ashmem is disabled by default.
+	ashmemEnabled := false
+	if aeStr, exists := options[ashmemEnabledKey]; exists {
+		var err error
+		ashmemEnabled, err = strconv.ParseBool(aeStr)
+		if err != nil {
+			return nil, syserror.EINVAL
+		}
+	}
+
+	// Construct the devtmpfs root.
+	return New(ctx, fs.NewNonCachingMountSource(f, flags), binderEnabled, ashmemEnabled), nil
+}
diff --git a/pkg/sentry/fs/dev/full.go b/pkg/sentry/fs/dev/full.go
new file mode 100644
index 000000000..e13eb6c03
--- /dev/null
+++ b/pkg/sentry/fs/dev/full.go
@@ -0,0 +1,53 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dev
+
+import (
+	"math"
+
+	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+// fullDevice is used to implement /dev/full.
+type fullDevice struct {
+	ramfs.Entry
+}
+
+func newFullDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *fullDevice {
+	f := &fullDevice{}
+	f.InitEntry(ctx, owner, fs.FilePermsFromMode(mode))
+	return f
+}
+
+// DeprecatedPwritev implements fs.InodeOperations.DeprecatedPwritev by
+// returning ENOSPC.
+func (f *fullDevice) DeprecatedPwritev(_ context.Context, _ usermem.IOSequence, _ int64) (int64, error) {
+	return 0, syserror.ENOSPC
+}
+
+// DeprecatedPreadv implements fs.InodeOperations.DeprecatedPreadv.
+func (f *fullDevice) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, _ int64) (int64, error) {
+	return dst.ZeroOut(ctx, math.MaxInt64)
+}
+
+// Truncate should be simply ignored for character devices on linux.
+func (f *fullDevice) Truncate(context.Context, *fs.Inode, int64) error {
+	return nil
+}
diff --git a/pkg/sentry/fs/dev/null.go b/pkg/sentry/fs/dev/null.go
new file mode 100644
index 000000000..66b8ba967
--- /dev/null
+++ b/pkg/sentry/fs/dev/null.go
@@ -0,0 +1,96 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package dev + +import ( + "io" + "math" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type nullDevice struct { + ramfs.Entry +} + +func newNullDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *nullDevice { + n := &nullDevice{} + n.InitEntry(ctx, owner, fs.FilePermsFromMode(mode)) + return n +} + +// DeprecatedPreadv reads data from the device. +func (n *nullDevice) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + return 0, io.EOF +} + +// DeprecatedPwritev discards writes. +func (n *nullDevice) DeprecatedPwritev(_ context.Context, src usermem.IOSequence, offset int64) (int64, error) { + return src.NumBytes(), nil +} + +// Truncate should be simply ignored for character devices on linux. +func (n *nullDevice) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +type zeroDevice struct { + nullDevice +} + +func newZeroDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *zeroDevice { + zd := &zeroDevice{} + zd.InitEntry(ctx, owner, fs.FilePermsFromMode(mode)) + return zd +} + +// DeprecatedPreadv implements fs.InodeOperations.DeprecatedPreadv. +func (zd *zeroDevice) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + return dst.ZeroOut(ctx, math.MaxInt64) +} + +// GetFile overrides ramfs.Entry.GetFile and returns a zeroFile instead. +func (zd *zeroDevice) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + // Allow pread(2) and pwrite(2) on this file. 
+ flags.Pread = true + flags.Pwrite = true + + return fs.NewFile(ctx, dirent, flags, &zeroFileOperations{ + FileOperations: &fsutil.Handle{HandleOperations: dirent.Inode.HandleOps()}, + }), nil +} + +type zeroFileOperations struct { + fs.FileOperations +} + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (*zeroFileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + m, err := mm.NewSharedAnonMappable(opts.Length, platform.FromContext(ctx)) + if err != nil { + return err + } + opts.MappingIdentity = m + opts.Mappable = m + return nil +} diff --git a/pkg/sentry/fs/dev/random.go b/pkg/sentry/fs/dev/random.go new file mode 100644 index 000000000..0402f9355 --- /dev/null +++ b/pkg/sentry/fs/dev/random.go @@ -0,0 +1,55 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dev + +import ( + "crypto/rand" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type randomDevice struct { + ramfs.Entry +} + +func newRandomDevice(ctx context.Context, owner fs.FileOwner, mode linux.FileMode) *randomDevice { + r := &randomDevice{} + r.InitEntry(ctx, owner, fs.FilePermsFromMode(mode)) + return r +} + +// DeprecatedPreadv reads random data. +func (*randomDevice) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + return dst.CopyOutFrom(ctx, safemem.FromIOReader{rand.Reader}) +} + +// DeprecatedPwritev implements fs.HandleOperations.DeprecatedPwritev. +func (*randomDevice) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + // On Linux, "Writing to /dev/random or /dev/urandom will update the + // entropy pool with the data written, but this will not result in a higher + // entropy count" - random(4). We don't need to support this, but we do + // need to support the write, so just make it a no-op a la /dev/null. + return src.NumBytes(), nil +} + +// Truncate should be simply ignored for character devices on linux. +func (r *randomDevice) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} diff --git a/pkg/sentry/fs/dirent.go b/pkg/sentry/fs/dirent.go new file mode 100644 index 000000000..a75c7ea7e --- /dev/null +++ b/pkg/sentry/fs/dirent.go @@ -0,0 +1,1605 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "path" + "sort" + "sync" + "sync/atomic" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/uniqueid" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +type globalDirentMap struct { + mu sync.Mutex + dirents map[*Dirent]struct{} +} + +func (g *globalDirentMap) add(d *Dirent) { + g.mu.Lock() + g.dirents[d] = struct{}{} + g.mu.Unlock() +} + +func (g *globalDirentMap) remove(d *Dirent) { + g.mu.Lock() + delete(g.dirents, d) + g.mu.Unlock() +} + +// allDirents keeps track of all Dirents that need to be considered in +// Save/Restore for inode mappings. +// +// Because inodes do not hold paths, but inodes for external file systems map +// to an external path, every user-visible Dirent is stored in this map and +// iterated through upon save to keep inode ID -> restore path mappings. +var allDirents = globalDirentMap{ + dirents: map[*Dirent]struct{}{}, +} + +// renameMu protects the parent of *all* Dirents. (See explanation in +// lockForRename.) +// +// See fs.go for lock ordering. +var renameMu sync.RWMutex + +// Dirent holds an Inode in memory. +// +// A Dirent may be negative or positive: +// +// A negative Dirent contains a nil Inode and indicates that a path does not exist. 
This +// is a convention taken from the Linux dcache, see fs/dcache.c. A negative Dirent remains +// cached until a create operation replaces it with a positive Dirent. A negative Dirent +// always has one reference owned by its parent and takes _no_ reference on its parent. This +// ensures that its parent can be unhashed regardless of negative children. +// +// A positive Dirent contains a non-nil Inode. It remains cached for as long as there remain +// references to it. A positive Dirent always takes a reference on its parent. +// +// A Dirent may be a root Dirent (parent is nil) or be parented (non-nil parent). +// +// Dirents currently do not attempt to free entries that lack application references under +// memory pressure. +type Dirent struct { + // AtomicRefCount is our reference count. + refs.AtomicRefCount + + // userVisible indicates whether the Dirent is visible to the user or + // not. Only user-visible Dirents should save inode mappings in + // save/restore, as only they hold the real path to the underlying + // inode. + // + // See newDirent and Dirent.afterLoad. + userVisible bool + + // Inode is the underlying file object. + // + // Inode is exported currently to assist in implementing overlay Inodes (where a + // Inode.InodeOperations.Lookup may need to merge the Inode contained in a positive Dirent with + // another Inode). This is normally done before the Dirent is parented (there are + // no external references to it). + // + // Other objects in the VFS may take a reference to this Inode but only while holding + // a reference to this Dirent. + Inode *Inode + + // name is the name (i.e. basename) of this entry. + // + // N.B. name is protected by parent.mu, not this node's mu! + name string + + // parent is the parent directory. + // + // We hold a hard reference to the parent. + // + // parent is protected by renameMu. + parent *Dirent + + // deleted may be set atomically when removed. 
+ deleted int32 `state:"nosave"` + + // frozen indicates this entry can't walk to unknown nodes. + frozen bool + + // mounted is true if Dirent is a mount point, similar to include/linux/dcache.h:DCACHE_MOUNTED. + mounted bool + + // direntEntry identifies this Dirent as an element in a DirentCache. DirentCaches + // and their contents are not saved. + direntEntry `state:"nosave"` + + // dirMu is a read-write mutex that protects caching decisions made by directory operations. + // Lock ordering: dirMu must be taken before mu (see below). Details: + // + // dirMu does not participate in Rename; instead mu and renameMu are used, see lockForRename. + // + // Creation and Removal operations must be synchronized with Walk to prevent stale negative + // caching. Note that this requirement is not specific to a _Dirent_ doing negative caching. + // The following race exists at any level of the VFS: + // + // For an object D that represents a directory, containing a cache of non-existent paths, + // protected by D.cacheMu: + // + // T1: T2: + // D.lookup(name) + // --> ENOENT + // D.create(name) + // --> success + // D.cacheMu.Lock + // delete(D.cache, name) + // D.cacheMu.Unlock + // D.cacheMu.Lock + // D.cache[name] = true + // D.cacheMu.Unlock + // + // D.lookup(name) + // D.cacheMu.Lock + // if D.cache[name] { + // --> ENOENT (wrong) + // } + // D.cacheMu.Lock + // + // Correct: + // + // T1: T2: + // D.cacheMu.Lock + // D.lookup(name) + // --> ENOENT + // D.cache[name] = true + // D.cacheMu.Unlock + // D.cacheMu.Lock + // D.create(name) + // --> success + // delete(D.cache, name) + // D.cacheMu.Unlock + // + // D.cacheMu.Lock + // D.lookup(name) + // --> EXISTS (right) + // D.cacheMu.Unlock + // + // Note that the above "correct" solution causes too much lock contention: all lookups are + // synchronized with each other. This is a problem because lookups are involved in any VFS + // path operation. 
+ // + // A Dirent diverges from the single D.cacheMu and instead uses two locks: dirMu to protect + // concurrent creation/removal/lookup caching, and mu to protect the Dirent's children map + // in general. + // + // This allows for concurrent Walks to be executed in order to pipeline lookups. For instance + // for a hot directory /a/b, threads T1, T2, T3 will only block on each other update the + // children map of /a/b when their individual lookups complete. + // + // T1: T2: T3: + // stat(/a/b/c) stat(/a/b/d) stat(/a/b/e) + dirMu sync.RWMutex `state:"nosave"` + + // mu protects the below fields. Lock ordering: mu must be taken after dirMu. + mu sync.Mutex `state:"nosave"` + + // children are cached via weak references. + children map[string]*refs.WeakRef +} + +// NewDirent returns a new root Dirent, taking the caller's reference on inode. The caller +// holds the only reference to the Dirent. Parents may call hashChild to parent this Dirent. +func NewDirent(inode *Inode, name string) *Dirent { + d := newDirent(inode, name) + allDirents.add(d) + d.userVisible = true + return d +} + +// NewTransientDirent creates a transient Dirent that shouldn't actually be +// visible to users. +func NewTransientDirent(inode *Inode) *Dirent { + return newDirent(inode, "transient") +} + +func newDirent(inode *Inode, name string) *Dirent { + // The Dirent needs to maintain one reference to MountSource. + if inode != nil { + inode.MountSource.IncDirentRefs() + } + return &Dirent{ + Inode: inode, + name: name, + children: make(map[string]*refs.WeakRef), + } +} + +// NewNegativeDirent returns a new root negative Dirent. Otherwise same as NewDirent. +func NewNegativeDirent(name string) *Dirent { + return newDirent(nil, name) +} + +// IsRoot returns true if d is a root Dirent. +func (d *Dirent) IsRoot() bool { + return d.parent == nil +} + +// IsNegative returns true if d represents a path that does not exist. 
+func (d *Dirent) IsNegative() bool { + return d.Inode == nil +} + +// hashChild will hash child into the children list of its new parent d, carrying over +// any "frozen" state from d. +// +// Returns (*WeakRef, true) if hashing child caused a Dirent to be unhashed. The caller must +// validate the returned unhashed weak reference. Common cases: +// +// * Remove: hashing a negative Dirent unhashes a positive Dirent (unimplemented). +// * Create: hashing a positive Dirent unhashes a negative Dirent. +// * Lookup: hashing any Dirent should not unhash any other Dirent. +// +// Preconditions: +// * d.mu must be held. +// * child must be a root Dirent. +func (d *Dirent) hashChild(child *Dirent) (*refs.WeakRef, bool) { + if !child.IsRoot() { + panic("hashChild must be a root Dirent") + } + + // Assign parentage. + child.parent = d + + // Avoid letting negative Dirents take a reference on their parent; these Dirents + // don't have a role outside of the Dirent cache and should not keep their parent + // indefinitely pinned. + if !child.IsNegative() { + // Positive dirents must take a reference on their parent. + d.IncRef() + } + + // Carry over parent's frozen state. + child.frozen = d.frozen + + return d.hashChildParentSet(child) +} + +// hashChildParentSet will rehash child into the children list of its parent d. +// +// Assumes that child.parent = d already. +func (d *Dirent) hashChildParentSet(child *Dirent) (*refs.WeakRef, bool) { + if child.parent != d { + panic("hashChildParentSet assumes the child already belongs to the parent") + } + + // Save any replaced child so our caller can validate it. + old, ok := d.children[child.name] + + // Hash the child. + d.children[child.name] = refs.NewWeakRef(child, nil) + + // Return any replaced child. + return old, ok +} + +// SyncAll iterates through mount points under d and writes back their buffered +// modifications to filesystems. 
+func (d *Dirent) SyncAll(ctx context.Context) { + d.mu.Lock() + defer d.mu.Unlock() + + // For negative Dirents there is nothing to sync. By definition these are + // leaves (there is nothing left to traverse). + if d.IsNegative() { + return + } + + // There is nothing to sync for a read-only filesystem. + if !d.Inode.MountSource.Flags.ReadOnly { + // FIXME: This should be a mount traversal, not a + // Dirent traversal, because some Inodes that need to be synced + // may no longer be reachable by name (after sys_unlink). + // + // Write out metadata, dirty page cached pages, and sync disk/remote + // caches. + d.Inode.WriteOut(ctx) + } + + // Continue iterating through other mounted filesystems. + for _, w := range d.children { + if child := w.Get(); child != nil { + child.(*Dirent).SyncAll(ctx) + child.DecRef() + } + } +} + +// FullName returns the fully-qualified name and a boolean value representing +// whether this Dirent was a descendant of root. +// If the root argument is nil it is assumed to be the root of the Dirent tree. +func (d *Dirent) FullName(root *Dirent) (string, bool) { + renameMu.RLock() + defer renameMu.RUnlock() + return d.fullName(root) +} + +// fullName returns the fully-qualified name and a boolean value representing +// if the root node was reachable from this Dirent. +func (d *Dirent) fullName(root *Dirent) (string, bool) { + if d == root { + return "/", true + } + + if d.IsRoot() { + if root != nil { + // We reached the top of the Dirent tree but did not encounter + // the given root. Return false for reachable so the caller + // can handle this situation accordingly. + return d.name, false + } + return d.name, true + } + + // Traverse up to parent. 
+	d.parent.mu.Lock()
+	name := d.name
+	d.parent.mu.Unlock()
+	parentName, reachable := d.parent.fullName(root)
+	s := path.Join(parentName, name)
+	if atomic.LoadInt32(&d.deleted) != 0 {
+		return s + " (deleted)", reachable
+	}
+	return s, reachable
+}
+
+func (d *Dirent) freeze() {
+	if d.frozen {
+		// Already frozen.
+		return
+	}
+	d.frozen = true
+
+	// Take a reference when freezing.
+	for _, w := range d.children {
+		if child := w.Get(); child != nil {
+			// NOTE: We would normally drop the reference here. But
+			// instead we're hanging on to it.
+			ch := child.(*Dirent)
+			ch.Freeze()
+		}
+	}
+
+	// Drop all expired weak references.
+	d.flush()
+}
+
+// Freeze prevents this dirent from walking to more nodes. Freeze is applied
+// recursively to all children.
+//
+// If this particular Dirent represents a Virtual node, then Walks and Creates
+// may proceed as before.
+//
+// Freeze can only be called before the application starts running, otherwise
+// the root might be out of sync with the application root if modified by
+// sys_chroot.
+func (d *Dirent) Freeze() {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	d.freeze()
+}
+
+// descendantOf returns true if the receiver dirent is equal to, or a
+// descendant of, the argument dirent.
+//
+// d.mu must be held.
+func (d *Dirent) descendantOf(p *Dirent) bool {
+	if d == p {
+		return true
+	}
+	if d.IsRoot() {
+		return false
+	}
+	return d.parent.descendantOf(p)
+}
+
+// walk walks to path name starting at the dirent, and will not traverse above
+// root Dirent.
+//
+// If walkMayUnlock is true then walk can unlock d.mu to execute a slow
+// Inode.Lookup, otherwise walk will keep d.mu locked.
+//
+// Preconditions:
+// - d.mu must be held.
+// - name must not contain "/"s.
+func (d *Dirent) walk(ctx context.Context, root *Dirent, name string, walkMayUnlock bool) (*Dirent, error) {
+	if !IsDir(d.Inode.StableAttr) {
+		return nil, syscall.ENOTDIR
+	}
+	if name == "" || name == "."
{ + d.IncRef() + return d, nil + } else if name == ".." { + renameMu.RLock() + // Respect the chroot. Note that in Linux there is no check to enforce + // that d is a descendant of root. + if d == root { + d.IncRef() + renameMu.RUnlock() + return d, nil + } + // Are we already at the root? Then ".." is ".". + if d.IsRoot() { + d.IncRef() + renameMu.RUnlock() + return d, nil + } + d.parent.IncRef() + renameMu.RUnlock() + return d.parent, nil + } + + if w, ok := d.children[name]; ok { + // Try to resolve the weak reference to a hard reference. + if child := w.Get(); child != nil { + cd := child.(*Dirent) + + // Is this a negative Dirent? + if cd.IsNegative() { + // Don't leak a reference; this doesn't matter as much for negative Dirents, + // which don't hold a hard reference on their parent (their parent holds a + // hard reference on them, and they contain virtually no state). But this is + // good house-keeping. + child.DecRef() + return nil, syscall.ENOENT + } + + // Do we need to revalidate this child? + // + // We never allow the file system to revalidate mounts, that could cause them + // to unexpectedly drop out before umount. + if cd.mounted || !cd.Inode.MountSource.Revalidate(cd) { + // Good to go. This is the fast-path. + return cd, nil + } + + // If we're revalidating a child, we must ensure all inotify watches release + // their pins on the child. Inotify doesn't properly support filesystems that + // revalidate dirents (since watches are lost on revalidation), but if we fail + // to unpin the watches child will never be GCed. + cd.Inode.Watches.Unpin(cd) + + // This child needs to be revalidated, fallthrough to unhash it. Make sure + // to not leak a reference from Get(). + // + // Note that previous lookups may still have a reference to this stale child; + // this can't be helped, but we can ensure that *new* lookups are up-to-date. + child.DecRef() + } + + // Either our weak reference expired or we need to revalidate it. 
Unhash child first, we're + // about to replace it. + delete(d.children, name) + w.Drop() + } + + // Are we allowed to do the lookup? + if d.frozen && !d.Inode.IsVirtual() { + return nil, syscall.ENOENT + } + + // Slow path: load the InodeOperations into memory. Since this is a hot path and the lookup may be expensive, + // if possible release the lock and re-acquire it. + if walkMayUnlock { + d.mu.Unlock() + } + c, err := d.Inode.Lookup(ctx, name) + if walkMayUnlock { + d.mu.Lock() + } + // No dice. + if err != nil { + return nil, err + } + + // Sanity check c, its name must be consistent. + if c.name != name { + panic(fmt.Sprintf("lookup from %q to %q returned unexpected name %q", d.name, name, c.name)) + } + + // Now that we have the lock again, check if we raced. + if w, ok := d.children[name]; ok { + // Someone else looked up or created a child at name before us. + if child := w.Get(); child != nil { + cd := child.(*Dirent) + + // There are active references to the existing child, prefer it to the one we + // retrieved from Lookup. Likely the Lookup happened very close to the insertion + // of child, so considering one stale over the other is fairly arbitrary. + c.DecRef() + + // The child that was installed could be negative. + if cd.IsNegative() { + // If so, don't leak a reference and short circuit. + child.DecRef() + return nil, syscall.ENOENT + } + + // We make the judgement call that if c raced with cd they are close enough to have + // the same staleness, so we don't attempt to revalidate cd. In Linux revalidations + // can continue indefinitely (see fs/namei.c, retry_estale); we try to avoid this. + return cd, nil + } + + // Weak reference expired. We went through a full cycle of create/destroy in the time + // we did the Inode.Lookup. Fully drop the weak reference and fallback to using the child + // we looked up. + delete(d.children, name) + w.Drop() + } + + // Give the looked up child a parent. 
We cannot kick out entries, since we just checked above + // that there is nothing at name in d's children list. + if _, kicked := d.hashChild(c); kicked { + // Yell loudly. + panic(fmt.Sprintf("hashed child %q over existing child", c.name)) + } + + // Is this a negative Dirent? + if c.IsNegative() { + // Don't drop a reference on the negative Dirent, it was just installed and this is the + // only reference we'll ever get. d owns the reference. + return nil, syscall.ENOENT + } + + // Return the positive Dirent. + return c, nil +} + +// Walk walks to a new dirent, and will not walk higher than the given root +// Dirent, which must not be nil. +func (d *Dirent) Walk(ctx context.Context, root *Dirent, name string) (*Dirent, error) { + if root == nil { + panic("Dirent.Walk: root must not be nil") + } + + d.dirMu.RLock() + d.mu.Lock() + child, err := d.walk(ctx, root, name, true /* may unlock */) + d.mu.Unlock() + d.dirMu.RUnlock() + + return child, err +} + +// exists returns true if name exists in relation to d. +// +// Preconditions: d.mu must be held. +func (d *Dirent) exists(ctx context.Context, root *Dirent, name string) bool { + child, err := d.walk(ctx, root, name, true /* may unlock */) + if err != nil { + // Child may not exist. + return false + } + // Child exists. + child.DecRef() + return true +} + +// lockDirectory should be called for any operation that changes this `d`s +// children (creating or removing them). +func (d *Dirent) lockDirectory() func() { + if d.Inode.overlay != nil { + // overlay copyUp may need to look at Dirent parents, and hence + // may need renameMu. + renameMu.RLock() + d.dirMu.Lock() + d.mu.Lock() + return func() { + d.mu.Unlock() + d.dirMu.Unlock() + renameMu.RUnlock() + } + } + + d.dirMu.Lock() + d.mu.Lock() + return func() { + d.mu.Unlock() + d.dirMu.Unlock() + } +} + +// Create creates a new regular file in this directory. 
+func (d *Dirent) Create(ctx context.Context, root *Dirent, name string, flags FileFlags, perms FilePermissions) (*File, error) { + unlock := d.lockDirectory() + defer unlock() + + // Does something already exist? + if d.exists(ctx, root, name) { + return nil, syscall.EEXIST + } + + // Are we frozen? + if d.frozen && !d.Inode.IsVirtual() { + return nil, syscall.ENOENT + } + + // Try the create. We need to trust the file system to return EEXIST (or something + // that will translate to EEXIST) if name already exists. + file, err := d.Inode.Create(ctx, d, name, flags, perms) + if err != nil { + return nil, err + } + child := file.Dirent + + // Sanity check c, its name must be consistent. + if child.name != name { + panic(fmt.Sprintf("create from %q to %q returned unexpected name %q", d.name, name, child.name)) + } + + // File systems cannot return a negative Dirent on Create, that makes no sense. + if child.IsNegative() { + panic(fmt.Sprintf("create from %q to %q returned negative Dirent", d.name, name)) + } + + // Hash the child into its parent. We can only kick out a Dirent if it is negative + // (we are replacing something that does not exist with something that now does). + if w, kicked := d.hashChild(child); kicked { + if old := w.Get(); old != nil { + if !old.(*Dirent).IsNegative() { + panic(fmt.Sprintf("hashed child %q over a positive child", child.name)) + } + // Don't leak a reference. + old.DecRef() + + // Drop d's reference. + old.DecRef() + } + + // Finally drop the useless weak reference on the floor. + w.Drop() + } + + d.Inode.Watches.Notify(name, linux.IN_CREATE, 0) + + // Allow the file system to take extra references on c. + child.maybeExtendReference() + + // Return the reference and the new file. When the last reference to + // the file is dropped, file.Dirent may no longer be cached. + return file, nil +} + +// genericCreate executes create if name does not exist. Removes a negative Dirent at name if +// create succeeds. 
+// +// Preconditions: d.mu must be held. +func (d *Dirent) genericCreate(ctx context.Context, root *Dirent, name string, create func() error) error { + // Does something already exist? + if d.exists(ctx, root, name) { + return syscall.EEXIST + } + + // Are we frozen? + if d.frozen && !d.Inode.IsVirtual() { + return syscall.ENOENT + } + + // Execute the create operation. + if err := create(); err != nil { + return err + } + + // Remove any negative Dirent. We've already asserted above with d.exists + // that the only thing remaining here can be a negative Dirent. + if w, ok := d.children[name]; ok { + // Same as Create. + if old := w.Get(); old != nil { + if !old.(*Dirent).IsNegative() { + panic(fmt.Sprintf("hashed over a positive child %q", old.(*Dirent).name)) + } + // Don't leak a reference. + old.DecRef() + + // Drop d's reference. + old.DecRef() + } + + // Unhash the negative Dirent, name needs to exist now. + delete(d.children, name) + + // Finally drop the useless weak reference on the floor. + w.Drop() + } + + return nil +} + +// CreateLink creates a new link in this directory. +func (d *Dirent) CreateLink(ctx context.Context, root *Dirent, oldname, newname string) error { + unlock := d.lockDirectory() + defer unlock() + + return d.genericCreate(ctx, root, newname, func() error { + if err := d.Inode.CreateLink(ctx, d, oldname, newname); err != nil { + return err + } + d.Inode.Watches.Notify(newname, linux.IN_CREATE, 0) + return nil + }) +} + +// CreateHardLink creates a new hard link in this directory. +func (d *Dirent) CreateHardLink(ctx context.Context, root *Dirent, target *Dirent, name string) error { + unlock := d.lockDirectory() + defer unlock() + + // Make sure that target does not span filesystems. 
+ if d.Inode.MountSource != target.Inode.MountSource { + return syscall.EXDEV + } + + return d.genericCreate(ctx, root, name, func() error { + if err := d.Inode.CreateHardLink(ctx, d, target, name); err != nil { + return err + } + target.Inode.Watches.Notify("", linux.IN_ATTRIB, 0) // Link count change. + d.Inode.Watches.Notify(name, linux.IN_CREATE, 0) + return nil + }) +} + +// CreateDirectory creates a new directory under this dirent. +func (d *Dirent) CreateDirectory(ctx context.Context, root *Dirent, name string, perms FilePermissions) error { + unlock := d.lockDirectory() + defer unlock() + + return d.genericCreate(ctx, root, name, func() error { + if err := d.Inode.CreateDirectory(ctx, d, name, perms); err != nil { + return err + } + d.Inode.Watches.Notify(name, linux.IN_ISDIR|linux.IN_CREATE, 0) + return nil + }) +} + +// Bind satisfies the InodeOperations interface; otherwise same as GetFile. +func (d *Dirent) Bind(ctx context.Context, root *Dirent, name string, socket unix.BoundEndpoint, perms FilePermissions) error { + d.dirMu.Lock() + defer d.dirMu.Unlock() + d.mu.Lock() + defer d.mu.Unlock() + + err := d.genericCreate(ctx, root, name, func() error { + if err := d.Inode.Bind(ctx, name, socket, perms); err != nil { + return err + } + d.Inode.Watches.Notify(name, linux.IN_CREATE, 0) + return nil + }) + if err == syscall.EEXIST { + return syscall.EADDRINUSE + } + return err +} + +// CreateFifo creates a new named pipe under this dirent. +func (d *Dirent) CreateFifo(ctx context.Context, root *Dirent, name string, perms FilePermissions) error { + unlock := d.lockDirectory() + defer unlock() + + return d.genericCreate(ctx, root, name, func() error { + if err := d.Inode.CreateFifo(ctx, d, name, perms); err != nil { + return err + } + d.Inode.Watches.Notify(name, linux.IN_CREATE, 0) + return nil + }) +} + +// getDotAttrs returns the DentAttrs corresponding to "." and ".." directories. 
+func (d *Dirent) getDotAttrs(root *Dirent) (DentAttr, DentAttr) { + // Get '.'. + sattr := d.Inode.StableAttr + dot := DentAttr{ + Type: sattr.Type, + InodeID: sattr.InodeID, + } + + // Get '..'. + if !d.IsRoot() && d.descendantOf(root) { + // Dirent is a descendant of the root. Get its parent's attrs. + psattr := d.parent.Inode.StableAttr + dotdot := DentAttr{ + Type: psattr.Type, + InodeID: psattr.InodeID, + } + return dot, dotdot + } + // Dirent is either root or not a descendant of the root. ".." is the + // same as ".". + return dot, dot +} + +// readdirFrozen returns readdir results based solely on the frozen children. +func (d *Dirent) readdirFrozen(root *Dirent, offset int64, dirCtx *DirCtx) (int64, error) { + // Collect attrs for "." and "..". + attrs := make(map[string]DentAttr) + names := []string{".", ".."} + attrs["."], attrs[".."] = d.getDotAttrs(root) + + // Get info from all children. + d.mu.Lock() + defer d.mu.Unlock() + for name, w := range d.children { + if child := w.Get(); child != nil { + defer child.DecRef() + + // Skip negative children. + if child.(*Dirent).IsNegative() { + continue + } + + sattr := child.(*Dirent).Inode.StableAttr + attrs[name] = DentAttr{ + Type: sattr.Type, + InodeID: sattr.InodeID, + } + names = append(names, name) + } + } + + sort.Strings(names) + + if int(offset) >= len(names) { + return offset, nil + } + names = names[int(offset):] + for _, name := range names { + if err := dirCtx.DirEmit(name, attrs[name]); err != nil { + return offset, err + } + offset++ + } + return offset, nil +} + +// DirIterator is an open directory containing directory entries that can be read. +type DirIterator interface { + // IterateDir emits directory entries by calling dirCtx.EmitDir, beginning + // with the entry at offset and returning the next directory offset. + // + // Entries for "." and ".." must *not* be included. + // + // If the offset returned is the same as the argument offset, then + // nothing has been serialized. 
This is equivalent to reaching EOF. + // In this case serializer.Written() should return 0. + // + // The order of entries to emit must be consistent between Readdir + // calls, and must start with the given offset. + // + // The caller must ensure that this operation is permitted. + IterateDir(ctx context.Context, dirCtx *DirCtx, offset int) (int, error) +} + +// DirentReaddir serializes the directory entries of d including "." and "..". +// +// Arguments: +// +// * d: the Dirent of the directory being read; required to provide "." and "..". +// * it: the directory iterator; which represents an open directory handle. +// * root: fs root; if d is equal to the root, then '..' will refer to d. +// * ctx: context provided to file systems in order to select and serialize entries. +// * offset: the current directory offset. +// +// Returns the offset of the *next* element which was not serialized. +func DirentReaddir(ctx context.Context, d *Dirent, it DirIterator, root *Dirent, dirCtx *DirCtx, offset int64) (int64, error) { + offset, err := direntReaddir(ctx, d, it, root, dirCtx, offset) + // Serializing any directory entries at all means success. + if dirCtx.Serializer.Written() > 0 { + return offset, nil + } + return offset, err +} + +func direntReaddir(ctx context.Context, d *Dirent, it DirIterator, root *Dirent, dirCtx *DirCtx, offset int64) (int64, error) { + if root == nil { + panic("Dirent.Readdir: root must not be nil") + } + if dirCtx.Serializer == nil { + panic("Dirent.Readdir: serializer must not be nil") + } + if d.frozen { + return d.readdirFrozen(root, offset, dirCtx) + } + + // Check that this is actually a directory before emitting anything. + // Once we have written entries for "." and "..", future errors from + // IterateDir will be hidden. + if !IsDir(d.Inode.StableAttr) { + return 0, syserror.ENOTDIR + } + + // Collect attrs for "." and "..". + dot, dotdot := d.getDotAttrs(root) + + // Emit "." and ".." if the offset is low enough. 
+	if offset == 0 {
+		// Serialize ".".
+		if err := dirCtx.DirEmit(".", dot); err != nil {
+			return offset, err
+		}
+		offset++
+	}
+	if offset == 1 {
+		// Serialize "..".
+		if err := dirCtx.DirEmit("..", dotdot); err != nil {
+			return offset, err
+		}
+		offset++
+	}
+
+	// it.IterateDir should be passed an offset that does not include the
+	// initial dot elements. We will add them back later.
+	offset -= 2
+	newOffset, err := it.IterateDir(ctx, dirCtx, int(offset))
+	if int64(newOffset) < offset {
+		panic(fmt.Sprintf("node.Readdir returned offset %v less than input offset %v", newOffset, offset))
+	}
+	// Add the initial nodes back to the offset count.
+	newOffset += 2
+	return int64(newOffset), err
+}
+
+// flush flushes all weak references recursively, and removes any cached
+// references to children.
+//
+// Preconditions: d.mu must be held.
+func (d *Dirent) flush() {
+	expired := make(map[string]*refs.WeakRef)
+	for n, w := range d.children {
+		// Call flush recursively on each child before removing our
+		// reference on it, and removing the cache's reference.
+		if child := w.Get(); child != nil {
+			cd := child.(*Dirent)
+
+			if !cd.IsNegative() {
+				// Flush the child.
+				cd.mu.Lock()
+				cd.flush()
+				cd.mu.Unlock()
+
+				// Allow the file system to drop extra references on child.
+				cd.dropExtendedReference()
+			}
+
+			// Don't leak a reference.
+			child.DecRef()
+		}
+		// Check if the child dirent is closed, and mark it as expired if it is.
+		// We must call w.Get() again here, since the child could have been closed
+		// by the calls to flush() and cache.Remove() in the above if-block.
+		if child := w.Get(); child != nil {
+			child.DecRef()
+		} else {
+			expired[n] = w
+		}
+	}
+
+	// Remove expired entries.
+	for n, w := range expired {
+		delete(d.children, n)
+		w.Drop()
+	}
+}
+
+// Busy indicates whether this Dirent is a mount point or root dirent, or has
+// active positive children.
+//
+// This is expensive, since it flushes the children cache.
+// +// TODO: Fix this busy-ness check. +func (d *Dirent) Busy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + if d.mounted || d.parent == nil { + return true + } + + // Flush any cached references to children that are doomed. + d.flush() + + // Count positive children. + var nonNegative int + for _, w := range d.children { + if child := w.Get(); child != nil { + if !child.(*Dirent).IsNegative() { + nonNegative++ + } + child.DecRef() + } + } + return nonNegative > 0 +} + +// mount mounts a new dirent with the given inode over d. +// +// Precondition: must be called with mm.withMountLocked held on `d`. +func (d *Dirent) mount(ctx context.Context, inode *Inode) (newChild *Dirent, err error) { + // Did we race with deletion? + if atomic.LoadInt32(&d.deleted) != 0 { + return nil, syserror.ENOENT + } + + // Refuse to mount a symlink. + // + // See Linux equivalent in fs/namespace.c:do_add_mount. + if IsSymlink(inode.StableAttr) { + return nil, syserror.EINVAL + } + + // Are we frozen? + if d.parent.frozen && !d.parent.Inode.IsVirtual() { + return nil, syserror.ENOENT + } + + // Dirent that'll replace d. + // + // Note that NewDirent returns with one reference taken; the reference + // is donated to the caller as the mount reference. + replacement := NewDirent(inode, d.name) + replacement.mounted = true + + weakRef, ok := d.parent.hashChild(replacement) + if !ok { + panic("mount must mount over an existing dirent") + } + weakRef.Drop() + + // Note that even though `d` is now hidden, it still holds a reference + // to its parent. + return replacement, nil +} + +// unmount unmounts `d` and replaces it with the last Dirent that was in its +// place, supplied by the MountNamespace as `replacement`. +// +// Precondition: must be called with mm.withMountLocked held on `d`. +func (d *Dirent) unmount(ctx context.Context, replacement *Dirent) error { + // Did we race with deletion? + if atomic.LoadInt32(&d.deleted) != 0 { + return syserror.ENOENT + } + + // Are we frozen? 
+ if d.parent.frozen && !d.parent.Inode.IsVirtual() { + return syserror.ENOENT + } + + // Remount our former child in its place. + // + // As replacement used to be our child, it must already have the right + // parent. + weakRef, ok := d.parent.hashChildParentSet(replacement) + if !ok { + panic("mount must mount over an existing dirent") + } + weakRef.Drop() + + // d is not reachable anymore, and hence not mounted anymore. + d.mounted = false + + // Drop mount reference. + d.DecRef() + return nil +} + +// Remove removes the given file or symlink. The root dirent is used to +// resolve name, and must not be nil. +func (d *Dirent) Remove(ctx context.Context, root *Dirent, name string) error { + // Check the root. + if root == nil { + panic("Dirent.Remove: root must not be nil") + } + + unlock := d.lockDirectory() + defer unlock() + + // Are we frozen? + if d.frozen && !d.Inode.IsVirtual() { + return syscall.ENOENT + } + + // Try to walk to the node. + child, err := d.walk(ctx, root, name, false /* may unlock */) + if err != nil { + // Child does not exist. + return err + } + defer child.DecRef() + + // Remove cannot remove directories. + if IsDir(child.Inode.StableAttr) { + return syscall.EISDIR + } + + // Remove cannot remove a mount point. + if child.Busy() { + return syscall.EBUSY + } + + // Try to remove name on the file system. + if err := d.Inode.Remove(ctx, d, child); err != nil { + return err + } + + // Link count changed, this only applies to non-directory nodes. + child.Inode.Watches.Notify("", linux.IN_ATTRIB, 0) + + // Mark name as deleted and remove from children. + atomic.StoreInt32(&child.deleted, 1) + if w, ok := d.children[name]; ok { + delete(d.children, name) + w.Drop() + } + + // Allow the file system to drop extra references on child. + child.dropExtendedReference() + + // Finally, let inotify know the child is being unlinked. Drop any extra + // refs from inotify to this child dirent. 
This doesn't necessarily mean the
+	// watches on the underlying inode will be destroyed, since the underlying
+	// inode may have other links. If this was the last link, the events for the
+	// watch removal will be queued by the inode destructor.
+	child.Inode.Watches.MarkUnlinked()
+	child.Inode.Watches.Unpin(child)
+	d.Inode.Watches.Notify(name, linux.IN_DELETE, 0)
+
+	return nil
+}
+
+// RemoveDirectory removes the given directory. The root dirent is used to
+// resolve name, and must not be nil.
+func (d *Dirent) RemoveDirectory(ctx context.Context, root *Dirent, name string) error {
+	// Check the root.
+	if root == nil {
+		panic("Dirent.RemoveDirectory: root must not be nil")
+	}
+
+	unlock := d.lockDirectory()
+	defer unlock()
+
+	// Are we frozen?
+	if d.frozen && !d.Inode.IsVirtual() {
+		return syscall.ENOENT
+	}
+
+	// Check for dots.
+	if name == "." {
+		// Rejected as the last component by rmdir(2).
+		return syscall.EINVAL
+	}
+	if name == ".." {
+		// If d was found, then its parent is not empty.
+		return syscall.ENOTEMPTY
+	}
+
+	// Try to walk to the node.
+	child, err := d.walk(ctx, root, name, false /* may unlock */)
+	if err != nil {
+		// Child does not exist.
+		return err
+	}
+	defer child.DecRef()
+
+	// RemoveDirectory can only remove directories.
+	if !IsDir(child.Inode.StableAttr) {
+		return syscall.ENOTDIR
+	}
+
+	// Remove cannot remove a mount point.
+	if child.Busy() {
+		return syscall.EBUSY
+	}
+
+	// Try to remove name on the file system.
+	if err := d.Inode.Remove(ctx, d, child); err != nil {
+		return err
+	}
+
+	// Mark name as deleted and remove from children.
+	atomic.StoreInt32(&child.deleted, 1)
+	if w, ok := d.children[name]; ok {
+		delete(d.children, name)
+		w.Drop()
+	}
+
+	// Allow the file system to drop extra references on child.
+	child.dropExtendedReference()
+
+	// Finally, let inotify know the child is being unlinked. Drop any extra
+	// refs from inotify to this child dirent.
+ child.Inode.Watches.MarkUnlinked() + child.Inode.Watches.Unpin(child) + d.Inode.Watches.Notify(name, linux.IN_ISDIR|linux.IN_DELETE, 0) + + return nil +} + +// destroy closes this node and all children. +func (d *Dirent) destroy() { + if d.IsNegative() { + // Nothing to tear-down and no parent references to drop, since a negative + // Dirent does not take a references on its parent, has no Inode and no children. + return + } + + var wg sync.WaitGroup + defer wg.Wait() + d.mu.Lock() + defer d.mu.Unlock() + + // Drop all weak references. + for _, w := range d.children { + w.Drop() + } + d.children = nil + + allDirents.remove(d) + + // Drop our reference to the Inode. + d.Inode.DecRef() + + // Allow the Dirent to be GC'ed after this point, since the Inode may still + // be referenced after the Dirent is destroyed (for instance by filesystem + // internal caches or hard links). + d.Inode = nil + + // Drop the reference we have on our parent if we took one. renameMu doesn't need to be + // held because d can't be reparented without any references to it left. + if d.parent != nil { + d.parent.DecRef() + } +} + +// IncRef increases the Dirent's refcount as well as its mount's refcount. +// +// IncRef implements RefCounter.IncRef. +func (d *Dirent) IncRef() { + if d.Inode != nil { + d.Inode.MountSource.IncDirentRefs() + } + d.AtomicRefCount.IncRef() +} + +// TryIncRef implements RefCounter.TryIncRef. +func (d *Dirent) TryIncRef() bool { + ok := d.AtomicRefCount.TryIncRef() + if ok && d.Inode != nil { + d.Inode.MountSource.IncDirentRefs() + } + return ok +} + +// DecRef decreases the Dirent's refcount and drops its reference on its mount. +// +// DecRef implements RefCounter.DecRef with destructor d.destroy. +func (d *Dirent) DecRef() { + if d.Inode != nil { + // Keep mount around, since DecRef may destroy d.Inode. 
+ msrc := d.Inode.MountSource + d.DecRefWithDestructor(d.destroy) + msrc.DecDirentRefs() + } else { + d.DecRefWithDestructor(d.destroy) + } +} + +// InotifyEvent notifies all watches on the inode for this dirent and its parent +// of potential events. The events may not actually propagate up to the user, +// depending on the event masks. InotifyEvent automatically provides the name of +// the current dirent as the subject of the event as required, and adds the +// IN_ISDIR flag for dirents that refer to directories. +func (d *Dirent) InotifyEvent(events, cookie uint32) { + // N.B. We don't defer the unlocks because InotifyEvent is in the hot + // path of all IO operations, and the defers cost too much for small IO + // operations. + renameMu.RLock() + + if IsDir(d.Inode.StableAttr) { + events |= linux.IN_ISDIR + } + + // The ordering below is important, Linux always notifies the parent first. + if d.parent != nil { + d.parent.Inode.Watches.Notify(d.name, events, cookie) + } + d.Inode.Watches.Notify("", events, cookie) + + renameMu.RUnlock() +} + +// maybeExtendReference caches a reference on this Dirent if +// MountSourceOperations.Keep returns true. +func (d *Dirent) maybeExtendReference() { + if msrc := d.Inode.MountSource; msrc.Keep(d) { + msrc.fscache.Add(d) + } +} + +// dropExtendedReference drops any cached reference held by the +// MountSource on the dirent. +func (d *Dirent) dropExtendedReference() { + d.Inode.MountSource.fscache.Remove(d) +} + +// lockForRename takes locks on oldParent and newParent as required by Rename +// and returns a function that will unlock the locks taken. The returned +// function must be called even if a non-nil error is returned. 
+func lockForRename(oldParent *Dirent, oldName string, newParent *Dirent, newName string) (func(), error) { + if oldParent == newParent { + oldParent.mu.Lock() + return oldParent.mu.Unlock, nil + } + + // Renaming between directories is a bit subtle: + // + // - A concurrent cross-directory Rename may try to lock in the opposite + // order; take renameMu to prevent this from happening. + // + // - If either directory is an ancestor of the other, then a concurrent + // Remove may lock the descendant (in DecRef -> closeAll) while holding a + // lock on the ancestor; to avoid this, ensure we take locks in the same + // ancestor-to-descendant order. (Holding renameMu prevents this + // relationship from changing.) + renameMu.Lock() + + // First check if newParent is a descendant of oldParent. + child := newParent + for p := newParent.parent; p != nil; p = p.parent { + if p == oldParent { + oldParent.mu.Lock() + newParent.mu.Lock() + var err error + if child.name == oldName { + // newParent is not just a descendant of oldParent, but + // more specifically of oldParent/oldName. That is, we're + // trying to rename something into a subdirectory of + // itself. 
+ err = syscall.EINVAL + } + return func() { + newParent.mu.Unlock() + oldParent.mu.Unlock() + renameMu.Unlock() + }, err + } + child = p + } + + // Otherwise, either oldParent is a descendant of newParent or the two + // have no relationship; in either case we can do this: + newParent.mu.Lock() + oldParent.mu.Lock() + return func() { + oldParent.mu.Unlock() + newParent.mu.Unlock() + renameMu.Unlock() + }, nil +} + +func checkSticky(ctx context.Context, dir *Dirent, victim *Dirent) error { + uattr, err := dir.Inode.UnstableAttr(ctx) + if err != nil { + return syserror.EPERM + } + if !uattr.Perms.Sticky { + return nil + } + + creds := auth.CredentialsFromContext(ctx) + if uattr.Owner.UID == creds.EffectiveKUID { + return nil + } + + vuattr, err := victim.Inode.UnstableAttr(ctx) + if err != nil { + return syserror.EPERM + } + if vuattr.Owner.UID == creds.EffectiveKUID { + return nil + } + if victim.Inode.CheckCapability(ctx, linux.CAP_FOWNER) { + return nil + } + return syserror.EPERM +} + +// MayDelete determines whether `name`, a child of `dir`, can be deleted or +// renamed by `ctx`. +// +// Compare Linux kernel fs/namei.c:may_delete. +func MayDelete(ctx context.Context, root, dir *Dirent, name string) error { + victim, err := dir.Walk(ctx, root, name) + if err != nil { + return err + } + defer victim.DecRef() + + return mayDelete(ctx, dir, victim) +} + +func mayDelete(ctx context.Context, dir *Dirent, victim *Dirent) error { + if err := dir.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil { + return err + } + + return checkSticky(ctx, dir, victim) +} + +// Rename atomically converts the child of oldParent named oldName to a +// child of newParent named newName. 
+func Rename(ctx context.Context, root *Dirent, oldParent *Dirent, oldName string, newParent *Dirent, newName string) error { + if root == nil { + panic("Rename: root must not be nil") + } + if oldParent == newParent && oldName == newName { + return nil + } + + // Acquire global renameMu lock, and mu locks on oldParent/newParent. + unlock, err := lockForRename(oldParent, oldName, newParent, newName) + defer unlock() + if err != nil { + return err + } + + // Are we frozen? + // TODO: Is this the right errno? + if oldParent.frozen && !oldParent.Inode.IsVirtual() { + return syscall.ENOENT + } + if newParent.frozen && !newParent.Inode.IsVirtual() { + return syscall.ENOENT + } + + // Check constraints on the object being renamed. + renamed, err := oldParent.walk(ctx, root, oldName, false /* may unlock */) + if err != nil { + return err + } + defer renamed.DecRef() + + // Make sure we have write permissions on old and new parent. + if err := mayDelete(ctx, oldParent, renamed); err != nil { + return err + } + if newParent != oldParent { + if err := newParent.Inode.CheckPermission(ctx, PermMask{Write: true, Execute: true}); err != nil { + return err + } + } + + // Source should not be an ancestor of the target. + if renamed == newParent { + return syscall.EINVAL + } + + // Is the thing we're trying to rename busy? + if renamed.Busy() { + return syscall.EBUSY + } + + // Per rename(2): "... EACCES: ... or oldpath is a directory and does not + // allow write permission (needed to update the .. entry)." + if IsDir(renamed.Inode.StableAttr) { + if err := renamed.Inode.CheckPermission(ctx, PermMask{Write: true}); err != nil { + return err + } + } + + // Check constraints on the object being replaced, if any. + replaced, err := newParent.walk(ctx, root, newName, false /* may unlock */) + if err == nil { + defer replaced.DecRef() + + // Target should not be an ancestor of source. + if replaced == oldParent { + // Why is this not EINVAL? See fs/namei.c. 
+ return syscall.ENOTEMPTY + } + + // Is the thing we're trying to replace busy? + if replaced.Busy() { + return syscall.EBUSY + } + + // Require that a directory is replaced by a directory. + oldIsDir := IsDir(renamed.Inode.StableAttr) + newIsDir := IsDir(replaced.Inode.StableAttr) + if !newIsDir && oldIsDir { + return syscall.ENOTDIR + } + if !oldIsDir && newIsDir { + return syscall.EISDIR + } + + // Allow the file system to drop extra references on replaced. + replaced.dropExtendedReference() + + // NOTE: Keeping a dirent + // open across renames is currently broken for multiple + // reasons, so we flush all references on the replaced node and + // its children. + replaced.Inode.Watches.Unpin(replaced) + replaced.flush() + } + + if err := renamed.Inode.Rename(ctx, oldParent, renamed, newParent, newName); err != nil { + return err + } + + renamed.name = newName + renamed.parent = newParent + if oldParent != newParent { + // Reparent the reference held by renamed.parent. oldParent.DecRef + // can't destroy oldParent (and try to retake its lock) because + // Rename's caller must be holding a reference. + newParent.IncRef() + oldParent.DecRef() + } + if w, ok := newParent.children[newName]; ok { + w.Drop() + delete(newParent.children, newName) + } + if w, ok := oldParent.children[oldName]; ok { + w.Drop() + delete(oldParent.children, oldName) + } + + // Add a weak reference from the new parent. This ensures that the child + // can still be found from the new parent if a prior hard reference is + // held on renamed. + // + // This is required for file lock correctness because file locks are per-Dirent + // and without maintaining the a cached child (via a weak reference) for renamed, + // multiple Dirents can correspond to the same resource (by virtue of the renamed + // Dirent being unreachable by its parent and it being looked up). + newParent.children[newName] = refs.NewWeakRef(renamed, nil) + + // Queue inotify events for the rename. 
+ var ev uint32 + if IsDir(renamed.Inode.StableAttr) { + ev |= linux.IN_ISDIR + } + + cookie := uniqueid.InotifyCookie(ctx) + oldParent.Inode.Watches.Notify(oldName, ev|linux.IN_MOVED_FROM, cookie) + newParent.Inode.Watches.Notify(newName, ev|linux.IN_MOVED_TO, cookie) + // Somewhat surprisingly, self move events do not have a cookie. + renamed.Inode.Watches.Notify("", linux.IN_MOVE_SELF, 0) + + // Allow the file system to drop extra references on renamed. + renamed.dropExtendedReference() + + // Same as replaced.flush above. + renamed.flush() + + return nil +} diff --git a/pkg/sentry/fs/dirent_cache.go b/pkg/sentry/fs/dirent_cache.go new file mode 100644 index 000000000..e786e4f65 --- /dev/null +++ b/pkg/sentry/fs/dirent_cache.go @@ -0,0 +1,142 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "sync" +) + +// DirentCache is an LRU cache of Dirents. The Dirent's refCount is +// incremented when it is added to the cache, and decremented when it is +// removed. +// +// A nil DirentCache corresponds to a cache with size 0. All methods can be +// called, but nothing is actually cached. +type DirentCache struct { + // Maximum size of the cache. This must be saved manually, to handle the case + // when cache is nil. + maxSize uint64 + + // mu protects currentSize and direntList. + mu sync.Mutex `state:"nosave"` + + // currentSize is the number of elements in the cache. It must be zero (i.e. 
+ // the cache must be empty) on Save. + currentSize uint64 `state:"zerovalue"` + + // list is a direntList, an ilist of Dirents. New Dirents are added + // to the front of the list. Old Dirents are removed from the back of + // the list. It must be zerovalue (i.e. the cache must be empty) on Save. + list direntList `state:"zerovalue"` +} + +// NewDirentCache returns a new DirentCache with the given maxSize. If maxSize +// is 0, nil is returned. +func NewDirentCache(maxSize uint64) *DirentCache { + return &DirentCache{ + maxSize: maxSize, + } +} + +// Add adds the element to the cache and increments the refCount. If the +// argument is already in the cache, it is moved to the front. An element is +// removed from the back if the cache is over capacity. +func (c *DirentCache) Add(d *Dirent) { + if c == nil || c.maxSize == 0 { + return + } + + c.mu.Lock() + if c.contains(d) { + // d is already in cache. Bump it to the front. + // currentSize and refCount are unaffected. + c.list.Remove(d) + c.list.PushFront(d) + c.mu.Unlock() + return + } + + // d is not in cache. Add it and take a reference. + c.list.PushFront(d) + d.IncRef() + c.currentSize++ + + // Remove the oldest until we are under the size limit. + for c.maxSize > 0 && c.currentSize > c.maxSize { + c.remove(c.list.Back()) + } + c.mu.Unlock() +} + +func (c *DirentCache) remove(d *Dirent) { + if !c.contains(d) { + panic(fmt.Sprintf("trying to remove %v, which is not in the dirent cache", d)) + } + c.list.Remove(d) + d.SetPrev(nil) + d.SetNext(nil) + d.DecRef() + c.currentSize-- +} + +// Remove removes the element from the cache and decrements its refCount. It +// also sets the previous and next elements to nil, which allows us to +// determine if a given element is in the cache. 
+func (c *DirentCache) Remove(d *Dirent) {
+	if c == nil || c.maxSize == 0 {
+		return
+	}
+	c.mu.Lock()
+	if !c.contains(d) {
+		c.mu.Unlock()
+		return
+	}
+	c.remove(d)
+	c.mu.Unlock()
+}
+
+// Size returns the number of elements in the cache.
+func (c *DirentCache) Size() uint64 {
+	if c == nil {
+		return 0
+	}
+	c.mu.Lock()
+	size := c.currentSize
+	c.mu.Unlock()
+	return size
+}
+
+func (c *DirentCache) contains(d *Dirent) bool {
+	// If d has a Prev or Next element, then it is in the cache.
+	if d.Prev() != nil || d.Next() != nil {
+		return true
+	}
+	// Otherwise, d is in the cache if it is the only element (and thus the
+	// first element).
+	return c.list.Front() == d
+}
+
+// Invalidate removes all Dirents from the cache, calling DecRef on each.
+func (c *DirentCache) Invalidate() {
+	if c == nil {
+		return
+	}
+	c.mu.Lock()
+	for c.list.Front() != nil {
+		c.remove(c.list.Front())
+	}
+	c.mu.Unlock()
+}
diff --git a/pkg/sentry/fs/dirent_cache_test.go b/pkg/sentry/fs/dirent_cache_test.go
new file mode 100644
index 000000000..82b7f6bd5
--- /dev/null
+++ b/pkg/sentry/fs/dirent_cache_test.go
@@ -0,0 +1,157 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"testing"
+)
+
+func TestDirentCache(t *testing.T) {
+	const maxSize = 5
+
+	c := NewDirentCache(maxSize)
+
+	// Size starts at 0.
+ if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // Create a Dirent d. + d := NewNegativeDirent("") + + // c does not contain d. + if got, want := c.contains(d), false; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Add d to the cache. + c.Add(d) + + // Size is now 1. + if got, want := c.Size(), uint64(1); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c contains d. + if got, want := c.contains(d), true; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Add maxSize-1 more elements. d should be oldest element. + for i := 0; i < maxSize-1; i++ { + c.Add(NewNegativeDirent("")) + } + + // Size is maxSize. + if got, want := c.Size(), uint64(maxSize); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c contains d. + if got, want := c.contains(d), true; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // "Bump" d to the front by re-adding it. + c.Add(d) + + // Size is maxSize. + if got, want := c.Size(), uint64(maxSize); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c contains d. + if got, want := c.contains(d), true; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Add maxSize-1 more elements. d should again be oldest element. + for i := 0; i < maxSize-1; i++ { + c.Add(NewNegativeDirent("")) + } + + // Size is maxSize. + if got, want := c.Size(), uint64(maxSize); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c contains d. + if got, want := c.contains(d), true; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Add one more element, which will bump d from the cache. + c.Add(NewNegativeDirent("")) + + // Size is maxSize. + if got, want := c.Size(), uint64(maxSize); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // c does not contain d. 
+ if got, want := c.contains(d), false; got != want { + t.Errorf("c.contains(d) got %v want %v", got, want) + } + + // Invalidating causes size to be 0 and list to be empty. + c.Invalidate() + if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + if got, want := c.list.Empty(), true; got != want { + t.Errorf("c.list.Empty() got %v, want %v", got, want) + } + + // Fill cache with maxSize dirents. + for i := 0; i < maxSize; i++ { + c.Add(NewNegativeDirent("")) + } +} + +// TestNilDirentCache tests that a nil cache supports all cache operations, but +// treats them as noop. +func TestNilDirentCache(t *testing.T) { + // Create a nil cache. + var c *DirentCache + + // Size is zero. + if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // Call Add. + c.Add(NewNegativeDirent("")) + + // Size is zero. + if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // Call Remove. + c.Remove(NewNegativeDirent("")) + + // Size is zero. + if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } + + // Call Invalidate. + c.Invalidate() + + // Size is zero. + if got, want := c.Size(), uint64(0); got != want { + t.Errorf("c.Size() got %v, want %v", got, want) + } +} diff --git a/pkg/sentry/fs/dirent_refs_test.go b/pkg/sentry/fs/dirent_refs_test.go new file mode 100644 index 000000000..8ce9ba02d --- /dev/null +++ b/pkg/sentry/fs/dirent_refs_test.go @@ -0,0 +1,417 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "syscall" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" +) + +func newMockDirInode(ctx context.Context, cache *DirentCache) *Inode { + return NewMockInode(ctx, NewMockMountSource(cache), StableAttr{Type: Directory}) +} + +func TestWalkPositive(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + ctx := contexttest.Context(t) + root := NewDirent(newMockDirInode(ctx, nil), "root") + + if got := root.TestReadRefs(); got != 0 { + t.Fatalf("root has a ref count of %d, want %d", got, 0) + } + + name := "d" + d, err := root.walk(ctx, root, name, false) + if err != nil { + t.Fatalf("root.walk(root, %q) got %v, want nil", name, err) + } + + if got := root.TestReadRefs(); got != 1 { + t.Fatalf("root has a ref count of %d, want %d", got, 1) + } + + if got := d.TestReadRefs(); got != 0 { + t.Fatalf("child name = %q has a ref count of %d, want %d", d.name, got, 0) + } + + d.DecRef() + + if got := root.TestReadRefs(); got != 0 { + t.Fatalf("root has a ref count of %d, want %d", got, 0) + } + + if got := d.TestReadRefs(); got != -1 { + t.Fatalf("child name = %q has a ref count of %d, want %d", d.name, got, -1) + } + + root.flush() + + if got := len(root.children); got != 0 { + t.Fatalf("root has %d children, want %d", got, 0) + } +} + +func TestWalkNegative(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. 
+ + ctx := contexttest.Context(t) + root := NewDirent(NewEmptyDir(ctx, nil), "root") + mn := root.Inode.InodeOperations.(*mockInodeOperationsLookupNegative) + + if got := root.TestReadRefs(); got != 0 { + t.Fatalf("root has a ref count of %d, want %d", got, 0) + } + + name := "d" + for i := 0; i < 100; i++ { + _, err := root.walk(ctx, root, name, false) + if err != syscall.ENOENT { + t.Fatalf("root.walk(root, %q) got %v, want %v", name, err, syscall.ENOENT) + } + } + + if got := root.TestReadRefs(); got != 0 { + t.Fatalf("root has a ref count of %d, want %d", got, 1) + } + + if got := len(root.children); got != 1 { + t.Fatalf("root has %d children, want %d", got, 1) + } + + w, ok := root.children[name] + if !ok { + t.Fatalf("root wants child at %q", name) + } + + child := w.Get() + if child == nil { + t.Fatalf("root wants to resolve weak reference") + } + + if !child.(*Dirent).IsNegative() { + t.Fatalf("root found positive child at %q, want negative", name) + } + + if got := child.(*Dirent).TestReadRefs(); got != 1 { + t.Fatalf("child has a ref count of %d, want %d", got, 1) + } + + child.DecRef() + + if got := child.(*Dirent).TestReadRefs(); got != 0 { + t.Fatalf("child has a ref count of %d, want %d", got, 0) + } + + if got := len(root.children); got != 1 { + t.Fatalf("root has %d children, want %d", got, 1) + } + + root.DecRef() + + if got := root.TestReadRefs(); got != -1 { + t.Fatalf("root has a ref count of %d, want %d", got, 0) + } + + AsyncBarrier() + + if got := mn.releaseCalled; got != true { + t.Fatalf("root.Close was called %v, want true", got) + } +} + +type mockInodeOperationsLookupNegative struct { + *MockInodeOperations + releaseCalled bool +} + +func NewEmptyDir(ctx context.Context, cache *DirentCache) *Inode { + m := NewMockMountSource(cache) + return NewInode(&mockInodeOperationsLookupNegative{ + MockInodeOperations: NewMockInodeOperations(ctx), + }, m, StableAttr{Type: Directory}) +} + +func (m *mockInodeOperationsLookupNegative) Lookup(ctx 
context.Context, dir *Inode, p string) (*Dirent, error) { + return NewNegativeDirent(p), nil +} + +func (m *mockInodeOperationsLookupNegative) Release(context.Context) { + m.releaseCalled = true +} + +func TestHashNegativeToPositive(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + ctx := contexttest.Context(t) + root := NewDirent(NewEmptyDir(ctx, nil), "root") + + name := "d" + _, err := root.walk(ctx, root, name, false) + if err != syscall.ENOENT { + t.Fatalf("root.walk(root, %q) got %v, want %v", name, err, syscall.ENOENT) + } + + if got := root.exists(ctx, root, name); got != false { + t.Fatalf("got %q exists, want does not exist", name) + } + + f, err := root.Create(ctx, root, name, FileFlags{}, FilePermissions{}) + if err != nil { + t.Fatalf("root.Create(%q, _), got error %v, want nil", name, err) + } + d := f.Dirent + + if d.IsNegative() { + t.Fatalf("got negative Dirent, want positive") + } + + if got := d.TestReadRefs(); got != 0 { + t.Fatalf("child %q has a ref count of %d, want %d", name, got, 0) + } + + if got := root.TestReadRefs(); got != 1 { + t.Fatalf("root has a ref count of %d, want %d", got, 1) + } + + if got := len(root.children); got != 1 { + t.Fatalf("got %d children, want %d", got, 1) + } + + w, ok := root.children[name] + if !ok { + t.Fatalf("failed to find weak reference to %q", name) + } + + child := w.Get() + if child == nil { + t.Fatalf("want to resolve weak reference") + } + + if child.(*Dirent) != d { + t.Fatalf("got foreign child") + } +} + +func TestRevalidate(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + ctx := contexttest.Context(t) + for _, test := range []struct { + // desc is the test's description. + desc string + + // Whether to make negative Dirents. 
+ makeNegative bool + }{ + { + desc: "Revalidate negative Dirent", + makeNegative: true, + }, + { + desc: "Revalidate positive Dirent", + makeNegative: false, + }, + } { + t.Run(test.desc, func(t *testing.T) { + root := NewDirent(NewMockInodeRevalidate(ctx, test.makeNegative), "root") + + name := "d" + d1, err := root.walk(ctx, root, name, false) + if !test.makeNegative && err != nil { + t.Fatalf("root.walk(root, %q) got %v, want nil", name, err) + } + d2, err := root.walk(ctx, root, name, false) + if !test.makeNegative && err != nil { + t.Fatalf("root.walk(root, %q) got %v, want nil", name, err) + } + if !test.makeNegative && d1 == d2 { + t.Fatalf("revalidating walk got same *Dirent, want different") + } + if got := len(root.children); got != 1 { + t.Errorf("revalidating walk got %d children, want %d", got, 1) + } + }) + } +} + +type MockInodeOperationsRevalidate struct { + *MockInodeOperations + makeNegative bool +} + +func NewMockInodeRevalidate(ctx context.Context, makeNegative bool) *Inode { + mn := NewMockInodeOperations(ctx) + m := NewMockMountSource(nil) + m.MountSourceOperations.(*MockMountSourceOps).revalidate = true + return NewInode(&MockInodeOperationsRevalidate{MockInodeOperations: mn, makeNegative: makeNegative}, m, StableAttr{Type: Directory}) +} + +func (m *MockInodeOperationsRevalidate) Lookup(ctx context.Context, dir *Inode, p string) (*Dirent, error) { + if !m.makeNegative { + return m.MockInodeOperations.Lookup(ctx, dir, p) + } + return NewNegativeDirent(p), nil +} + +func TestCreateExtraRefs(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + ctx := contexttest.Context(t) + for _, test := range []struct { + // desc is the test's description. + desc string + + // root is the Dirent to create from. + root *Dirent + + // expected references on walked Dirent. 
+ refs int64 + }{ + { + desc: "Create caching", + root: NewDirent(NewEmptyDir(ctx, NewDirentCache(1)), "root"), + refs: 1, + }, + { + desc: "Create not caching", + root: NewDirent(NewEmptyDir(ctx, nil), "root"), + refs: 0, + }, + } { + t.Run(test.desc, func(t *testing.T) { + name := "d" + f, err := test.root.Create(ctx, test.root, name, FileFlags{}, FilePermissions{}) + if err != nil { + t.Fatalf("root.Create(root, %q) failed: %v", name, err) + } + d := f.Dirent + + if got := d.TestReadRefs(); got != test.refs { + t.Errorf("dirent has a ref count of %d, want %d", got, test.refs) + } + }) + } +} + +func TestRemoveExtraRefs(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + ctx := contexttest.Context(t) + for _, test := range []struct { + // desc is the test's description. + desc string + + // root is the Dirent to make and remove from. + root *Dirent + }{ + { + desc: "Remove caching", + root: NewDirent(NewEmptyDir(ctx, NewDirentCache(1)), "root"), + }, + { + desc: "Remove not caching", + root: NewDirent(NewEmptyDir(ctx, nil), "root"), + }, + } { + t.Run(test.desc, func(t *testing.T) { + name := "d" + f, err := test.root.Create(ctx, test.root, name, FileFlags{}, FilePermissions{}) + if err != nil { + t.Fatalf("root.Create(%q, _) failed: %v", name, err) + } + d := f.Dirent + + if err := test.root.Remove(contexttest.Context(t), test.root, name); err != nil { + t.Fatalf("root.Remove(root, %q) failed: %v", name, err) + } + + if got := d.TestReadRefs(); got != 0 { + t.Fatalf("dirent has a ref count of %d, want %d", got, 0) + } + + d.DecRef() + + test.root.flush() + + if got := len(test.root.children); got != 0 { + t.Errorf("root has %d children, want %d", got, 0) + } + }) + } +} + +func TestRenameExtraRefs(t *testing.T) { + // refs == 0 -> one reference. + // refs == -1 -> has been destroyed. + + ctx := contexttest.Context(t) + for _, test := range []struct { + // desc is the test's description. 
+ desc string + + // cache of extra Dirent references, may be nil. + cache *DirentCache + }{ + { + desc: "Rename no caching", + cache: nil, + }, + { + desc: "Rename caching", + cache: NewDirentCache(5), + }, + } { + t.Run(test.desc, func(t *testing.T) { + dirAttr := StableAttr{Type: Directory} + + oldParent := NewDirent(NewMockInode(ctx, NewMockMountSource(test.cache), dirAttr), "old_parent") + newParent := NewDirent(NewMockInode(ctx, NewMockMountSource(test.cache), dirAttr), "new_parent") + + renamed, err := oldParent.Walk(ctx, oldParent, "old_child") + if err != nil { + t.Fatalf("Walk(oldParent, %q) got error %v, want nil", "old_child", err) + } + replaced, err := newParent.Walk(ctx, oldParent, "new_child") + if err != nil { + t.Fatalf("Walk(newParent, %q) got error %v, want nil", "new_child", err) + } + + if err := Rename(contexttest.RootContext(t), oldParent /*root */, oldParent, "old_child", newParent, "new_child"); err != nil { + t.Fatalf("Rename got error %v, want nil", err) + } + + oldParent.flush() + newParent.flush() + + // Expect to have only active references. + if got := renamed.TestReadRefs(); got != 0 { + t.Errorf("renamed has ref count %d, want only active references %d", got, 0) + } + if got := replaced.TestReadRefs(); got != 0 { + t.Errorf("replaced has ref count %d, want only active references %d", got, 0) + } + }) + } +} diff --git a/pkg/sentry/fs/dirent_state.go b/pkg/sentry/fs/dirent_state.go new file mode 100644 index 000000000..c6a1b5e38 --- /dev/null +++ b/pkg/sentry/fs/dirent_state.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs

import (
	"fmt"
	"sync/atomic"
)

// beforeSave is invoked by stateify.
func (d *Dirent) beforeSave() {
	// Refuse to save if the file has already been deleted (but still has
	// open fds, which is why the Dirent is still accessible). We know that
	// the restore opening of the file will always fail. This condition will
	// last until all the open fds and this Dirent are closed and released.
	//
	// Note that this is rejection rather than failure---it would be
	// perfectly OK to save---we are simply disallowing it here to prevent
	// generating non-restorable state dumps. As the program continues its
	// execution, it may become allowed to save again.
	if atomic.LoadInt32(&d.deleted) != 0 {
		n, _ := d.FullName(nil /* root */)
		panic(ErrSaveRejection{fmt.Errorf("deleted file %q still has open fds", n)})
	}
}

// afterLoad is invoked by stateify.
func (d *Dirent) afterLoad() {
	// Re-add user-visible Dirents to allDirents on restore; presumably
	// allDirents membership is not part of the saved state — TODO confirm
	// against the allDirents declaration.
	if d.userVisible {
		allDirents.add(d)
	}
}
diff --git a/pkg/sentry/fs/fdpipe/BUILD b/pkg/sentry/fs/fdpipe/BUILD
new file mode 100644
index 000000000..9e1f65d3e
--- /dev/null
+++ b/pkg/sentry/fs/fdpipe/BUILD
@@ -0,0 +1,76 @@
package(licenses = ["notice"])  # Apache 2.0

load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("//tools/go_stateify:defs.bzl", "go_stateify")

go_stateify(
    name = "pipe_state",
    srcs = [
        "pipe.go",
        "pipe_state.go",
    ],
    out = "pipe_autogen_state.go",
    imports = ["gvisor.googlesource.com/gvisor/pkg/sentry/fs"],
    package = "fdpipe",
)

go_library(
    name = "fdpipe",
    srcs = [
        "pipe.go",
        "pipe_autogen_state.go",
        "pipe_opener.go",
        "pipe_state.go",
    ],
    importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fdpipe",
    visibility = ["//pkg/sentry:internal"],
    deps = [
        "//pkg/abi/linux",
        "//pkg/amutex",
        "//pkg/fd",
        "//pkg/log",
        "//pkg/metric",
        "//pkg/p9",
        "//pkg/refs",
        "//pkg/secio",
        "//pkg/sentry/context",
        "//pkg/sentry/device",
        "//pkg/sentry/fs",
        "//pkg/sentry/fs/fsutil",
        "//pkg/sentry/fs/lock",
        "//pkg/sentry/kernel/auth",
        "//pkg/sentry/kernel/time",
        "//pkg/sentry/memmap",
        "//pkg/sentry/platform",
        "//pkg/sentry/safemem",
        "//pkg/sentry/uniqueid",
        "//pkg/sentry/usermem",
        "//pkg/state",
        "//pkg/syserror",
        "//pkg/tcpip",
        "//pkg/tcpip/transport/unix",
        "//pkg/unet",
        "//pkg/waiter",
        "//pkg/waiter/fdnotifier",
    ],
)

go_test(
    name = "fdpipe_test",
    size = "small",
    srcs = [
        "pipe_opener_test.go",
        "pipe_test.go",
    ],
    embed = [":fdpipe"],
    deps = [
        "//pkg/fd",
        "//pkg/sentry/context",
        "//pkg/sentry/context/contexttest",
        "//pkg/sentry/fs",
        "//pkg/sentry/usermem",
        "//pkg/syserror",
        "//pkg/waiter/fdnotifier",
        "@com_github_google_uuid//:go_default_library",
    ],
)
diff --git a/pkg/sentry/fs/fdpipe/pipe.go b/pkg/sentry/fs/fdpipe/pipe.go
new file mode 100644
index 000000000..f7bbd4aff
---
/dev/null +++ b/pkg/sentry/fs/fdpipe/pipe.go @@ -0,0 +1,167 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fdpipe implements common namedpipe opening and accessing logic. +package fdpipe + +import ( + "os" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/secio" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" + "gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier" +) + +// pipeOperations are the fs.FileOperations of a host pipe. +type pipeOperations struct { + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + fsutil.NoIoctl `state:"nosave"` + waiter.Queue `state:"nosave"` + + // flags are the flags used to open the pipe. + flags fs.FileFlags `state:".(fs.FileFlags)"` + + // opener is how the pipe was opened. + opener NonBlockingOpener `state:"wait"` + + // file represents the host pipe. + file *fd.FD `state:"nosave"` + + // mu protects readAheadBuffer access below. 
+ mu sync.Mutex `state:"nosave"` + + // readAheadBuffer contains read bytes that have not yet been read + // by the application but need to be buffered for save-restore for correct + // opening semantics. The readAheadBuffer will only be non-empty when the + // is first opened and will be drained by subsequent reads on the pipe. + readAheadBuffer []byte +} + +// newPipeOperations returns an implementation of fs.FileOperations for a pipe. +func newPipeOperations(ctx context.Context, opener NonBlockingOpener, flags fs.FileFlags, file *fd.FD, readAheadBuffer []byte) (*pipeOperations, error) { + pipeOps := &pipeOperations{ + flags: flags, + opener: opener, + file: file, + readAheadBuffer: readAheadBuffer, + } + if err := pipeOps.init(); err != nil { + return nil, err + } + return pipeOps, nil +} + +// init initializes p.file. +func (p *pipeOperations) init() error { + var s syscall.Stat_t + if err := syscall.Fstat(p.file.FD(), &s); err != nil { + log.Warningf("pipe: cannot stat fd %d: %v", p.file.FD(), err) + return syscall.EINVAL + } + if s.Mode&syscall.S_IFIFO != syscall.S_IFIFO { + log.Warningf("pipe: cannot load fd %d as pipe, file type: %o", p.file.FD(), s.Mode) + return syscall.EINVAL + } + if err := syscall.SetNonblock(p.file.FD(), true); err != nil { + return err + } + if err := fdnotifier.AddFD(int32(p.file.FD()), &p.Queue); err != nil { + return err + } + return nil +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (p *pipeOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + p.Queue.EventRegister(e, mask) + fdnotifier.UpdateFD(int32(p.file.FD())) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (p *pipeOperations) EventUnregister(e *waiter.Entry) { + p.Queue.EventUnregister(e) + fdnotifier.UpdateFD(int32(p.file.FD())) +} + +// Readiness returns a mask of ready events for stream. 
+func (p *pipeOperations) Readiness(mask waiter.EventMask) (eventMask waiter.EventMask) { + return fdnotifier.NonBlockingPoll(int32(p.file.FD()), mask) +} + +// Release implements fs.FileOperations.Release. +func (p *pipeOperations) Release() { + fdnotifier.RemoveFD(int32(p.file.FD())) + p.file.Close() + p.file = nil +} + +// Read implements fs.FileOperations.Read. +func (p *pipeOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + // Drain the read ahead buffer, if it contains anything first. + var bufN int + var bufErr error + p.mu.Lock() + if len(p.readAheadBuffer) > 0 { + bufN, bufErr = dst.CopyOut(ctx, p.readAheadBuffer) + p.readAheadBuffer = p.readAheadBuffer[bufN:] + dst = dst.DropFirst(bufN) + } + p.mu.Unlock() + if dst.NumBytes() == 0 || bufErr != nil { + return int64(bufN), bufErr + } + + // Pipes expect full reads. + n, err := dst.CopyOutFrom(ctx, safemem.FromIOReader{secio.FullReader{p.file}}) + total := int64(bufN) + n + if err != nil && isBlockError(err) { + return total, syserror.ErrWouldBlock + } + return total, err +} + +// Write implements fs.FileOperations.Write. +func (p *pipeOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + n, err := src.CopyInTo(ctx, safemem.FromIOWriter{p.file}) + if err != nil && isBlockError(err) { + return n, syserror.ErrWouldBlock + } + return n, err +} + +// isBlockError unwraps os errors and checks if they are caused by EAGAIN or +// EWOULDBLOCK. This is so they can be transformed into syserror.ErrWouldBlock. 
+func isBlockError(err error) bool { + if err == syserror.EAGAIN || err == syserror.EWOULDBLOCK { + return true + } + if pe, ok := err.(*os.PathError); ok { + return isBlockError(pe.Err) + } + return false +} diff --git a/pkg/sentry/fs/fdpipe/pipe_opener.go b/pkg/sentry/fs/fdpipe/pipe_opener.go new file mode 100644 index 000000000..a0d59575f --- /dev/null +++ b/pkg/sentry/fs/fdpipe/pipe_opener.go @@ -0,0 +1,193 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fdpipe + +import ( + "io" + "os" + "syscall" + "time" + + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// NonBlockingOpener is a generic host file opener used to retry opening host +// pipes if necessary. +type NonBlockingOpener interface { + // NonBlockingOpen tries to open a host pipe in a non-blocking way, + // and otherwise returns an error. Implementations should be idempotent. + NonBlockingOpen(context.Context, fs.PermMask) (*fd.FD, error) +} + +// Open blocks until a host pipe can be opened or the action was cancelled. +// On success, returns fs.FileOperations wrapping the opened host pipe. 
+func Open(ctx context.Context, opener NonBlockingOpener, flags fs.FileFlags) (fs.FileOperations, error) { + p := &pipeOpenState{} + canceled := false + for { + if file, err := p.TryOpen(ctx, opener, flags); err != syserror.ErrWouldBlock { + return file, err + } + + // Honor the cancellation request if open still blocks. + if canceled { + // If we were canceled but we have a handle to a host + // file, we need to close it. + if p.hostFile != nil { + p.hostFile.Close() + } + return nil, syserror.ErrInterrupted + } + + cancel := ctx.SleepStart() + select { + case <-cancel: + // The cancellation request received here really says + // "cancel from now on (or ASAP)". Any environmental + // changes happened before receiving it, that might have + // caused open to not block anymore, should still be + // respected. So we cannot just return here. We have to + // give open another try below first. + canceled = true + ctx.SleepFinish(false) + case <-time.After(100 * time.Millisecond): + // If we would block, then delay retrying for a bit, since there + // is no way to know when the pipe would be ready to be + // re-opened. This is identical to sending an event notification + // to stop blocking in Task.Block, given that this routine will + // stop retrying if a cancelation is received. + ctx.SleepFinish(true) + } + } +} + +// pipeOpenState holds state needed to open a blocking named pipe read only, for instance the +// file that has been opened but doesn't yet have a corresponding writer. +type pipeOpenState struct { + // hostFile is the read only named pipe which lacks a corresponding writer. + hostFile *fd.FD +} + +// unwrapError is needed to match against ENXIO primarily. +func unwrapError(err error) error { + if pe, ok := err.(*os.PathError); ok { + return pe.Err + } + return err +} + +// TryOpen uses a NonBlockingOpener to try to open a host pipe, respecting the fs.FileFlags. 
func (p *pipeOpenState) TryOpen(ctx context.Context, opener NonBlockingOpener, flags fs.FileFlags) (*pipeOperations, error) {
	switch {
	// Reject invalid configurations so they don't accidentally succeed below.
	case !flags.Read && !flags.Write:
		return nil, syscall.EINVAL

	// Handle opening RDWR or with O_NONBLOCK: will never block, so try only once.
	case (flags.Read && flags.Write) || flags.NonBlocking:
		f, err := opener.NonBlockingOpen(ctx, fs.PermMask{Read: flags.Read, Write: flags.Write})
		if err != nil {
			return nil, err
		}
		return newPipeOperations(ctx, opener, flags, f, nil)

	// Handle opening O_WRONLY blocking: convert ENXIO to syserror.ErrWouldBlock.
	// See TryOpenWriteOnly for more details.
	case flags.Write:
		return p.TryOpenWriteOnly(ctx, opener)

	default:
		// Handle opening O_RDONLY blocking: convert EOF from read to syserror.ErrWouldBlock.
		// See TryOpenReadOnly for more details.
		return p.TryOpenReadOnly(ctx, opener)
	}
}

// TryOpenReadOnly tries to open a host pipe read only but only returns a fs.File when
// there is a coordinating writer. Call TryOpenReadOnly repeatedly on the same pipeOpenState
// until syserror.ErrWouldBlock is no longer returned.
//
// How it works:
//
// Opening a pipe read only will return no error, but each non zero Read will return EOF
// until a writer becomes available, then EWOULDBLOCK. This is the only state change
// available to us. We keep a read ahead buffer in case we read bytes instead of getting
// EWOULDBLOCK, to be read from on the first read request to this fs.File.
func (p *pipeOpenState) TryOpenReadOnly(ctx context.Context, opener NonBlockingOpener) (*pipeOperations, error) {
	// Waiting for a blocking read only open involves reading from the host pipe until
	// bytes or other writers are available, so instead of retrying opening the pipe,
	// it's necessary to retry reading from the pipe. To do this we need to keep around
	// the read only pipe we opened, until success or an irrecoverable read error (at
	// which point it must be closed).
	if p.hostFile == nil {
		var err error
		p.hostFile, err = opener.NonBlockingOpen(ctx, fs.PermMask{Read: true})
		if err != nil {
			return nil, err
		}
	}

	// Try to read from the pipe to see if writers are around.
	tryReadBuffer := make([]byte, 1)
	n, rerr := p.hostFile.Read(tryReadBuffer)

	// No bytes were read.
	if n == 0 {
		// EOF means that we're not ready yet.
		if rerr == nil || rerr == io.EOF {
			return nil, syserror.ErrWouldBlock
		}
		// Any error that is not EWOULDBLOCK also means we're not
		// ready yet, and probably never will be ready. In this
		// case we need to close the host pipe we opened.
		if unwrapError(rerr) != syscall.EWOULDBLOCK {
			p.hostFile.Close()
			return nil, rerr
		}
	}

	// If any bytes were read, no matter the corresponding error, we need
	// to keep them around so they can be read by the application.
	var readAheadBuffer []byte
	if n > 0 {
		readAheadBuffer = tryReadBuffer
	}

	// Successfully opened read only blocking pipe with either bytes available
	// to read and/or a writer available.
	return newPipeOperations(ctx, opener, fs.FileFlags{Read: true}, p.hostFile, readAheadBuffer)
}

// TryOpenWriteOnly tries to open a host pipe write only but only returns a fs.File when
// there is a coordinating reader. Call TryOpenWriteOnly repeatedly on the same pipeOpenState
// until syserror.ErrWouldBlock is no longer returned.
//
// How it works:
//
// Opening a pipe write only will return ENXIO until readers are available. Converts the ENXIO
// to a syserror.ErrWouldBlock, to tell callers to retry.
+func (*pipeOpenState) TryOpenWriteOnly(ctx context.Context, opener NonBlockingOpener) (*pipeOperations, error) { + hostFile, err := opener.NonBlockingOpen(ctx, fs.PermMask{Write: true}) + if unwrapError(err) == syscall.ENXIO { + return nil, syserror.ErrWouldBlock + } + if err != nil { + return nil, err + } + return newPipeOperations(ctx, opener, fs.FileFlags{Write: true}, hostFile, nil) +} diff --git a/pkg/sentry/fs/fdpipe/pipe_opener_test.go b/pkg/sentry/fs/fdpipe/pipe_opener_test.go new file mode 100644 index 000000000..83f6c1986 --- /dev/null +++ b/pkg/sentry/fs/fdpipe/pipe_opener_test.go @@ -0,0 +1,522 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package fdpipe

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path"
	"syscall"
	"testing"
	"time"

	"github.com/google/uuid"
	"gvisor.googlesource.com/gvisor/pkg/fd"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// hostOpener is a NonBlockingOpener backed by a real host named pipe at name.
type hostOpener struct {
	name string
}

// NonBlockingOpen implements NonBlockingOpener by opening the host pipe with
// O_NONBLOCK and the requested access mode.
func (h *hostOpener) NonBlockingOpen(_ context.Context, p fs.PermMask) (*fd.FD, error) {
	var flags int
	switch {
	case p.Read && p.Write:
		flags = syscall.O_RDWR
	case p.Write:
		flags = syscall.O_WRONLY
	case p.Read:
		flags = syscall.O_RDONLY
	default:
		return nil, syscall.EINVAL
	}
	f, err := syscall.Open(h.name, flags|syscall.O_NONBLOCK, 0666)
	if err != nil {
		return nil, err
	}
	return fd.New(f), nil
}

// pipename returns a fresh, unique path in the temp dir for a test pipe.
func pipename() string {
	return fmt.Sprintf(path.Join(os.TempDir(), "test-named-pipe-%s"), uuid.New())
}

// mkpipe creates a host FIFO at name.
func mkpipe(name string) error {
	return syscall.Mknod(name, syscall.S_IFIFO|0666, 0)
}

func TestTryOpen(t *testing.T) {
	for _, test := range []struct {
		// desc is the test's description.
		desc string

		// makePipe is true if the test case should create the pipe.
		makePipe bool

		// flags are the fs.FileFlags used to open the pipe.
		flags fs.FileFlags

		// expectFile is true if a fs.File is expected.
		expectFile bool

		// err is the expected error.
		err error
	}{
		{
			desc:       "FileFlags lacking Read and Write are invalid",
			makePipe:   false,
			flags:      fs.FileFlags{}, /* bogus */
			expectFile: false,
			err:        syscall.EINVAL,
		},
		{
			desc:       "NonBlocking Read only error returns immediately",
			makePipe:   false, /* causes the error */
			flags:      fs.FileFlags{Read: true, NonBlocking: true},
			expectFile: false,
			err:        syscall.ENOENT,
		},
		{
			desc:       "NonBlocking Read only success returns immediately",
			makePipe:   true,
			flags:      fs.FileFlags{Read: true, NonBlocking: true},
			expectFile: true,
			err:        nil,
		},
		{
			desc:       "NonBlocking Write only error returns immediately",
			makePipe:   false, /* causes the error */
			flags:      fs.FileFlags{Write: true, NonBlocking: true},
			expectFile: false,
			err:        syscall.ENOENT,
		},
		{
			desc:       "NonBlocking Write only no reader error returns immediately",
			makePipe:   true,
			flags:      fs.FileFlags{Write: true, NonBlocking: true},
			expectFile: false,
			err:        syscall.ENXIO,
		},
		{
			desc:       "ReadWrite error returns immediately",
			makePipe:   false, /* causes the error */
			flags:      fs.FileFlags{Read: true, Write: true},
			expectFile: false,
			err:        syscall.ENOENT,
		},
		{
			desc:       "ReadWrite returns immediately",
			makePipe:   true,
			flags:      fs.FileFlags{Read: true, Write: true},
			expectFile: true,
			err:        nil,
		},
		{
			desc:       "Blocking Write only returns open error",
			makePipe:   false, /* causes the error */
			flags:      fs.FileFlags{Write: true},
			expectFile: false,
			err:        syscall.ENOENT, /* from bogus perms */
		},
		{
			desc:       "Blocking Read only returns open error",
			makePipe:   false, /* causes the error */
			flags:      fs.FileFlags{Read: true},
			expectFile: false,
			err:        syscall.ENOENT,
		},
		{
			desc:       "Blocking Write only returns with syserror.ErrWouldBlock",
			makePipe:   true,
			flags:      fs.FileFlags{Write: true},
			expectFile: false,
			err:        syserror.ErrWouldBlock,
		},
		{
			desc:       "Blocking Read only returns with syserror.ErrWouldBlock",
			makePipe:   true,
			flags:      fs.FileFlags{Read: true},
			expectFile: false,
			err:        syserror.ErrWouldBlock,
		},
	} {
		name := pipename()
		if test.makePipe {
			// Create the pipe. We do this per-test case to keep tests independent.
			if err := mkpipe(name); err != nil {
				t.Errorf("%s: failed to make host pipe: %v", test.desc, err)
				continue
			}
			defer syscall.Unlink(name)
		}

		// Use a host opener to keep things simple.
		opener := &hostOpener{name: name}

		pipeOpenState := &pipeOpenState{}
		ctx := contexttest.Context(t)
		pipeOps, err := pipeOpenState.TryOpen(ctx, opener, test.flags)
		if unwrapError(err) != test.err {
			t.Errorf("%s: got error %v, want %v", test.desc, err, test.err)
			if pipeOps != nil {
				// Cleanup the state of the pipe, and remove the fd from the
				// fdnotifier. Sadly this needed to maintain the correctness
				// of other tests because the fdnotifier is global.
				pipeOps.Release()
			}
			continue
		}
		if (pipeOps != nil) != test.expectFile {
			t.Errorf("%s: got non-nil file %v, want %v", test.desc, pipeOps != nil, test.expectFile)
		}
		if pipeOps != nil {
			// Same as above.
			pipeOps.Release()
		}
	}
}

func TestPipeOpenUnblocksEventually(t *testing.T) {
	for _, test := range []struct {
		// desc is the test's description.
		desc string

		// partnerIsReader is true if the goroutine opening the same pipe as the test case
		// should open the pipe read only. Otherwise write only. This also means that the
		// test case will open the pipe in the opposite way.
		partnerIsReader bool

		// partnerIsBlocking is true if the goroutine opening the same pipe as the test case
		// should do so without the O_NONBLOCK flag, otherwise opens the pipe with O_NONBLOCK
		// until ENXIO is not returned.
		partnerIsBlocking bool
	}{
		{
			desc:              "Blocking Read with blocking writer partner opens eventually",
			partnerIsReader:   false,
			partnerIsBlocking: true,
		},
		{
			desc:              "Blocking Write with blocking reader partner opens eventually",
			partnerIsReader:   true,
			partnerIsBlocking: true,
		},
		{
			desc:              "Blocking Read with non-blocking writer partner opens eventually",
			partnerIsReader:   false,
			partnerIsBlocking: false,
		},
		{
			desc:              "Blocking Write with non-blocking reader partner opens eventually",
			partnerIsReader:   true,
			partnerIsBlocking: false,
		},
	} {
		// Create the pipe. We do this per-test case to keep tests independent.
		name := pipename()
		if err := mkpipe(name); err != nil {
			t.Errorf("%s: failed to make host pipe: %v", test.desc, err)
			continue
		}
		defer syscall.Unlink(name)

		// Spawn the partner.
		type fderr struct {
			fd  int
			err error
		}
		errch := make(chan fderr, 1)
		go func() {
			var flags int
			if test.partnerIsReader {
				flags = syscall.O_RDONLY
			} else {
				flags = syscall.O_WRONLY
			}
			if test.partnerIsBlocking {
				fd, err := syscall.Open(name, flags, 0666)
				errch <- fderr{fd: fd, err: err}
			} else {
				var fd int
				err := error(syscall.ENXIO)
				for err == syscall.ENXIO {
					fd, err = syscall.Open(name, flags|syscall.O_NONBLOCK, 0666)
					time.Sleep(1 * time.Second)
				}
				errch <- fderr{fd: fd, err: err}
			}
		}()

		// Setup file flags for either a read only or write only open.
		flags := fs.FileFlags{
			Read:  !test.partnerIsReader,
			Write: test.partnerIsReader,
		}

		// Open the pipe in a blocking way, which should succeed eventually.
		opener := &hostOpener{name: name}
		ctx := contexttest.Context(t)
		pipeOps, err := Open(ctx, opener, flags)
		if pipeOps != nil {
			// Same as TestTryOpen.
			pipeOps.Release()
		}

		// Check that the partner opened the file successfully.
		e := <-errch
		if e.err != nil {
			t.Errorf("%s: partner got error %v, wanted nil", test.desc, e.err)
			continue
		}
		// If so, then close the partner fd to avoid leaking an fd.
		syscall.Close(e.fd)

		// Check that our blocking open was successful.
		if err != nil {
			t.Errorf("%s: blocking open got error %v, wanted nil", test.desc, err)
			continue
		}
		if pipeOps == nil {
			t.Errorf("%s: blocking open got nil file, wanted non-nil", test.desc)
			continue
		}
	}
}

func TestCopiedReadAheadBuffer(t *testing.T) {
	// Create the pipe.
	name := pipename()
	if err := mkpipe(name); err != nil {
		t.Fatalf("failed to make host pipe: %v", err)
	}
	defer syscall.Unlink(name)

	// We're taking advantage of the fact that pipes opened read only always return
	// success, but internally they are not deemed "opened" until we're sure that
	// another writer comes along. This means we can open the same pipe write only
	// with no problems + write to it, given that opener.Open already tried to open
	// the pipe RDONLY and succeeded, which we know happened if TryOpen returns
	// syserror.ErrWouldBlock.
	//
	// This simulates the open(RDONLY) <-> open(WRONLY)+write race we care about, but
	// does not cause our test to be racy (which would be terrible).
	opener := &hostOpener{name: name}
	pipeOpenState := &pipeOpenState{}
	ctx := contexttest.Context(t)
	pipeOps, err := pipeOpenState.TryOpen(ctx, opener, fs.FileFlags{Read: true})
	if pipeOps != nil {
		pipeOps.Release()
		t.Fatalf("open(%s, %o) got file, want nil", name, syscall.O_RDONLY)
	}
	if err != syserror.ErrWouldBlock {
		t.Fatalf("open(%s, %o) got error %v, want %v", name, syscall.O_RDONLY, err, syserror.ErrWouldBlock)
	}

	// Then open the same pipe write only and write some bytes to it. The next
	// time we try to open the pipe read only again via the pipeOpenState, we should
	// succeed and buffer some of the bytes written.
	fd, err := syscall.Open(name, syscall.O_WRONLY, 0666)
	if err != nil {
		t.Fatalf("open(%s, %o) got error %v, want nil", name, syscall.O_WRONLY, err)
	}
	defer syscall.Close(fd)

	data := []byte("hello")
	if n, err := syscall.Write(fd, data); n != len(data) || err != nil {
		t.Fatalf("write(%v) got (%d, %v), want (%d, nil)", data, n, err, len(data))
	}

	// Try the read again, knowing that it should succeed this time.
	pipeOps, err = pipeOpenState.TryOpen(ctx, opener, fs.FileFlags{Read: true})
	if pipeOps == nil {
		t.Fatalf("open(%s, %o) got nil file, want not nil", name, syscall.O_RDONLY)
	}
	defer pipeOps.Release()

	if err != nil {
		t.Fatalf("open(%s, %o) got error %v, want nil", name, syscall.O_RDONLY, err)
	}

	inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{
		Type: fs.Pipe,
	})
	file := fs.NewFile(ctx, fs.NewDirent(inode, "pipe"), fs.FileFlags{Read: true}, pipeOps)

	// Check that the file we opened points to a pipe with a non-empty read ahead buffer.
	bufsize := len(pipeOps.readAheadBuffer)
	if bufsize != 1 {
		t.Fatalf("read ahead buffer got %d bytes, want %d", bufsize, 1)
	}

	// Now for the final test, try to read everything in, expecting to get back all of
	// the bytes that were written at once. Note that in the wild there is no atomic
	// read size so expecting to get all bytes from a single writer when there are
	// multiple readers is a bad expectation.
	buf := make([]byte, len(data))
	ioseq := usermem.BytesIOSequence(buf)
	n, err := pipeOps.Read(ctx, file, ioseq, 0)
	if err != nil {
		t.Fatalf("read request got error %v, want nil", err)
	}
	if n != int64(len(data)) {
		t.Fatalf("read request got %d bytes, want %d", n, len(data))
	}
	if !bytes.Equal(buf, data) {
		t.Errorf("read request got bytes [%v], want [%v]", buf, data)
	}
}

func TestPipeHangup(t *testing.T) {
	for _, test := range []struct {
		// desc is the test's description.
+ desc string + + // flags control how we open our end of the pipe and must be read + // only or write only. They also dicate how a coordinating partner + // fd is opened, which is their inverse (read only -> write only, etc). + flags fs.FileFlags + + // hangupSelf if true causes the test case to close our end of the pipe + // and causes hangup errors to be asserted on our coordinating partner's + // fd. If hangupSelf is false, then our partner's fd is closed and the + // hangup errors are expected on our end of the pipe. + hangupSelf bool + }{ + { + desc: "Read only gets hangup error", + flags: fs.FileFlags{Read: true}, + }, + { + desc: "Write only gets hangup error", + flags: fs.FileFlags{Write: true}, + }, + { + desc: "Read only generates hangup error", + flags: fs.FileFlags{Read: true}, + hangupSelf: true, + }, + { + desc: "Write only generates hangup error", + flags: fs.FileFlags{Write: true}, + hangupSelf: true, + }, + } { + if test.flags.Read == test.flags.Write { + t.Errorf("%s: test requires a single reader or writer", test.desc) + continue + } + + // Create the pipe. We do this per-test case to keep tests independent. + name := pipename() + if err := mkpipe(name); err != nil { + t.Errorf("%s: failed to make host pipe: %v", test.desc, err) + continue + } + defer syscall.Unlink(name) + + // Fire off a partner routine which tries to open the same pipe blocking, + // which will synchronize with us. The channel allows us to get back the + // fd once we expect this partner routine to succeed, so we can manifest + // hangup events more directly. + fdchan := make(chan int, 1) + go func() { + // Be explicit about the flags to protect the test from + // misconfiguration. 
+ var flags int + if test.flags.Read { + flags = syscall.O_WRONLY + } else { + flags = syscall.O_RDONLY + } + fd, err := syscall.Open(name, flags, 0666) + if err != nil { + t.Logf("Open(%q, %o, 0666) partner failed: %v", name, flags, err) + } + fdchan <- fd + }() + + // Open our end in a blocking way to ensure that we coordinate. + opener := &hostOpener{name: name} + ctx := contexttest.Context(t) + pipeOps, err := Open(ctx, opener, test.flags) + if err != nil { + t.Errorf("%s: Open got error %v, want nil", test.desc, err) + continue + } + // Don't defer file.DecRef here because that causes the hangup we're + // trying to test for. + + // Expect the partner routine to have coordinated with us and get back + // its open fd. + f := <-fdchan + if f < 0 { + t.Errorf("%s: partner routine got fd %d, want > 0", test.desc, f) + pipeOps.Release() + continue + } + + if test.hangupSelf { + // Hangup self and assert that our partner got the expected hangup + // error. + pipeOps.Release() + + if test.flags.Read { + // Partner is writer. + assertWriterHungup(t, test.desc, fd.NewReadWriter(f)) + } else { + // Partner is reader. + assertReaderHungup(t, test.desc, fd.NewReadWriter(f)) + } + } else { + // Hangup our partner and expect us to get the hangup error. + syscall.Close(f) + defer pipeOps.Release() + + if test.flags.Read { + assertReaderHungup(t, test.desc, pipeOps.(*pipeOperations).file) + } else { + assertWriterHungup(t, test.desc, pipeOps.(*pipeOperations).file) + } + } + } +} + +func assertReaderHungup(t *testing.T, desc string, reader io.Reader) bool { + // Drain the pipe completely, it might have crap in it, but expect EOF eventually. 
+ var err error + for err == nil { + _, err = reader.Read(make([]byte, 10)) + } + if err != io.EOF { + t.Errorf("%s: read from self after hangup got error %v, want %v", desc, err, io.EOF) + return false + } + return true +} + +func assertWriterHungup(t *testing.T, desc string, writer io.Writer) bool { + if _, err := writer.Write([]byte("hello")); unwrapError(err) != syscall.EPIPE { + t.Errorf("%s: write to self after hangup got error %v, want %v", desc, err, syscall.EPIPE) + return false + } + return true +} diff --git a/pkg/sentry/fs/fdpipe/pipe_state.go b/pkg/sentry/fs/fdpipe/pipe_state.go new file mode 100644 index 000000000..8996a2178 --- /dev/null +++ b/pkg/sentry/fs/fdpipe/pipe_state.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fdpipe + +import ( + "fmt" + "io/ioutil" + "sync" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// beforeSave is invoked by stateify. +func (p *pipeOperations) beforeSave() { + if p.flags.Read { + data, err := ioutil.ReadAll(p.file) + if err != nil && !isBlockError(err) { + panic(fmt.Sprintf("failed to read from pipe: %v", err)) + } + p.readAheadBuffer = append(p.readAheadBuffer, data...) 
+ } else if p.flags.Write { + file, err := p.opener.NonBlockingOpen(context.Background(), fs.PermMask{Write: true}) + if err != nil { + panic(fs.ErrSaveRejection{fmt.Errorf("write-only pipe end cannot be re-opened as %v: %v", p, err)}) + } + file.Close() + } +} + +// saveFlags is invoked by stateify. +func (p *pipeOperations) saveFlags() fs.FileFlags { + return p.flags +} + +// readPipeOperationsLoading is used to ensure that write-only pipe fds are +// opened after read/write and read-only pipe fds, to avoid ENXIO when +// multiple pipe fds refer to different ends of the same pipe. +var readPipeOperationsLoading sync.WaitGroup + +// loadFlags is invoked by stateify. +func (p *pipeOperations) loadFlags(flags fs.FileFlags) { + // This is a hack to ensure that readPipeOperationsLoading includes all + // readable pipe fds before any asynchronous calls to + // readPipeOperationsLoading.Wait(). + if flags.Read { + readPipeOperationsLoading.Add(1) + } + p.flags = flags +} + +// afterLoad is invoked by stateify. +func (p *pipeOperations) afterLoad() { + load := func() { + if !p.flags.Read { + readPipeOperationsLoading.Wait() + } else { + defer readPipeOperationsLoading.Done() + } + var err error + p.file, err = p.opener.NonBlockingOpen(context.Background(), fs.PermMask{ + Read: p.flags.Read, + Write: p.flags.Write, + }) + if err != nil { + panic(fmt.Sprintf("unable to open pipe %v: %v", p, err)) + } + if err := p.init(); err != nil { + panic(fmt.Sprintf("unable to initialize pipe %v: %v", p, err)) + } + } + + // Do background opening of pipe ends. Note for write-only pipe ends we + // have to do it asynchronously to avoid blocking the restore. + fs.Async(load) +} diff --git a/pkg/sentry/fs/fdpipe/pipe_test.go b/pkg/sentry/fs/fdpipe/pipe_test.go new file mode 100644 index 000000000..6cd314f5b --- /dev/null +++ b/pkg/sentry/fs/fdpipe/pipe_test.go @@ -0,0 +1,489 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fdpipe + +import ( + "bytes" + "io" + "os" + "syscall" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier" +) + +func singlePipeFD() (int, error) { + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + return -1, err + } + syscall.Close(fds[1]) + return fds[0], nil +} + +func singleDirFD() (int, error) { + return syscall.Open(os.TempDir(), syscall.O_RDONLY, 0666) +} + +func mockPipeDirent(t *testing.T) *fs.Dirent { + ctx := contexttest.Context(t) + node := fs.NewMockInodeOperations(ctx) + node.UAttr = fs.UnstableAttr{ + Perms: fs.FilePermissions{ + User: fs.PermMask{Read: true, Write: true}, + }, + } + inode := fs.NewInode(node, fs.NewMockMountSource(nil), fs.StableAttr{ + Type: fs.Pipe, + BlockSize: usermem.PageSize, + }) + return fs.NewDirent(inode, "") +} + +func TestNewPipe(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. + desc string + + // getfd generates the fd to pass to newPipeOperations. + getfd func() (int, error) + + // flags are the fs.FileFlags passed to newPipeOperations. + flags fs.FileFlags + + // readAheadBuffer is the buffer passed to newPipeOperations. 
+ readAheadBuffer []byte + + // err is the expected error. + err error + }{ + { + desc: "Cannot make new pipe from bad fd", + getfd: func() (int, error) { return -1, nil }, + err: syscall.EINVAL, + }, + { + desc: "Cannot make new pipe from non-pipe fd", + getfd: singleDirFD, + err: syscall.EINVAL, + }, + { + desc: "Can make new pipe from pipe fd", + getfd: singlePipeFD, + flags: fs.FileFlags{Read: true}, + readAheadBuffer: []byte("hello"), + }, + } { + gfd, err := test.getfd() + if err != nil { + t.Errorf("%s: getfd got (%d, %v), want (fd, nil)", test.desc, gfd, err) + continue + } + f := fd.New(gfd) + + p, err := newPipeOperations(contexttest.Context(t), nil, test.flags, f, test.readAheadBuffer) + if p != nil { + // This is necessary to remove the fd from the global fd notifier. + defer p.Release() + } else { + // If there is no p to DecRef on, because newPipeOperations failed, then the + // file still needs to be closed. + defer f.Close() + } + + if err != test.err { + t.Errorf("%s: got error %v, want %v", test.desc, err, test.err) + continue + } + // Check the state of the pipe given that it was successfully opened. 
+		if err == nil {
+			if p == nil {
+				t.Errorf("%s: got nil pipe and nil error, want (pipe, nil)", test.desc)
+				continue
+			}
+			if flags := p.flags; test.flags != flags {
+				t.Errorf("%s: got file flags %s, want %s", test.desc, flags, test.flags)
+				continue
+			}
+			if len(test.readAheadBuffer) != len(p.readAheadBuffer) {
+				t.Errorf("%s: got read ahead buffer length %d, want %d", test.desc, len(p.readAheadBuffer), len(test.readAheadBuffer))
+				continue
+			}
+			fileFlags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(p.file.FD()), syscall.F_GETFL, 0)
+			if errno != 0 {
+				t.Errorf("%s: failed to get file flags for fd %d, got %v, want 0", test.desc, p.file.FD(), errno)
+				continue
+			}
+			if fileFlags&syscall.O_NONBLOCK == 0 {
+				t.Errorf("%s: pipe is blocking, expected non-blocking", test.desc)
+				continue
+			}
+			// Verify the pipe fd was registered for event notifications.
+			if !fdnotifier.HasFD(int32(f.FD())) {
+				t.Errorf("%s: pipe fd %d is not registered for events", test.desc, f.FD())
+			}
+		}
+	}
+}
+
+func TestPipeDestruction(t *testing.T) {
+	fds := make([]int, 2)
+	if err := syscall.Pipe(fds); err != nil {
+		t.Fatalf("failed to create pipes: got %v, want nil", err)
+	}
+	f := fd.New(fds[0])
+
+	// We don't care about the other end, just use the read end.
+	syscall.Close(fds[1])
+
+	// Test the read end, but it doesn't really matter which.
+	p, err := newPipeOperations(contexttest.Context(t), nil, fs.FileFlags{Read: true}, f, nil)
+	if err != nil {
+		f.Close()
+		t.Fatalf("newPipeOperations got error %v, want nil", err)
+	}
+	// Drop our only reference, which should trigger the destructor.
+ p.Release() + + if fdnotifier.HasFD(int32(fds[0])) { + t.Fatalf("after DecRef fdnotifier has fd %d, want no longer registered", fds[0]) + } + if p.file != nil { + t.Errorf("after DecRef got file, want nil") + } +} + +type Seek struct{} + +type ReadDir struct{} + +type Writev struct { + Src usermem.IOSequence +} + +type Readv struct { + Dst usermem.IOSequence +} + +type Fsync struct{} + +func TestPipeRequest(t *testing.T) { + for _, test := range []struct { + // desc is the test's description. + desc string + + // request to execute. + context interface{} + + // flags determines whether to use the read or write end + // of the pipe, for this test it can only be Read or Write. + flags fs.FileFlags + + // keepOpenPartner if false closes the other end of the pipe, + // otherwise this is delayed until the end of the test. + keepOpenPartner bool + + // expected error + err error + }{ + { + desc: "ReadDir on pipe returns ENOTDIR", + context: &ReadDir{}, + err: syscall.ENOTDIR, + }, + { + desc: "Fsync on pipe returns EINVAL", + context: &Fsync{}, + err: syscall.EINVAL, + }, + { + desc: "Seek on pipe returns ESPIPE", + context: &Seek{}, + err: syscall.ESPIPE, + }, + { + desc: "Readv on pipe from empty buffer returns nil", + context: &Readv{Dst: usermem.BytesIOSequence(nil)}, + flags: fs.FileFlags{Read: true}, + }, + { + desc: "Readv on pipe from non-empty buffer and closed partner returns EOF", + context: &Readv{Dst: usermem.BytesIOSequence(make([]byte, 10))}, + flags: fs.FileFlags{Read: true}, + err: io.EOF, + }, + { + desc: "Readv on pipe from non-empty buffer and open partner returns EWOULDBLOCK", + context: &Readv{Dst: usermem.BytesIOSequence(make([]byte, 10))}, + flags: fs.FileFlags{Read: true}, + keepOpenPartner: true, + err: syserror.ErrWouldBlock, + }, + { + desc: "Writev on pipe from empty buffer returns nil", + context: &Writev{Src: usermem.BytesIOSequence(nil)}, + flags: fs.FileFlags{Write: true}, + }, + { + desc: "Writev on pipe from non-empty buffer and 
closed partner returns EPIPE", + context: &Writev{Src: usermem.BytesIOSequence([]byte("hello"))}, + flags: fs.FileFlags{Write: true}, + err: syscall.EPIPE, + }, + { + desc: "Writev on pipe from non-empty buffer and open partner succeeds", + context: &Writev{Src: usermem.BytesIOSequence([]byte("hello"))}, + flags: fs.FileFlags{Write: true}, + keepOpenPartner: true, + }, + } { + if test.flags.Read && test.flags.Write { + panic("both read and write not supported for this test") + } + + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + t.Errorf("%s: failed to create pipes: got %v, want nil", test.desc, err) + continue + } + + // Configure the fd and partner fd based on the file flags. + testFd, partnerFd := fds[0], fds[1] + if test.flags.Write { + testFd, partnerFd = fds[1], fds[0] + } + + // Configure closing the fds. + if test.keepOpenPartner { + defer syscall.Close(partnerFd) + } else { + syscall.Close(partnerFd) + } + + // Create the pipe. + ctx := contexttest.Context(t) + p, err := newPipeOperations(ctx, nil, test.flags, fd.New(testFd), nil) + if err != nil { + t.Fatalf("%s: newPipeOperations got error %v, want nil", test.desc, err) + } + defer p.Release() + + inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{Type: fs.Pipe}) + file := fs.NewFile(ctx, fs.NewDirent(inode, "pipe"), fs.FileFlags{Read: true}, p) + + // Issue request via the appropriate function. 
+ switch c := test.context.(type) { + case *Seek: + _, err = p.Seek(ctx, file, 0, 0) + case *ReadDir: + _, err = p.Readdir(ctx, file, nil) + case *Readv: + _, err = p.Read(ctx, file, c.Dst, 0) + case *Writev: + _, err = p.Write(ctx, file, c.Src, 0) + case *Fsync: + err = p.Fsync(ctx, file, 0, fs.FileMaxOffset, fs.SyncAll) + default: + t.Errorf("%s: unknown request type %T", test.desc, test.context) + } + + if unwrapError(err) != test.err { + t.Errorf("%s: got error %v, want %v", test.desc, err, test.err) + } + } +} + +func TestPipeReadAheadBuffer(t *testing.T) { + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + t.Fatalf("failed to create pipes: got %v, want nil", err) + } + rfile := fd.New(fds[0]) + + // Eventually close the write end, which is not wrapped in a pipe object. + defer syscall.Close(fds[1]) + + // Write some bytes to this end. + data := []byte("world") + if n, err := syscall.Write(fds[1], data); n != len(data) || err != nil { + rfile.Close() + t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(data)) + } + // Close the write end immediately, we don't care about it. + + buffered := []byte("hello ") + ctx := contexttest.Context(t) + p, err := newPipeOperations(ctx, nil, fs.FileFlags{Read: true}, rfile, buffered) + if err != nil { + rfile.Close() + t.Fatalf("newPipeOperations got error %v, want nil", err) + } + defer p.Release() + + inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{ + Type: fs.Pipe, + }) + file := fs.NewFile(ctx, fs.NewDirent(inode, "pipe"), fs.FileFlags{Read: true}, p) + + // In total we expect to read data + buffered. + total := append(buffered, data...) 
+
+	buf := make([]byte, len(total))
+	iov := usermem.BytesIOSequence(buf)
+	n, err := p.Read(contexttest.Context(t), file, iov, 0)
+	if err != nil {
+		t.Fatalf("read request got error %v, want nil", err)
+	}
+	if n != int64(len(total)) {
+		t.Fatalf("read request got %d bytes, want %d", n, len(total))
+	}
+	if !bytes.Equal(buf, total) {
+		t.Errorf("read request got bytes [%v], want [%v]", buf, total)
+	}
+}
+
+// This is very important for pipes in general because they can return EWOULDBLOCK and for
+// those that block they must continue until they have read all of the data (and report it
+// as such).
+func TestPipeReadsAccumulate(t *testing.T) {
+	fds := make([]int, 2)
+	if err := syscall.Pipe(fds); err != nil {
+		t.Fatalf("failed to create pipes: got %v, want nil", err)
+	}
+	rfile := fd.New(fds[0])
+
+	// Eventually close the write end, it doesn't depend on a pipe object.
+	defer syscall.Close(fds[1])
+
+	// Get a new read only pipe reference.
+	ctx := contexttest.Context(t)
+	p, err := newPipeOperations(ctx, nil, fs.FileFlags{Read: true}, rfile, nil)
+	if err != nil {
+		rfile.Close()
+		t.Fatalf("newPipeOperations got error %v, want nil", err)
+	}
+	// Don't forget to remove the fd from the fd notifier. Otherwise other tests will
+	// likely be borked, because it's global :(
+	defer p.Release()
+
+	inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{
+		Type: fs.Pipe,
+	})
+	file := fs.NewFile(ctx, fs.NewDirent(inode, "pipe"), fs.FileFlags{Read: true}, p)
+
+	// Write some bytes to the pipe.
+	data := []byte("some message")
+	if n, err := syscall.Write(fds[1], data); n != len(data) || err != nil {
+		t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(data))
+	}
+
+	// Construct a segment vec that is a bit more than we have written so we trigger
+	// an EWOULDBLOCK.
+ wantBytes := len(data) + 1 + readBuffer := make([]byte, wantBytes) + iov := usermem.BytesIOSequence(readBuffer) + n, err := p.Read(ctx, file, iov, 0) + total := n + iov = iov.DropFirst64(n) + if err != syserror.ErrWouldBlock { + t.Fatalf("Readv got error %v, want %v", err, syserror.ErrWouldBlock) + } + + // Write a few more bytes to allow us to read more/accumulate. + extra := []byte("extra") + if n, err := syscall.Write(fds[1], extra); n != len(extra) || err != nil { + t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(extra)) + } + + // This time, using the same request, we should not block. + n, err = p.Read(ctx, file, iov, 0) + total += n + if err != nil { + t.Fatalf("Readv got error %v, want nil", err) + } + + // Assert that the result we got back is cumulative. + if total != int64(wantBytes) { + t.Fatalf("Readv sequence got %d bytes, want %d", total, wantBytes) + } + + if want := append(data, extra[0]); !bytes.Equal(readBuffer, want) { + t.Errorf("Readv sequence got %v, want %v", readBuffer, want) + } +} + +// Same as TestReadsAccumulate. +func TestPipeWritesAccumulate(t *testing.T) { + fds := make([]int, 2) + if err := syscall.Pipe(fds); err != nil { + t.Fatalf("failed to create pipes: got %v, want nil", err) + } + wfile := fd.New(fds[1]) + + // Eventually close the read end, it doesn't depend on a pipe object. + defer syscall.Close(fds[0]) + + // Get a new write only pipe reference. + ctx := contexttest.Context(t) + p, err := newPipeOperations(ctx, nil, fs.FileFlags{Write: true}, wfile, nil) + if err != nil { + wfile.Close() + t.Fatalf("newPipeOperations got error %v, want nil", err) + } + // Don't forget to remove the fd from the fd notifier. 
Otherwise other tests will + // likely be borked, because it's global :( + defer p.Release() + + inode := fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{ + Type: fs.Pipe, + }) + file := fs.NewFile(ctx, fs.NewDirent(inode, "pipe"), fs.FileFlags{Read: true}, p) + + // Construct a segment vec that is larger than the pipe size to trigger an EWOULDBLOCK. + wantBytes := 65536 * 2 + writeBuffer := make([]byte, wantBytes) + for i := 0; i < wantBytes; i++ { + writeBuffer[i] = 'a' + } + iov := usermem.BytesIOSequence(writeBuffer) + n, err := p.Write(ctx, file, iov, 0) + total := n + iov = iov.DropFirst64(n) + if err != syserror.ErrWouldBlock { + t.Fatalf("Writev got error %v, want %v", err, syserror.ErrWouldBlock) + } + + // Read the entire pipe buf size to make space for the second half. + throwAway := make([]byte, 65536) + if n, err := syscall.Read(fds[0], throwAway); n != len(throwAway) || err != nil { + t.Fatalf("write to pipe got (%d, %v), want (%d, nil)", n, err, len(throwAway)) + } + + // This time we should not block. + n, err = p.Write(ctx, file, iov, 0) + total += n + if err != nil { + t.Fatalf("Writev got error %v, want nil", err) + } + + // Assert that the result we got back is cumulative. + if total != int64(wantBytes) { + t.Fatalf("Writev sequence got %d bytes, want %d", total, wantBytes) + } +} diff --git a/pkg/sentry/fs/file.go b/pkg/sentry/fs/file.go new file mode 100644 index 000000000..de2e80bf0 --- /dev/null +++ b/pkg/sentry/fs/file.go @@ -0,0 +1,404 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"math"
+	"sync/atomic"
+
+	"gvisor.googlesource.com/gvisor/pkg/amutex"
+	"gvisor.googlesource.com/gvisor/pkg/log"
+	"gvisor.googlesource.com/gvisor/pkg/refs"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/lock"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/limits"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/uniqueid"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+	"gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+// FileMaxOffset is the maximum possible file offset.
+const FileMaxOffset = math.MaxInt64
+
+// File is an open file handle. It is thread-safe.
+//
+// File provides stronger synchronization guarantees than Linux. Linux
+// synchronizes lseek(2), read(2), and write(2) with respect to the file
+// offset for regular files and only for those interfaces. See
+// fs/read_write.c:fdget_pos, fs/read_write.c:fdput_pos and FMODE_ATOMIC_POS.
+//
+// In contrast, File synchronizes any operation that could take a long time
+// under a single abortable mutex which also synchronizes lseek(2), read(2),
+// and write(2).
+//
+// FIXME: Split synchronization from cancellation.
+type File struct {
+	refs.AtomicRefCount
+
+	// UniqueID is the globally unique identifier of the File.
+	UniqueID uint64
+
+	// Dirent is the Dirent backing this File. This encodes the name
+	// of the File via Dirent.FullName() as well as its identity via the
+	// Dirent's Inode. The Dirent is non-nil.
+	//
+	// A File holds a reference to this Dirent. Using the returned Dirent is
+	// only safe as long as a reference on the File is held. The association
+	// between a File and a Dirent is immutable.
+ // + // Files that are not parented in a filesystem return a root Dirent + // that holds a reference to their Inode. + // + // The name of the Dirent may reflect parentage if the Dirent is not a + // root Dirent or the identity of the File on a pseudo filesystem (pipefs, + // sockfs, etc). + // + // Multiple Files may hold a reference to the same Dirent. This is the + // common case for Files that are parented and maintain consistency with + // other files via the Dirent cache. + Dirent *Dirent + + // flags are the File's flags. Setting or getting flags is fully atomic + // and is not protected by mu (below). + flags atomic.Value `state:".(FileFlags)"` + + // mu is dual-purpose: first, to make read(2) and write(2) thread-safe + // in conformity with POSIX, and second, to cancel operations before they + // begin in response to interruptions (i.e. signals). + mu amutex.AbortableMutex `state:"nosave"` + + // FileOperations implements file system specific behavior for this File. + FileOperations FileOperations + + // offset is the File's offset. Updating offset is protected by mu but + // can be read atomically via File.Offset() outside of mu. + offset int64 +} + +// NewFile returns a File. It takes a reference on the Dirent and owns the +// lifetime of the FileOperations. Files that do not support reading and +// writing at an arbitrary offset should set flags.Pread and flags.Pwrite +// to false respectively. +func NewFile(ctx context.Context, dirent *Dirent, flags FileFlags, fops FileOperations) *File { + dirent.IncRef() + f := &File{ + UniqueID: uniqueid.GlobalFromContext(ctx), + Dirent: dirent, + FileOperations: fops, + } + f.flags.Store(flags) + f.mu.Init() + return f +} + +// DecRef destroys the File when it is no longer referenced. +func (f *File) DecRef() { + f.DecRefWithDestructor(func() { + // Drop BSD style locks. 
+ lockRng := lock.LockRange{Start: 0, End: lock.LockEOF} + f.Dirent.Inode.LockCtx.BSD.UnlockRegion(lock.UniqueID(f.UniqueID), lockRng) + + // Release resources held by the FileOperations. + f.FileOperations.Release() + + // Release a reference on the Dirent. + f.Dirent.DecRef() + }) +} + +// Flags atomically loads the File's flags. +func (f *File) Flags() FileFlags { + return f.flags.Load().(FileFlags) +} + +// SetFlags atomically changes the File's flags to the values contained +// in newFlags. See SettableFileFlags for values that can be set. +func (f *File) SetFlags(newFlags SettableFileFlags) { + flags := f.flags.Load().(FileFlags) + flags.Direct = newFlags.Direct + flags.NonBlocking = newFlags.NonBlocking + flags.Append = newFlags.Append + f.flags.Store(flags) +} + +// Offset atomically loads the File's offset. +func (f *File) Offset() int64 { + return atomic.LoadInt64(&f.offset) +} + +// Readiness implements waiter.Waitable.Readiness. +func (f *File) Readiness(mask waiter.EventMask) waiter.EventMask { + return f.FileOperations.Readiness(mask) +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (f *File) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + f.FileOperations.EventRegister(e, mask) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (f *File) EventUnregister(e *waiter.Entry) { + f.FileOperations.EventUnregister(e) +} + +// Seek calls f.FileOperations.Seek with f as the File, updating the file +// offset to the value returned by f.FileOperations.Seek if the operation +// is successful. +// +// Returns syserror.ErrInterrupted if seeking was interrupted. 
+func (f *File) Seek(ctx context.Context, whence SeekWhence, offset int64) (int64, error) { + if !f.mu.Lock(ctx) { + return 0, syserror.ErrInterrupted + } + defer f.mu.Unlock() + + newOffset, err := f.FileOperations.Seek(ctx, f, whence, offset) + if err == nil { + atomic.StoreInt64(&f.offset, newOffset) + } + return newOffset, err +} + +// Readdir reads the directory entries of this File and writes them out +// to the DentrySerializer until entries can no longer be written. If even +// a single directory entry is written then Readdir returns a nil error +// and the directory offset is advanced. +// +// Readdir unconditionally updates the access time on the File's Inode, +// see fs/readdir.c:iterate_dir. +// +// Returns syserror.ErrInterrupted if reading was interrupted. +func (f *File) Readdir(ctx context.Context, serializer DentrySerializer) error { + if !f.mu.Lock(ctx) { + return syserror.ErrInterrupted + } + defer f.mu.Unlock() + + offset, err := f.FileOperations.Readdir(ctx, f, serializer) + atomic.StoreInt64(&f.offset, offset) + return err +} + +// Readv calls f.FileOperations.Read with f as the File, advancing the file +// offset if f.FileOperations.Read returns bytes read > 0. +// +// Returns syserror.ErrInterrupted if reading was interrupted. +func (f *File) Readv(ctx context.Context, dst usermem.IOSequence) (int64, error) { + if !f.mu.Lock(ctx) { + return 0, syserror.ErrInterrupted + } + + n, err := f.FileOperations.Read(ctx, f, dst, f.offset) + if n > 0 { + atomic.AddInt64(&f.offset, n) + } + f.mu.Unlock() + return n, err +} + +// Preadv calls f.FileOperations.Read with f as the File. It does not +// advance the file offset. If !f.Flags().Pread, Preadv should not be +// called. +// +// Otherwise same as Readv. 
+func (f *File) Preadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {
+	if !f.mu.Lock(ctx) {
+		return 0, syserror.ErrInterrupted
+	}
+
+	n, err := f.FileOperations.Read(ctx, f, dst, offset)
+	f.mu.Unlock()
+	return n, err
+}
+
+// Writev calls f.FileOperations.Write with f as the File, advancing the
+// file offset if f.FileOperations.Write returns bytes written > 0.
+//
+// Writev positions the write offset at EOF if f.Flags().Append. This is
+// unavoidably racy for network file systems. Writev also truncates src
+// to avoid overrunning the current file size limit if necessary.
+//
+// Returns syserror.ErrInterrupted if writing was interrupted.
+func (f *File) Writev(ctx context.Context, src usermem.IOSequence) (int64, error) {
+	if !f.mu.Lock(ctx) {
+		return 0, syserror.ErrInterrupted
+	}
+
+	offset, err := f.checkWriteLocked(ctx, &src, f.offset)
+	if err != nil {
+		f.mu.Unlock()
+		return 0, err
+	}
+	n, err := f.FileOperations.Write(ctx, f, src, offset)
+	if n >= 0 {
+		atomic.StoreInt64(&f.offset, offset+n)
+	}
+	f.mu.Unlock()
+	return n, err
+}
+
+// Pwritev calls f.FileOperations.Write with f as the File. It does not
+// advance the file offset. If !f.Flags().Pwrite, Pwritev should not be
+// called.
+//
+// Otherwise same as Writev.
+func (f *File) Pwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
+	if !f.mu.Lock(ctx) {
+		return 0, syserror.ErrInterrupted
+	}
+
+	offset, err := f.checkWriteLocked(ctx, &src, offset)
+	if err != nil {
+		f.mu.Unlock()
+		return 0, err
+	}
+	n, err := f.FileOperations.Write(ctx, f, src, offset)
+	f.mu.Unlock()
+	return n, err
+}
+
+// checkWriteLocked returns the offset to write at or an error if the write
+// would not succeed. May update src to fit a write operation into a file
+// size limit.
+func (f *File) checkWriteLocked(ctx context.Context, src *usermem.IOSequence, offset int64) (int64, error) {
+	// Handle append only files.
Note that this is still racy for network + // filesystems. + if f.Flags().Append { + uattr, err := f.Dirent.Inode.UnstableAttr(ctx) + if err != nil { + // This is an odd error, most likely it is evidence + // that something is terribly wrong with the filesystem. + // Return a generic EIO error. + log.Warningf("Failed to check write of inode %#v: %v", f.Dirent.Inode.StableAttr, err) + return offset, syserror.EIO + } + offset = uattr.Size + } + + // Is this a regular file? + if IsRegular(f.Dirent.Inode.StableAttr) { + // Enforce size limits. + fileSizeLimit := limits.FromContext(ctx).Get(limits.FileSize).Cur + if fileSizeLimit <= math.MaxInt64 { + if offset >= int64(fileSizeLimit) { + return offset, syserror.ErrExceedsFileSizeLimit + } + *src = src.TakeFirst64(int64(fileSizeLimit) - offset) + } + } + + return offset, nil +} + +// Fsync calls f.FileOperations.Fsync with f as the File. +// +// Returns syserror.ErrInterrupted if syncing was interrupted. +func (f *File) Fsync(ctx context.Context, start int64, end int64, syncType SyncType) error { + if !f.mu.Lock(ctx) { + return syserror.ErrInterrupted + } + defer f.mu.Unlock() + + return f.FileOperations.Fsync(ctx, f, start, end, syncType) +} + +// Flush calls f.FileOperations.Flush with f as the File. +// +// Returns syserror.ErrInterrupted if syncing was interrupted. +func (f *File) Flush(ctx context.Context) error { + if !f.mu.Lock(ctx) { + return syserror.ErrInterrupted + } + defer f.mu.Unlock() + + return f.FileOperations.Flush(ctx, f) +} + +// ConfigureMMap calls f.FileOperations.ConfigureMMap with f as the File. +// +// Returns syserror.ErrInterrupted if interrupted. +func (f *File) ConfigureMMap(ctx context.Context, opts *memmap.MMapOpts) error { + if !f.mu.Lock(ctx) { + return syserror.ErrInterrupted + } + defer f.mu.Unlock() + + return f.FileOperations.ConfigureMMap(ctx, f, opts) +} + +// MappedName implements memmap.MappingIdentity.MappedName. 
func (f *File) MappedName(ctx context.Context) string {
	name, _ := f.Dirent.FullName(RootFromContext(ctx))
	return name
}

// DeviceID implements memmap.MappingIdentity.DeviceID.
func (f *File) DeviceID() uint64 {
	return f.Dirent.Inode.StableAttr.DeviceID
}

// InodeID implements memmap.MappingIdentity.InodeID.
func (f *File) InodeID() uint64 {
	return f.Dirent.Inode.StableAttr.InodeID
}

// Msync implements memmap.MappingIdentity.Msync.
//
// The mappable range end is exclusive, while Fsync's end is inclusive,
// hence mr.End-1.
func (f *File) Msync(ctx context.Context, mr memmap.MappableRange) error {
	return f.Fsync(ctx, int64(mr.Start), int64(mr.End-1), SyncData)
}

// FileReader implements io.Reader and io.ReaderAt.
type FileReader struct {
	// Ctx is the context for the file reader.
	Ctx context.Context

	// File is the file to read from.
	File *File
}

// Read implements io.Reader.Read.
func (r *FileReader) Read(buf []byte) (int, error) {
	n, err := r.File.Readv(r.Ctx, usermem.BytesIOSequence(buf))
	return int(n), err
}

// ReadAt implements io.ReaderAt.ReadAt.
func (r *FileReader) ReadAt(buf []byte, offset int64) (int, error) {
	n, err := r.File.Preadv(r.Ctx, usermem.BytesIOSequence(buf), offset)
	return int(n), err
}

// FileWriter implements io.Writer and io.WriterAt.
type FileWriter struct {
	// Ctx is the context for the file writer.
	Ctx context.Context

	// File is the file to write to.
	File *File
}

// Write implements io.Writer.Write.
func (w *FileWriter) Write(buf []byte) (int, error) {
	n, err := w.File.Writev(w.Ctx, usermem.BytesIOSequence(buf))
	return int(n), err
}

// WriteAt implements io.WriterAt.WriteAt.
func (w *FileWriter) WriteAt(buf []byte, offset int64) (int, error) {
	n, err := w.File.Pwritev(w.Ctx, usermem.BytesIOSequence(buf), offset)
	return int(n), err
}
diff --git a/pkg/sentry/fs/file_operations.go b/pkg/sentry/fs/file_operations.go
new file mode 100644
index 000000000..d223bb5c7
--- /dev/null
+++ b/pkg/sentry/fs/file_operations.go
@@ -0,0 +1,106 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/waiter"
)

// FileOperations are operations on a File that diverge per file system.
//
// Operations that take a *File may use only the following interfaces:
//
// - File.UniqueID: Operations may only read this value.
// - File.Dirent: Operations must not take or drop a reference.
// - File.Offset(): This value is guaranteed to not change for the
//   duration of the operation.
// - File.Flags(): This value may change during the operation.
type FileOperations interface {
	// Release releases resources held by FileOperations.
	Release()

	// Waitable defines how this File can be waited on for read and
	// write readiness.
	waiter.Waitable

	// Seek seeks to offset based on SeekWhence. Returns the new
	// offset or no change in the offset and an error.
	Seek(ctx context.Context, file *File, whence SeekWhence, offset int64) (int64, error)

	// Readdir reads the directory entries of file and serializes them
	// using serializer.
	//
	// Returns the new directory offset or no change in the offset and
	// an error. The offset returned must not be less than file.Offset().
	//
	// Serialization of directory entries must not happen asynchronously.
	Readdir(ctx context.Context, file *File, serializer DentrySerializer) (int64, error)

	// Read reads from file into dst at offset and returns the number
	// of bytes read which must be greater than or equal to 0. File
	// systems that do not support reading at an offset, (i.e. pipefs,
	// sockfs) may ignore the offset. These file systems are expected
	// to construct Files with !FileFlags.Pread.
	//
	// Read may return a nil error and only partially fill dst (at or
	// before EOF). If the file represents a symlink, Read reads the target
	// value of the symlink.
	//
	// Read does not check permissions nor flags.
	//
	// Read must not be called if !FileFlags.Read.
	Read(ctx context.Context, file *File, dst usermem.IOSequence, offset int64) (int64, error)

	// Write writes src to file at offset and returns the number of bytes
	// written which must be greater than or equal to 0. Like Read, file
	// systems that do not support writing at an offset (i.e. pipefs, sockfs)
	// may ignore the offset. These file systems are expected to construct
	// Files with !FileFlags.Pwrite.
	//
	// If only part of src could be written, Write must return an error
	// indicating why (e.g. syserror.ErrWouldBlock).
	//
	// Write does not check permissions nor flags.
	//
	// Write must not be called if !FileFlags.Write.
	Write(ctx context.Context, file *File, src usermem.IOSequence, offset int64) (int64, error)

	// Fsync writes buffered modifications of file and/or flushes in-flight
	// operations to backing storage based on syncType. The range to sync is
	// [start, end]. The end is inclusive so that the last byte of a maximally
	// sized file can be synced.
	Fsync(ctx context.Context, file *File, start, end int64, syncType SyncType) error

	// Flush this file's buffers/state (on close(2)).
	Flush(ctx context.Context, file *File) error

	// ConfigureMMap mutates opts to implement mmap(2) for the file. Most
	// implementations can either embed fsutil.NoMMap (if they don't support
	// memory mapping) or call fsutil.GenericConfigureMMap with the appropriate
	// memmap.Mappable.
	ConfigureMMap(ctx context.Context, file *File, opts *memmap.MMapOpts) error

	// Ioctl implements the ioctl(2) linux syscall.
	//
	// io provides access to the virtual memory space to which pointers in args
	// refer.
	//
	// Preconditions: The AddressSpace (if any) that io refers to is activated.
	Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error)
}
diff --git a/pkg/sentry/fs/file_overlay.go b/pkg/sentry/fs/file_overlay.go
new file mode 100644
index 000000000..0c6e622b9
--- /dev/null
+++ b/pkg/sentry/fs/file_overlay.go
@@ -0,0 +1,345 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs

import (
	"sync"

	"gvisor.googlesource.com/gvisor/pkg/log"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
	"gvisor.googlesource.com/gvisor/pkg/waiter"
)

// overlayFile gets a handle to a file from the upper or lower filesystem
// in an overlay. The caller is responsible for calling File.DecRef on
// the returned file.
func overlayFile(ctx context.Context, inode *Inode, flags FileFlags) (*File, error) {
	// Do a song and dance to eventually get to:
	//
	//   File -> single reference
	//   Dirent -> single reference
	//   Inode -> multiple references
	//
	// So that File.DecRef() -> File.destroy -> Dirent.DecRef -> Dirent.destroy,
	// and both the transitory File and Dirent can be GC'ed but the Inode
	// remains.

	// Take another reference on the Inode.
	inode.IncRef()

	// Start with a single reference on the Dirent. It inherits the reference
	// we just took on the Inode above.
	dirent := NewTransientDirent(inode)

	// Get a File. This will take another reference on the Dirent.
	f, err := inode.GetFile(ctx, dirent, flags)

	// Drop the extra reference on the Dirent. Now there's only one reference
	// on the dirent, either owned by f (if non-nil), or the Dirent is about
	// to be destroyed (if GetFile failed).
	dirent.DecRef()

	return f, err
}

// overlayFileOperations implements FileOperations for a file in an overlay.
type overlayFileOperations struct {
	// upperMu protects upper below. In contrast lower is stable.
	upperMu sync.Mutex `state:"nosave"`

	// We can't share Files in upper and lower filesystems between all Files
	// in an overlay because some file systems expect to get distinct handles
	// that are not consistent with each other on open(2).
	//
	// So we lazily acquire an upper File when the overlayEntry acquires an
	// upper Inode (it might have one from the start). This synchronizes with
	// copy up.
	//
	// If upper is non-nil and this is not a directory, then lower is ignored.
	//
	// For directories, upper and lower are ignored because it is always
	// necessary to acquire new directory handles so that the directory cursors
	// of the upper and lower Files are not exhausted.
	upper *File
	lower *File

	// dirCursor is a directory cursor for a directory in an overlay.
	dirCursor string

	// dirCache is cache of DentAttrs from upper and lower Inodes.
	dirCache *SortedDentryMap
}

// Release implements FileOperations.Release.
func (f *overlayFileOperations) Release() {
	if f.upper != nil {
		f.upper.DecRef()
	}
	if f.lower != nil {
		f.lower.DecRef()
	}
}

// EventRegister implements FileOperations.EventRegister.
func (f *overlayFileOperations) EventRegister(we *waiter.Entry, mask waiter.EventMask) {
	f.upperMu.Lock()
	defer f.upperMu.Unlock()
	// The upper handle, when present, takes precedence over the lower one.
	if f.upper != nil {
		f.upper.EventRegister(we, mask)
		return
	}
	f.lower.EventRegister(we, mask)
}

// EventUnregister implements FileOperations.EventUnregister.
func (f *overlayFileOperations) EventUnregister(we *waiter.Entry) {
	f.upperMu.Lock()
	defer f.upperMu.Unlock()
	if f.upper != nil {
		f.upper.EventUnregister(we)
		return
	}
	f.lower.EventUnregister(we)
}

// Readiness implements FileOperations.Readiness.
func (f *overlayFileOperations) Readiness(mask waiter.EventMask) waiter.EventMask {
	f.upperMu.Lock()
	defer f.upperMu.Unlock()
	if f.upper != nil {
		return f.upper.Readiness(mask)
	}
	return f.lower.Readiness(mask)
}

// Seek implements FileOperations.Seek.
func (f *overlayFileOperations) Seek(ctx context.Context, file *File, whence SeekWhence, offset int64) (int64, error) {
	f.upperMu.Lock()
	defer f.upperMu.Unlock()

	var seekDir bool
	var n int64
	if f.upper != nil {
		var err error
		if n, err = f.upper.FileOperations.Seek(ctx, file, whence, offset); err != nil {
			return n, err
		}
		seekDir = IsDir(f.upper.Dirent.Inode.StableAttr)
	} else {
		var err error
		if n, err = f.lower.FileOperations.Seek(ctx, file, whence, offset); err != nil {
			return n, err
		}
		seekDir = IsDir(f.lower.Dirent.Inode.StableAttr)
	}

	// If this was a seek on a directory, we must update the cursor.
	if seekDir && whence == SeekSet && offset == 0 {
		// Currently only seeking to 0 on a directory is supported.
		// FIXME: Lift directory seeking limitations.
		f.dirCursor = ""
	}
	return n, nil
}

// Readdir implements FileOperations.Readdir.
func (f *overlayFileOperations) Readdir(ctx context.Context, file *File, serializer DentrySerializer) (int64, error) {
	o := file.Dirent.Inode.overlay

	// Hold the copy-up lock so the upper/lower Inodes are stable for the
	// duration of the read.
	o.copyMu.RLock()
	defer o.copyMu.RUnlock()

	var err error
	f.dirCache, err = readdirEntries(ctx, o)
	if err != nil {
		return file.Offset(), err
	}

	root := RootFromContext(ctx)
	defer root.DecRef()

	dirCtx := &DirCtx{
		Serializer: serializer,
		DirCursor:  &f.dirCursor,
	}
	return DirentReaddir(ctx, file.Dirent, f, root, dirCtx, file.Offset())
}

// IterateDir implements DirIterator.IterateDir.
func (f *overlayFileOperations) IterateDir(ctx context.Context, dirCtx *DirCtx, offset int) (int, error) {
	n, err := GenericReaddir(dirCtx, f.dirCache)
	return offset + n, err
}

// Read implements FileOperations.Read.
func (f *overlayFileOperations) Read(ctx context.Context, file *File, dst usermem.IOSequence, offset int64) (int64, error) {
	o := file.Dirent.Inode.overlay

	o.copyMu.RLock()
	defer o.copyMu.RUnlock()

	if o.upper != nil {
		// We may need to acquire an open file handle to read from if
		// copy up has occurred. Otherwise we risk reading from the
		// wrong source.
		f.upperMu.Lock()
		if f.upper == nil {
			var err error
			f.upper, err = overlayFile(ctx, o.upper, file.Flags())
			if err != nil {
				f.upperMu.Unlock()
				log.Warningf("failed to acquire handle with flags %v: %v", file.Flags(), err)
				return 0, syserror.EIO
			}
		}
		f.upperMu.Unlock()
		return f.upper.FileOperations.Read(ctx, f.upper, dst, offset)
	}
	return f.lower.FileOperations.Read(ctx, f.lower, dst, offset)
}

// Write implements FileOperations.Write.
func (f *overlayFileOperations) Write(ctx context.Context, file *File, src usermem.IOSequence, offset int64) (int64, error) {
	// f.upper must be non-nil. See inode_overlay.go:overlayGetFile, where the
	// file is copied up and opened in the upper filesystem if FileFlags.Write.
	// Write cannot be called if !FileFlags.Write, see FileOperations.Write.
	return f.upper.FileOperations.Write(ctx, f.upper, src, offset)
}

// Fsync implements FileOperations.Fsync.
func (f *overlayFileOperations) Fsync(ctx context.Context, file *File, start, end int64, syncType SyncType) error {
	var err error
	f.upperMu.Lock()
	if f.upper != nil {
		err = f.upper.FileOperations.Fsync(ctx, f.upper, start, end, syncType)
	}
	f.upperMu.Unlock()
	if f.lower != nil {
		// N.B. Fsync on the lower filesystem can cause writes of file
		// attributes (i.e. access time) despite the fact that we must
		// treat the lower filesystem as read-only.
		//
		// This matches the semantics of fsync(2) in Linux overlayfs.
		//
		// NOTE(review): when both layers are synced, the lower result
		// overwrites any error from the upper sync — confirm intended.
		err = f.lower.FileOperations.Fsync(ctx, f.lower, start, end, syncType)
	}
	return err
}

// Flush implements FileOperations.Flush.
func (f *overlayFileOperations) Flush(ctx context.Context, file *File) error {
	// Flush whatever handles we have.
	//
	// NOTE(review): as with Fsync above, a lower-layer Flush result
	// overwrites any upper-layer error — confirm intended.
	var err error
	f.upperMu.Lock()
	if f.upper != nil {
		err = f.upper.FileOperations.Flush(ctx, f.upper)
	}
	f.upperMu.Unlock()
	if f.lower != nil {
		err = f.lower.FileOperations.Flush(ctx, f.lower)
	}
	return err
}

// ConfigureMMap implements FileOperations.ConfigureMMap.
func (*overlayFileOperations) ConfigureMMap(ctx context.Context, file *File, opts *memmap.MMapOpts) error {
	o := file.Dirent.Inode.overlay

	o.copyMu.RLock()
	defer o.copyMu.RUnlock()

	if !o.isMappableLocked() {
		return syserror.ENODEV
	}
	// FIXME: This is a copy/paste of fsutil.GenericConfigureMMap,
	// which we can't use because the overlay implementation is in package fs,
	// so depending on fs/fsutil would create a circular dependency. Move
	// overlay to fs/overlay.
	opts.Mappable = o
	opts.MappingIdentity = file
	file.IncRef()
	return nil
}

// Ioctl implements fs.FileOperations.Ioctl and always returns ENOTTY.
func (*overlayFileOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {
	return 0, syserror.ENOTTY
}

// readdirEntries returns a sorted map of directory entries from the
// upper and/or lower filesystem.
func readdirEntries(ctx context.Context, o *overlayEntry) (*SortedDentryMap, error) {
	// Assert that there is at least one upper or lower entry.
	if o.upper == nil && o.lower == nil {
		panic("invalid overlayEntry, needs at least one Inode")
	}
	entries := make(map[string]DentAttr)

	// Try the upper filesystem first.
	if o.upper != nil {
		var err error
		entries, err = readdirOne(ctx, NewTransientDirent(o.upper))
		if err != nil {
			return nil, err
		}
	}

	// Try the lower filesystem next.
	if o.lower != nil {
		lowerEntries, err := readdirOne(ctx, NewTransientDirent(o.lower))
		if err != nil {
			return nil, err
		}
		for name, entry := range lowerEntries {
			// Skip this name if it is a negative entry in the
			// upper or there exists a whiteout for it.
			if o.upper != nil {
				if overlayHasWhiteout(o.upper, name) {
					continue
				}
			}
			// Prefer the entries from the upper filesystem
			// when names overlap.
			if _, ok := entries[name]; !ok {
				entries[name] = entry
			}
		}
	}

	// Sort and return the entries.
	return NewSortedDentryMap(entries), nil
}

// readdirOne reads all of the directory entries from d.
func readdirOne(ctx context.Context, d *Dirent) (map[string]DentAttr, error) {
	dir, err := d.Inode.GetFile(ctx, d, FileFlags{Read: true})
	if err != nil {
		return nil, err
	}
	defer dir.DecRef()

	// Use a stub serializer to read the entries into memory.
	stubSerializer := &CollectEntriesSerializer{}
	if err := dir.Readdir(ctx, stubSerializer); err != nil {
		return nil, err
	}
	// The "." and ".." entries are from the overlay Inode's Dirent, not the stub.
	delete(stubSerializer.Entries, ".")
	delete(stubSerializer.Entries, "..")
	return stubSerializer.Entries, nil
}
diff --git a/pkg/sentry/fs/file_overlay_test.go b/pkg/sentry/fs/file_overlay_test.go
new file mode 100644
index 000000000..407ba8562
--- /dev/null
+++ b/pkg/sentry/fs/file_overlay_test.go
@@ -0,0 +1,137 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs_test

import (
	"reflect"
	"testing"

	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
)

func TestReaddir(t *testing.T) {
	ctx := contexttest.Context(t)
	ctx = &rootContext{
		Context: ctx,
		root:    fs.NewDirent(newTestRamfsDir(ctx, nil, nil), "root"),
	}
	for _, test := range []struct {
		// Test description.
		desc string

		// Lookup parameters.
		dir *fs.Inode

		// Want from lookup.
		err   error
		names []string
	}{
		{
			desc: "no upper, lower has entries",
			dir: fs.NewTestOverlayDir(ctx,
				nil, /* upper */
				newTestRamfsDir(ctx, []dirContent{
					{name: "a"},
					{name: "b"},
				}, nil), /* lower */
			),
			names: []string{".", "..", "a", "b"},
		},
		{
			desc: "upper has entries, no lower",
			dir: fs.NewTestOverlayDir(ctx,
				newTestRamfsDir(ctx, []dirContent{
					{name: "a"},
					{name: "b"},
				}, nil), /* upper */
				nil, /* lower */
			),
			names: []string{".", "..", "a", "b"},
		},
		{
			desc: "upper and lower, entries combine",
			dir: fs.NewTestOverlayDir(ctx,
				newTestRamfsDir(ctx, []dirContent{
					{name: "a"},
				}, nil), /* upper */
				newTestRamfsDir(ctx, []dirContent{
					{name: "b"},
				}, nil), /* lower */
			),
			names: []string{".", "..", "a", "b"},
		},
		{
			desc: "upper and lower, entries combine, none are masked",
			dir: fs.NewTestOverlayDir(ctx,
				newTestRamfsDir(ctx, []dirContent{
					{name: "a"},
				}, []string{"b"}), /* upper */
				newTestRamfsDir(ctx, []dirContent{
					{name: "c"},
				}, nil), /* lower */
			),
			names: []string{".", "..", "a", "c"},
		},
		{
			desc: "upper and lower, entries combine, upper masks some of lower",
			dir: fs.NewTestOverlayDir(ctx,
				newTestRamfsDir(ctx, []dirContent{
					{name: "a"},
				}, []string{"b"}), /* upper */
				newTestRamfsDir(ctx, []dirContent{
					{name: "b"}, /* will be masked */
					{name: "c"},
				}, nil), /* lower */
			),
			names: []string{".", "..", "a", "c"},
		},
	} {
		t.Run(test.desc, func(t *testing.T) {
			openDir, err := test.dir.GetFile(ctx, fs.NewDirent(test.dir, "stub"), fs.FileFlags{Read: true})
			if err != nil {
				t.Fatalf("GetFile got error %v, want nil", err)
			}
			stubSerializer := &fs.CollectEntriesSerializer{}
			err = openDir.Readdir(ctx, stubSerializer)
			// NOTE(review): the message says "want nil" but the comparison
			// is against test.err, which may be non-nil.
			if err != test.err {
				t.Fatalf("Readdir got error %v, want nil", err)
			}
			if err != nil {
				return
			}
			if !reflect.DeepEqual(stubSerializer.Order, test.names) {
				t.Errorf("Readdir got names %v, want %v", stubSerializer.Order, test.names)
			}
		})
	}
}

type rootContext struct {
	context.Context
	root *fs.Dirent
}

// Value implements context.Context.
func (r *rootContext) Value(key interface{}) interface{} {
	switch key {
	case fs.CtxRoot:
		r.root.IncRef()
		return r.root
	default:
		return r.Context.Value(key)
	}
}
diff --git a/pkg/sentry/fs/file_state.go b/pkg/sentry/fs/file_state.go
new file mode 100644
index 000000000..341cbda0b
--- /dev/null
+++ b/pkg/sentry/fs/file_state.go
@@ -0,0 +1,30 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs

// afterLoad is invoked by stateify.
func (f *File) afterLoad() {
	f.mu.Init()
}

// saveFlags is invoked by stateify.
func (f *File) saveFlags() FileFlags {
	return f.flags.Load().(FileFlags)
}

// loadFlags is invoked by stateify.
func (f *File) loadFlags(flags FileFlags) {
	f.flags.Store(flags)
}
diff --git a/pkg/sentry/fs/file_test.go b/pkg/sentry/fs/file_test.go
new file mode 100644
index 000000000..18aee7101
--- /dev/null
+++ b/pkg/sentry/fs/file_test.go
@@ -0,0 +1,24 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs

import "io"

// Compile-time checks that FileReader and FileWriter satisfy the standard
// io reader/writer interfaces.
var (
	_ = io.Reader(&FileReader{})
	_ = io.ReaderAt(&FileReader{})
	_ = io.Writer(&FileWriter{})
	_ = io.WriterAt(&FileWriter{})
)
diff --git a/pkg/sentry/fs/filesystems.go b/pkg/sentry/fs/filesystems.go
new file mode 100644
index 000000000..7cd76dfe9
--- /dev/null
+++ b/pkg/sentry/fs/filesystems.go
@@ -0,0 +1,162 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs

import (
	"fmt"
	"sort"
	"strings"
	"sync"

	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
)

// FilesystemFlags matches include/linux/fs.h:file_system_type.fs_flags.
type FilesystemFlags int

const (
	// FilesystemRequiresDev indicates that the file system requires a device name
	// on mount. It is used to construct the output of /proc/filesystems.
	FilesystemRequiresDev FilesystemFlags = 1

	// Currently other flags are not used, but can be pulled in from
	// include/linux/fs.h:file_system_type as needed.
)

// Filesystem is a mountable file system.
type Filesystem interface {
	// Name is the unique identifier of the file system. It corresponds to the
	// filesystemtype argument of sys_mount and will appear in the output of
	// /proc/filesystems.
	Name() string

	// Flags indicate common properties of the file system.
	Flags() FilesystemFlags

	// Mount generates a mountable Inode backed by device and configured
	// using file system independent flags and file system dependent
	// data options.
	Mount(ctx context.Context, device string, flags MountSourceFlags, data string) (*Inode, error)

	// AllowUserMount determines whether mount(2) is allowed to mount a
	// file system of this type.
	AllowUserMount() bool
}

// filesystems is the global set of registered file systems. It does not need
// to be saved. Packages registering and unregistering file systems must do so
// before calling save/restore methods.
var filesystems = struct {
	// mu protects registered below.
	mu sync.Mutex

	// registered is a set of registered Filesystems.
	registered map[string]Filesystem
}{
	registered: make(map[string]Filesystem),
}

// RegisterFilesystem registers a new file system that is visible to mount and
// the /proc/filesystems list. Packages implementing Filesystem should call
// RegisterFilesystem in init().
func RegisterFilesystem(f Filesystem) {
	filesystems.mu.Lock()
	defer filesystems.mu.Unlock()

	// Registering the same name twice is a programmer error.
	if _, ok := filesystems.registered[f.Name()]; ok {
		panic(fmt.Sprintf("filesystem already registered at %q", f.Name()))
	}
	filesystems.registered[f.Name()] = f
}

// UnregisterFilesystem removes a file system from the global set. To keep the
// file system set compatible with save/restore, UnregisterFilesystem must be
// called before save/restore methods.
//
// For instance, packages may unregister their file system after it is mounted.
// This makes sense for pseudo file systems that should not be visible or
// mountable. See whitelistfs in fs/host/fs.go for one example.
func UnregisterFilesystem(name string) {
	filesystems.mu.Lock()
	defer filesystems.mu.Unlock()

	delete(filesystems.registered, name)
}

// FindFilesystem returns a Filesystem registered at name or (nil, false) if name
// is not a file system type that can be found in /proc/filesystems.
func FindFilesystem(name string) (Filesystem, bool) {
	filesystems.mu.Lock()
	defer filesystems.mu.Unlock()

	f, ok := filesystems.registered[name]
	return f, ok
}

// GetFilesystems returns the set of registered filesystems in a consistent order.
func GetFilesystems() []Filesystem {
	filesystems.mu.Lock()
	defer filesystems.mu.Unlock()

	var ss []Filesystem
	for _, s := range filesystems.registered {
		ss = append(ss, s)
	}
	// Map iteration order is random; sort by name for determinism.
	sort.Slice(ss, func(i, j int) bool { return ss[i].Name() < ss[j].Name() })
	return ss
}

// MountSourceFlags represents all mount option flags as a struct.
type MountSourceFlags struct {
	// ReadOnly corresponds to mount(2)'s "MS_RDONLY" and indicates that
	// the filesystem should be mounted read-only.
	ReadOnly bool

	// NoAtime corresponds to mount(2)'s "MS_NOATIME" and indicates that
	// the filesystem should not update access time in-place.
	NoAtime bool

	// ForcePageCache causes all filesystem I/O operations to use the page
	// cache, even when the platform supports direct mapped I/O. This
	// doesn't correspond to any Linux mount options.
	ForcePageCache bool
}

// GenericMountSourceOptions splits a string containing comma separated tokens of the
// format 'key=value' or 'key' into a map of keys and values. For example:
//
// data = "key0=value0,key1,key2=value2" -> map{'key0':'value0','key1':'','key2':'value2'}
//
// If data contains duplicate keys, then the last token wins.
func GenericMountSourceOptions(data string) map[string]string {
	options := make(map[string]string)
	if len(data) == 0 {
		// Don't return a nil map, callers might not be expecting that.
		return options
	}

	// Parse options and skip empty ones.
	for _, opt := range strings.Split(data, ",") {
		if len(opt) > 0 {
			res := strings.SplitN(opt, "=", 2)
			if len(res) == 2 {
				options[res[0]] = res[1]
			} else {
				options[opt] = ""
			}
		}
	}
	return options
}
diff --git a/pkg/sentry/fs/filetest/BUILD b/pkg/sentry/fs/filetest/BUILD
new file mode 100644
index 000000000..51a390d77
--- /dev/null
+++ b/pkg/sentry/fs/filetest/BUILD
@@ -0,0 +1,35 @@
package(licenses = ["notice"]) # Apache 2.0

load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//tools/go_stateify:defs.bzl", "go_stateify")

go_stateify(
    name = "filetest_state",
    srcs = [
        "filetest.go",
    ],
    out = "filetest_state.go",
    package = "filetest",
)

go_library(
    name = "filetest",
    testonly = 1,
    srcs = [
        "filetest.go",
        "filetest_state.go",
    ],
    importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/filetest",
    visibility = ["//pkg/sentry:internal"],
    deps = [
        "//pkg/refs",
        "//pkg/sentry/context",
        "//pkg/sentry/context/contexttest",
        "//pkg/sentry/fs",
        "//pkg/sentry/fs/anon",
        "//pkg/sentry/fs/fsutil",
        "//pkg/sentry/usermem",
        "//pkg/state",
        "//pkg/waiter",
    ],
)
diff --git
a/pkg/sentry/fs/filetest/filetest.go b/pkg/sentry/fs/filetest/filetest.go new file mode 100644 index 000000000..1831aa82f --- /dev/null +++ b/pkg/sentry/fs/filetest/filetest.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package filetest provides a test implementation of an fs.File. +package filetest + +import ( + "fmt" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/anon" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// TestFileOperations is an implementation of the File interface. It provides all +// required methods. +type TestFileOperations struct { + fsutil.NoopRelease `state:"nosave"` + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + fsutil.NoIoctl `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` +} + +// NewTestFile creates and initializes a new test file. 
+func NewTestFile(tb testing.TB) *fs.File { + ctx := contexttest.Context(tb) + dirent := fs.NewDirent(anon.NewInode(ctx), "test") + return fs.NewFile(ctx, dirent, fs.FileFlags{}, &TestFileOperations{}) +} + +// Read just fails the request. +func (*TestFileOperations) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, fmt.Errorf("Readv not implemented") +} + +// Write just fails the request. +func (*TestFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, fmt.Errorf("Writev not implemented") +} diff --git a/pkg/sentry/fs/flags.go b/pkg/sentry/fs/flags.go new file mode 100644 index 000000000..dfa6a3d62 --- /dev/null +++ b/pkg/sentry/fs/flags.go @@ -0,0 +1,67 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +// FileFlags encodes file flags. +type FileFlags struct { + // Direct indicates that I/O should be done directly. + Direct bool + + // NonBlocking indicates that I/O should not block. + NonBlocking bool + + // Sync indicates that any writes should be synchronous. + Sync bool + + // Append indicates this file is append only. + Append bool + + // Read indicates this file is readable. + Read bool + + // Write indicates this file is writeable. + Write bool + + // Pread indicates this file is readable at an arbitrary offset. + Pread bool + + // Pwrite indicates this file is writable at an arbitrary offset. 
+ Pwrite bool + + // Directory indicates that this file must be a directory. + Directory bool +} + +// SettableFileFlags is a subset of FileFlags above that can be changed +// via fcntl(2) using the F_SETFL command. +type SettableFileFlags struct { + // Direct indicates that I/O should be done directly. + Direct bool + + // NonBlocking indicates that I/O should not block. + NonBlocking bool + + // Append indicates this file is append only. + Append bool +} + +// Settable returns the subset of f that are settable. +func (f FileFlags) Settable() SettableFileFlags { + return SettableFileFlags{ + Direct: f.Direct, + NonBlocking: f.NonBlocking, + Append: f.Append, + } +} diff --git a/pkg/sentry/fs/fs.go b/pkg/sentry/fs/fs.go new file mode 100644 index 000000000..f54f767d3 --- /dev/null +++ b/pkg/sentry/fs/fs.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fs implements a virtual filesystem layer. +// +// Specific filesystem implementations must implement the InodeOperations +// interface (inode.go). +// +// The MountNamespace (mounts.go) is used to create a collection of mounts in +// a filesystem rooted at a given Inode. +// +// MountSources (mount.go) form a tree, with each mount holding pointers to its +// parent and children. +// +// Dirents (dirents.go) wrap Inodes in a caching layer. 
+// +// When multiple locks are to be held at the same time, they should be acquired +// in the following order. +// +// Either: +// File.mu +// Locks in FileOperations implementations +// goto Dirent-Locks +// +// Or: +// MountNamespace.mu +// goto Dirent-Locks +// +// Dirent-Locks: +// renameMu +// Dirent.dirMu +// Dirent.mu +// DirentCache.mu +// Locks in InodeOperations implementations or overlayEntry +// Inode.Watches.mu (see `Inotify` for other lock ordering) +// MountSource.mu +// +// If multiple Dirent or MountSource locks must be taken, locks in the parent must be +// taken before locks in their children. +// +// If locks must be taken on multiple unrelated Dirents, renameMu must be taken +// first. See lockForRename. +package fs + +import ( + "sync" +) + +// work is a sync.WaitGroup that can be used to queue asynchronous operations +// via Do. Callers can use Barrier to ensure no operations are outstanding. +var work sync.WaitGroup + +// AsyncBarrier waits for all outstanding asynchronous work to complete. +func AsyncBarrier() { + work.Wait() +} + +// Async executes a function asynchronously. +func Async(f func()) { + work.Add(1) + go func() { // S/R-SAFE: Barrier must be called. + defer work.Done() // Ensure Done in case of panic. + f() + }() +} + +// ErrSaveRejection indicates a failed save due to unsupported file system state +// such as dangling open fd, etc. +type ErrSaveRejection struct { + // Err is the wrapped error. + Err error +} + +// Error returns a sensible description of the save rejection error. 
+func (e ErrSaveRejection) Error() string { + return "save rejected due to unsupported file system state: " + e.Err.Error() +} diff --git a/pkg/sentry/fs/fsutil/BUILD b/pkg/sentry/fs/fsutil/BUILD new file mode 100644 index 000000000..4fa6395f7 --- /dev/null +++ b/pkg/sentry/fs/fsutil/BUILD @@ -0,0 +1,149 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "fsutil_state", + srcs = [ + "dirty_set_impl.go", + "file.go", + "file_range_set_impl.go", + "frame_ref_set_impl.go", + "handle.go", + "host_file_mapper.go", + "host_file_mapper_state.go", + "inode.go", + "inode_cached.go", + ], + out = "fsutil_state.go", + package = "fsutil", +) + +go_template_instance( + name = "dirty_set_impl", + out = "dirty_set_impl.go", + imports = { + "memmap": "gvisor.googlesource.com/gvisor/pkg/sentry/memmap", + "platform": "gvisor.googlesource.com/gvisor/pkg/sentry/platform", + }, + package = "fsutil", + prefix = "Dirty", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "memmap.MappableRange", + "Value": "DirtyInfo", + "Functions": "dirtySetFunctions", + }, +) + +go_template_instance( + name = "frame_ref_set_impl", + out = "frame_ref_set_impl.go", + imports = { + "platform": "gvisor.googlesource.com/gvisor/pkg/sentry/platform", + }, + package = "fsutil", + prefix = "frameRef", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "platform.FileRange", + "Value": "uint64", + "Functions": "frameRefSetFunctions", + }, +) + +go_template_instance( + name = "file_range_set_impl", + out = "file_range_set_impl.go", + imports = { + "memmap": "gvisor.googlesource.com/gvisor/pkg/sentry/memmap", + "platform": "gvisor.googlesource.com/gvisor/pkg/sentry/platform", + }, + package = "fsutil", + prefix = "FileRange", + template = 
"//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "memmap.MappableRange", + "Value": "uint64", + "Functions": "fileRangeSetFunctions", + }, +) + +go_library( + name = "fsutil", + srcs = [ + "dirty_set.go", + "dirty_set_impl.go", + "file.go", + "file_range_set.go", + "file_range_set_impl.go", + "frame_ref_set.go", + "frame_ref_set_impl.go", + "fsutil.go", + "fsutil_state.go", + "handle.go", + "host_file_mapper.go", + "host_file_mapper_state.go", + "host_file_mapper_unsafe.go", + "inode.go", + "inode_cached.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/log", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/platform", + "//pkg/sentry/safemem", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) + +go_test( + name = "fsutil_x_test", + size = "small", + srcs = ["handle_test.go"], + deps = [ + ":fsutil", + "//pkg/sentry/context", + "//pkg/sentry/context/contexttest", + "//pkg/sentry/fs", + "//pkg/sentry/fs/ramfs/test", + "//pkg/sentry/usermem", + ], +) + +go_test( + name = "fsutil_test", + size = "small", + srcs = [ + "dirty_set_test.go", + "inode_cached_test.go", + ], + embed = [":fsutil"], + deps = [ + "//pkg/sentry/context", + "//pkg/sentry/context/contexttest", + "//pkg/sentry/fs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/safemem", + "//pkg/sentry/usermem", + ], +) diff --git a/pkg/sentry/fs/fsutil/README.md b/pkg/sentry/fs/fsutil/README.md new file mode 100644 index 000000000..d3780e9fa --- /dev/null +++ b/pkg/sentry/fs/fsutil/README.md @@ -0,0 +1,207 @@ +This package provides utilities for implementing virtual filesystem objects. 
+ +[TOC] + +## Page cache + +`CachingInodeOperations` implements a page cache for files that cannot use the +host page cache. Normally these are files that store their data in a remote +filesystem. This also applies to files that are accessed on a platform that does +not support directly memory mapping host file descriptors (e.g. the ptrace +platform). + +An `CachingInodeOperations` buffers regions of a single file into memory. It is +owned by an `fs.Inode`, the in-memory representation of a file (all open file +descriptors are backed by an `fs.Inode`). The `fs.Inode` provides operations for +reading memory into an `CachingInodeOperations`, to represent the contents of +the file in-memory, and for writing memory out, to relieve memory pressure on +the kernel and to synchronize in-memory changes to filesystems. + +An `CachingInodeOperations` enables readable and/or writable memory access to +file content. Files can be mapped shared or private, see mmap(2). When a file is +mapped shared, changes to the file via write(2) and truncate(2) are reflected in +the shared memory region. Conversely, when the shared memory region is modified, +changes to the file are visible via read(2). Multiple shared mappings of the +same file are coherent with each other. This is consistent with Linux. + +When a file is mapped private, updates to the mapped memory are not visible to +other memory mappings. Updates to the mapped memory are also not reflected in +the file content as seen by read(2). If the file is changed after a private +mapping is created, for instance by write(2), the change to the file may or may +not be reflected in the private mapping. This is consistent with Linux. + +An `CachingInodeOperations` keeps track of ranges of memory that were modified +(or "dirtied"). When the file is explicitly synced via fsync(2), only the dirty +ranges are written out to the filesystem. 
Any error returned indicates a failure +to write all dirty memory of an `CachingInodeOperations` to the filesystem. In +this case the filesystem may be in an inconsistent state. The same operation can +be performed on the shared memory itself using msync(2). If neither fsync(2) nor +msync(2) is performed, then the dirty memory is written out in accordance with +the `CachingInodeOperations` eviction strategy (see below) and there is no +guarantee that memory will be written out successfully in full. + +### Memory allocation and eviction + +An `CachingInodeOperations` implements the following allocation and eviction +strategy: + +- Memory is allocated and brought up to date with the contents of a file when + a region of mapped memory is accessed (or "faulted on"). + +- Dirty memory is written out to filesystems when an fsync(2) or msync(2) + operation is performed on a memory mapped file, for all memory mapped files + when saved, and/or when there are no longer any memory mappings of a range + of a file, see munmap(2). As the latter implies, in the absence of a panic + or SIGKILL, dirty memory is written out for all memory mapped files when an + application exits. + +- Memory is freed when there are no longer any memory mappings of a range of a + file (e.g. when an application exits). This behavior is consistent with + Linux for shared memory that has been locked via mlock(2). + +Notably, memory is not allocated for read(2) or write(2) operations. This means +that reads and writes to the file are only accelerated by an +`CachingInodeOperations` if the file being read or written has been memory +mapped *and* if the shared memory has been accessed at the region being read or +written. This diverges from Linux which buffers memory into a page cache on +read(2) proactively (i.e. readahead) and delays writing it out to filesystems on +write(2) (i.e. writeback). 
The absence of these optimizations is not visible to
+applications beyond less than optimal performance when repeatedly reading and/or
+writing to the same region of a file. See [Future Work](#future-work) for plans to
+implement these optimizations.
+
+Additionally, memory held by `CachingInodeOperations` instances is currently unbounded
+in size. A `CachingInodeOperations` does not write out dirty memory and free it
+under system memory pressure. This can cause pathological memory usage.
+
+When memory is written back, a `CachingInodeOperations` may write regions of
+shared memory that were never modified. This is due to the strategy of
+minimizing page faults (see below) and handling only a subset of memory write
+faults. In the absence of an application or sentry crash, it is guaranteed that
+if a region of shared memory was written to, it is written back to a filesystem.
+
+### Life of a shared memory mapping
+
+A file is memory mapped via mmap(2). For example, if `A` is an address, an
+application may execute:
+
+```
+mmap(A, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+```
+
+This creates a shared mapping of fd that reflects 4k of the contents of fd
+starting at offset 0, accessible at address `A`. This in turn creates a virtual
+memory area ("vma") which indicates that [`A`, `A`+0x1000) is now a valid
+address range for this application to access.
+
+At this point, memory has not been allocated in the file's
+`CachingInodeOperations`. It is also the case that the address range [`A`,
+`A`+0x1000) has not been mapped on the host on behalf of the application. If the
+application then tries to modify 8 bytes of the shared memory:
+
+```
+char buffer[] = "aaaaaaaa";
+memcpy(A, buffer, 8);
+```
+
+The host then sends a `SIGSEGV` to the sentry because the address range [`A`,
+`A`+8) is not mapped on the host. The `SIGSEGV` indicates that the memory was
+accessed writable. 
The sentry looks up the vma associated with [`A`, `A`+8), +finds the file that was mapped and its `CachingInodeOperations`. It then calls +`CachingInodeOperations.MapInto` which allocates memory to back [`A`, `A`+8). It +may choose to allocate more memory (i.e. do "readahead") to minimize subsequent +faults. + +Memory that is allocated comes from a host tmpfs file (see `filemem.FileMem`). +The host tmpfs file memory is brought up to date with the contents of the mapped +file on its filesystem. The region of the host tmpfs file that reflects the +mapped file is then mapped into the host address space of the application so +that subsequent memory accesses do not repeatedly generate a `SIGSEGV`. + +The range that was allocated, including any extra memory allocation to minimize +faults, is marked dirty due to the write fault. This overcounts dirty memory if +the extra memory allocated is never modified. + +To make the scenario more interesting, imagine that this application spawns +another process and maps the same file in the exact same way: + +``` +mmap(A, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); +``` + +Imagine that this process then tries to modify the file again but with only 4 +bytes: + +``` +char buffer[] = "bbbb"; +memcpy(A, buffer, 4); +``` + +Since the first process has already mapped and accessed the same region of the +file writable, `CachingInodeOperations.MapInto` is called but re-maps the memory +that has already been allocated (because the host mapping can be invalidated at +any time) rather than allocating new memory. The address range [`A`, `A`+0x1000) +reflects the same cached view of the file as the first process sees. For +example, reading 8 bytes from the file from either process via read(2) starting +at offset 0 returns a consistent "bbbbaaaa". 
+ +When this process no longer needs the shared memory, it may do: + +``` +munmap(A, 0x1000); +``` + +At this point, the modified memory cached by the `CachingInodeOperations` is not +written back to the file because it is still in use by the first process that +mapped it. When the first process also does: + +``` +munmap(A, 0x1000); +``` + +Then the last memory mapping of the file at the range [0, 0x1000) is gone. The +file's `CachingInodeOperations` then starts writing back memory marked dirty to +the file on its filesystem. Once writing completes, regardless of whether it was +successful, the `CachingInodeOperations` frees the memory cached at the range +[0, 0x1000). + +Subsequent read(2) or write(2) operations on the file go directly to the +filesystem since there no longer exists memory for it in its +`CachingInodeOperations`. + +## Future Work + +### Page cache + +The sentry does not yet implement the readahead and writeback optimizations for +read(2) and write(2) respectively. To do so, on read(2) and/or write(2) the +sentry must ensure that memory is allocated in a page cache to read or write +into. However, the sentry cannot boundlessly allocate memory. If it did, the +host would eventually OOM-kill the sentry+application process. This means that +the sentry must implement a page cache memory allocation strategy that is +bounded by a global user or container imposed limit. When this limit is +approached, the sentry must decide from which page cache memory should be freed +so that it can allocate more memory. If it makes a poor decision, the sentry may +end up freeing and re-allocating memory to back regions of files that are +frequently used, nullifying the optimization (and in some cases causing worse +performance due to the overhead of memory allocation and general management). +This is a form of "cache thrashing". + +In Linux, much research has been done to select and implement a lightweight but +optimal page cache eviction algorithm. 
Linux makes use of hardware page bits to +keep track of whether memory has been accessed. The sentry does not have direct +access to hardware. Implementing a similarly lightweight and optimal page cache +eviction algorithm will need to either introduce a kernel interface to obtain +these page bits or find a suitable alternative proxy for access events. + +In Linux, readahead happens by default but is not always ideal. For instance, +for files that are not read sequentially, it would be more ideal to simply read +from only those regions of the file rather than to optimistically cache some +number of bytes ahead of the read (up to 2MB in Linux) if the bytes cached won't +be accessed. Linux implements the fadvise64(2) system call for applications to +specify that a range of a file will not be accessed sequentially. The advice bit +FADV_RANDOM turns off the readahead optimization for the given range in the +given file. However fadvise64 is rarely used by applications so Linux implements +a readahead backoff strategy if reads are not sequential. To ensure that +application performance is not degraded, the sentry must implement a similar +backoff strategy. diff --git a/pkg/sentry/fs/fsutil/dirty_set.go b/pkg/sentry/fs/fsutil/dirty_set.go new file mode 100644 index 000000000..9c6c98542 --- /dev/null +++ b/pkg/sentry/fs/fsutil/dirty_set.go @@ -0,0 +1,213 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package fsutil

import (
	"math"

	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)

// DirtySet maps offsets into a memmap.Mappable to DirtyInfo. It is used to
// implement Mappables that cache data from another source.
//
// type DirtySet <generated by go_generics>

// DirtyInfo is the value type of DirtySet, and represents information about a
// Mappable offset that is dirty (the cached data for that offset is newer than
// its source).
type DirtyInfo struct {
	// Keep is true if the represented offset is concurrently writable, such
	// that writing the data for that offset back to the source does not
	// guarantee that the offset is clean (since it may be concurrently
	// rewritten after the writeback).
	Keep bool
}

// dirtySetFunctions implements segment.Functions for DirtySet.
type dirtySetFunctions struct{}

// MinKey implements segment.Functions.MinKey.
func (dirtySetFunctions) MinKey() uint64 {
	return 0
}

// MaxKey implements segment.Functions.MaxKey.
func (dirtySetFunctions) MaxKey() uint64 {
	return math.MaxUint64
}

// ClearValue implements segment.Functions.ClearValue.
func (dirtySetFunctions) ClearValue(val *DirtyInfo) {
	// DirtyInfo holds no resources; there is nothing to clear.
}

// Merge implements segment.Functions.Merge.
//
// Adjacent ranges may only be merged when their DirtyInfo is identical;
// otherwise one side's Keep property would be lost.
func (dirtySetFunctions) Merge(_ memmap.MappableRange, val1 DirtyInfo, _ memmap.MappableRange, val2 DirtyInfo) (DirtyInfo, bool) {
	if val1 != val2 {
		return DirtyInfo{}, false
	}
	return val1, true
}

// Split implements segment.Functions.Split. Both halves of a split range
// inherit the original DirtyInfo.
func (dirtySetFunctions) Split(_ memmap.MappableRange, val DirtyInfo, _ uint64) (DirtyInfo, DirtyInfo) {
	return val, val
}

// MarkClean marks all offsets in mr as not dirty, except for those to which
// KeepDirty has been applied.
func (ds *DirtySet) MarkClean(mr memmap.MappableRange) {
	seg := ds.LowerBoundSegment(mr.Start)
	for seg.Ok() && seg.Start() < mr.End {
		if seg.Value().Keep {
			// Offsets marked by KeepDirty survive MarkClean.
			seg = seg.NextSegment()
			continue
		}
		// Isolate restricts seg to mr so that the Remove below cannot
		// drop dirty offsets outside mr.
		seg = ds.Isolate(seg, mr)
		seg = ds.Remove(seg).NextSegment()
	}
}

// KeepClean marks all offsets in mr as not dirty, even those that were
// previously kept dirty by KeepDirty.
func (ds *DirtySet) KeepClean(mr memmap.MappableRange) {
	ds.RemoveRange(mr)
}

// MarkDirty marks all offsets in mr as dirty.
func (ds *DirtySet) MarkDirty(mr memmap.MappableRange) {
	ds.setDirty(mr, false)
}

// KeepDirty marks all offsets in mr as dirty and prevents them from being
// marked as clean by MarkClean.
func (ds *DirtySet) KeepDirty(mr memmap.MappableRange) {
	ds.setDirty(mr, true)
}

// setDirty marks all offsets in mr as dirty, additionally setting
// DirtyInfo.Keep if keep is true. Keep is only ever raised here, never
// lowered: a kept-dirty range stays kept-dirty even if setDirty is later
// called on it with keep == false.
func (ds *DirtySet) setDirty(mr memmap.MappableRange, keep bool) {
	var changedAny bool
	defer func() {
		if changedAny {
			// Re-merge segments fragmented by Isolate/Insert above.
			ds.MergeRange(mr)
		}
	}()
	seg, gap := ds.Find(mr.Start)
	for {
		switch {
		case seg.Ok() && seg.Start() < mr.End:
			// An existing dirty segment overlaps mr; upgrade its
			// Keep bit if requested.
			if keep && !seg.Value().Keep {
				changedAny = true
				seg = ds.Isolate(seg, mr)
				seg.ValuePtr().Keep = true
			}
			seg, gap = seg.NextNonEmpty()

		case gap.Ok() && gap.Start() < mr.End:
			// A hole in the set overlaps mr; fill it with a new
			// dirty segment.
			changedAny = true
			seg = ds.Insert(gap, gap.Range().Intersect(mr), DirtyInfo{keep})
			seg, gap = seg.NextNonEmpty()

		default:
			// Ran past mr.End (or the end of the set); done.
			return
		}
	}
}

// SyncDirty passes pages in the range mr that are stored in cache and
// identified as dirty to writeAt, updating dirty to reflect successful writes.
// If writeAt returns a successful partial write, SyncDirty will call it
// repeatedly until all bytes have been written. max is the true size of the
// cached object; offsets beyond max will not be passed to writeAt, even if
// they are marked dirty.
func SyncDirty(ctx context.Context, mr memmap.MappableRange, cache *FileRangeSet, dirty *DirtySet, max uint64, mem platform.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error {
	var changedDirty bool
	defer func() {
		if changedDirty {
			// Re-merge segments fragmented by Isolate below.
			dirty.MergeRange(mr)
		}
	}()
	dseg := dirty.LowerBoundSegment(mr.Start)
	for dseg.Ok() && dseg.Start() < mr.End {
		var dr memmap.MappableRange
		if dseg.Value().Keep {
			// Kept-dirty segments stay dirty after writeback, so
			// there is no need to Isolate them; just write back the
			// part that overlaps mr.
			dr = dseg.Range().Intersect(mr)
		} else {
			changedDirty = true
			dseg = dirty.Isolate(dseg, mr)
			dr = dseg.Range()
		}
		if err := syncDirtyRange(ctx, dr, cache, max, mem, writeAt); err != nil {
			return err
		}
		if dseg.Value().Keep {
			dseg = dseg.NextSegment()
		} else {
			// The write succeeded, so this segment is now clean.
			dseg = dirty.Remove(dseg).NextSegment()
		}
	}
	return nil
}

// SyncDirtyAll passes all pages stored in cache identified as dirty to
// writeAt, updating dirty to reflect successful writes. If writeAt returns a
// successful partial write, SyncDirtyAll will call it repeatedly until all
// bytes have been written. max is the true size of the cached object; offsets
// beyond max will not be passed to writeAt, even if they are marked dirty.
func SyncDirtyAll(ctx context.Context, cache *FileRangeSet, dirty *DirtySet, max uint64, mem platform.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error {
	dseg := dirty.FirstSegment()
	for dseg.Ok() {
		if err := syncDirtyRange(ctx, dseg.Range(), cache, max, mem, writeAt); err != nil {
			return err
		}
		if dseg.Value().Keep {
			// Kept-dirty segments remain dirty after writeback.
			dseg = dseg.NextSegment()
		} else {
			dseg = dirty.Remove(dseg).NextSegment()
		}
	}
	return nil
}

// syncDirtyRange writes back the cached pages overlapping mr via writeAt.
//
// Preconditions: mr must be page-aligned.
func syncDirtyRange(ctx context.Context, mr memmap.MappableRange, cache *FileRangeSet, max uint64, mem platform.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error {
	for cseg := cache.LowerBoundSegment(mr.Start); cseg.Ok() && cseg.Start() < mr.End; cseg = cseg.NextSegment() {
		wbr := cseg.Range().Intersect(mr)
		if max < wbr.Start {
			// Everything from here on lies entirely beyond the true
			// object size; nothing more to write back.
			break
		}
		// Map the cached pages so their contents can be handed to
		// writeAt.
		ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), usermem.Read)
		if err != nil {
			return err
		}
		if max < wbr.End {
			// Truncate the writeback to the true object size.
			ims = ims.TakeFirst64(max - wbr.Start)
		}
		// writeAt may perform partial writes; loop until every byte has
		// been consumed.
		offset := wbr.Start
		for !ims.IsEmpty() {
			n, err := writeAt(ctx, ims, offset)
			if err != nil {
				return err
			}
			offset += n
			ims = ims.DropFirst64(n)
		}
	}
	return nil
}
diff --git a/pkg/sentry/fs/fsutil/dirty_set_test.go b/pkg/sentry/fs/fsutil/dirty_set_test.go
new file mode 100644
index 000000000..f7693cb19
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/dirty_set_test.go
@@ -0,0 +1,38 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
package fsutil

import (
	"reflect"
	"testing"

	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)

// TestDirtySet checks that ranges marked via KeepDirty survive a subsequent
// MarkClean, while ranges only marked via MarkDirty do not.
func TestDirtySet(t *testing.T) {
	var set DirtySet
	// Dirty two pages, keep-dirty the second, then clean both.
	set.MarkDirty(memmap.MappableRange{0, 2 * usermem.PageSize})
	set.KeepDirty(memmap.MappableRange{usermem.PageSize, 2 * usermem.PageSize})
	set.MarkClean(memmap.MappableRange{0, 2 * usermem.PageSize})
	// Only the kept-dirty second page should remain, with Keep set.
	want := &DirtySegmentDataSlices{
		Start:  []uint64{usermem.PageSize},
		End:    []uint64{2 * usermem.PageSize},
		Values: []DirtyInfo{{Keep: true}},
	}
	if got := set.ExportSortedSlices(); !reflect.DeepEqual(got, want) {
		t.Errorf("set:\n\tgot %v,\n\twant %v", got, want)
	}
}
diff --git a/pkg/sentry/fs/fsutil/file.go b/pkg/sentry/fs/fsutil/file.go
new file mode 100644
index 000000000..a7329f1c9
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/file.go
@@ -0,0 +1,267 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+import (
+	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+	"gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+// NoopRelease implements FileOperations.Release for files that have no
+// resources to release. 
+type NoopRelease struct{} + +// Release is a no-op. +func (NoopRelease) Release() {} + +// SeekWithDirCursor is used to implement fs.FileOperations.Seek. If dirCursor +// is not nil and the seek was on a directory, the cursor will be updated. +// +// Currenly only seeking to 0 on a directory is supported. +// +// FIXME: Lift directory seeking limitations. +func SeekWithDirCursor(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64, dirCursor *string) (int64, error) { + inode := file.Dirent.Inode + current := file.Offset() + + // Does the Inode represents a non-seekable type? + if fs.IsPipe(inode.StableAttr) || fs.IsSocket(inode.StableAttr) { + return current, syserror.ESPIPE + } + + // Does the Inode represent a character device? + if fs.IsCharDevice(inode.StableAttr) { + // Ignore seek requests. + // + // FIXME: This preserves existing + // behavior but is not universally correct. + return 0, nil + } + + // Otherwise compute the new offset. + switch whence { + case fs.SeekSet: + switch inode.StableAttr.Type { + case fs.RegularFile, fs.SpecialFile, fs.BlockDevice: + if offset < 0 { + return current, syserror.EINVAL + } + return offset, nil + case fs.Directory, fs.SpecialDirectory: + if offset != 0 { + return current, syserror.EINVAL + } + // SEEK_SET to 0 moves the directory "cursor" to the beginning. + if dirCursor != nil { + *dirCursor = "" + } + return 0, nil + default: + return current, syserror.EINVAL + } + case fs.SeekCurrent: + switch inode.StableAttr.Type { + case fs.RegularFile, fs.SpecialFile, fs.BlockDevice: + if current+offset < 0 { + return current, syserror.EINVAL + } + return current + offset, nil + case fs.Directory, fs.SpecialDirectory: + if offset != 0 { + return current, syserror.EINVAL + } + return current, nil + default: + return current, syserror.EINVAL + } + case fs.SeekEnd: + switch inode.StableAttr.Type { + case fs.RegularFile, fs.BlockDevice: + // Allow the file to determine the end. 
+			uattr, err := inode.UnstableAttr(ctx)
+			if err != nil {
+				return current, err
+			}
+			sz := uattr.Size
+			if sz+offset < 0 {
+				return current, syserror.EINVAL
+			}
+			return sz + offset, nil
+		// FIXME: This is not universally correct.
+		// Remove SpecialDirectory.
+		case fs.SpecialDirectory:
+			if offset != 0 {
+				return current, syserror.EINVAL
+			}
+			// SEEK_END to 0 moves the directory "cursor" to the end.
+			//
+			// FIXME: This ensures that after the seek,
+			// reading on the directory will get EOF. But it is not
+			// correct in general because the directory can grow in
+			// size; attempting to read those new entries will be
+			// futile (EOF will always be the result).
+			return fs.FileMaxOffset, nil
+		default:
+			return current, syserror.EINVAL
+		}
+	}
+
+	// Not a valid seek request.
+	return current, syserror.EINVAL
+}
+
+// GenericSeek implements FileOperations.Seek for files that use a generic
+// seek implementation.
+type GenericSeek struct{}
+
+// Seek implements fs.FileOperations.Seek.
+func (GenericSeek) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) {
+	return SeekWithDirCursor(ctx, file, whence, offset, nil)
+}
+
+// ZeroSeek implements FileOperations.Seek for files that maintain a constant
+// zero-value offset and require a no-op Seek.
+type ZeroSeek struct{}
+
+// Seek implements FileOperations.Seek.
+func (ZeroSeek) Seek(context.Context, *fs.File, fs.SeekWhence, int64) (int64, error) {
+	return 0, nil
+}
+
+// PipeSeek implements FileOperations.Seek and can be used for files that behave
+// like pipes (seeking is not supported).
+type PipeSeek struct{}
+
+// Seek implements FileOperations.Seek.
+func (PipeSeek) Seek(context.Context, *fs.File, fs.SeekWhence, int64) (int64, error) {
+	return 0, syserror.ESPIPE
+}
+
+// NotDirReaddir implements FileOperations.Readdir for non-directories.
+type NotDirReaddir struct{}
+
+// Readdir implements FileOperations.Readdir.
+func (NotDirReaddir) Readdir(context.Context, *fs.File, fs.DentrySerializer) (int64, error) {
+	return 0, syserror.ENOTDIR
+}
+
+// NoFsync implements FileOperations.Fsync for files that don't support syncing.
+type NoFsync struct{}
+
+// Fsync implements FileOperations.Fsync.
+func (NoFsync) Fsync(context.Context, *fs.File, int64, int64, fs.SyncType) error {
+	return syserror.EINVAL
+}
+
+// NoopFsync implements FileOperations.Fsync for files that don't need to be synced.
+type NoopFsync struct{}
+
+// Fsync implements FileOperations.Fsync.
+func (NoopFsync) Fsync(context.Context, *fs.File, int64, int64, fs.SyncType) error {
+	return nil
+}
+
+// NoopFlush implements FileOperations.Flush as a no-op.
+type NoopFlush struct{}
+
+// Flush implements FileOperations.Flush.
+func (NoopFlush) Flush(context.Context, *fs.File) error {
+	return nil
+}
+
+// NoMMap implements fs.FileOperations.Mappable for files that cannot
+// be memory mapped.
+type NoMMap struct{}
+
+// ConfigureMMap implements fs.FileOperations.ConfigureMMap.
+func (NoMMap) ConfigureMMap(context.Context, *fs.File, *memmap.MMapOpts) error {
+	return syserror.ENODEV
+}
+
+// GenericConfigureMMap implements fs.FileOperations.ConfigureMMap for most
+// filesystems that support memory mapping.
+func GenericConfigureMMap(file *fs.File, m memmap.Mappable, opts *memmap.MMapOpts) error {
+	opts.Mappable = m
+	opts.MappingIdentity = file
+	file.IncRef()
+	return nil
+}
+
+// NoIoctl implements fs.FileOperations.Ioctl for files that don't implement
+// the ioctl syscall.
+type NoIoctl struct{}
+
+// Ioctl implements fs.FileOperations.Ioctl.
+func (NoIoctl) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {
+	return 0, syserror.ENOTTY
+}
+
+// DirFileOperations implements FileOperations for directories.
+type DirFileOperations struct {
+	waiter.AlwaysReady `state:"nosave"`
+	NoopRelease        `state:"nosave"`
+	GenericSeek        `state:"nosave"`
+	NoFsync            `state:"nosave"`
+	NoopFlush          `state:"nosave"`
+	NoMMap             `state:"nosave"`
+	NoIoctl            `state:"nosave"`
+
+	// dentryMap is a SortedDentryMap used to implement Readdir.
+	dentryMap *fs.SortedDentryMap
+
+	// dirCursor contains the name of the last directory entry that was
+	// serialized.
+	dirCursor string
+}
+
+// NewDirFileOperations returns a new DirFileOperations that will iterate the
+// given dentry map.
+func NewDirFileOperations(dentries *fs.SortedDentryMap) *DirFileOperations {
+	return &DirFileOperations{
+		dentryMap: dentries,
+	}
+}
+
+// IterateDir implements DirIterator.IterateDir.
+func (dfo *DirFileOperations) IterateDir(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) {
+	n, err := fs.GenericReaddir(dirCtx, dfo.dentryMap)
+	return offset + n, err
+}
+
+// Readdir implements FileOperations.Readdir.
+func (dfo *DirFileOperations) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) {
+	root := fs.RootFromContext(ctx)
+	defer root.DecRef()
+	dirCtx := &fs.DirCtx{
+		Serializer: serializer,
+		DirCursor:  &dfo.dirCursor,
+	}
+	return fs.DirentReaddir(ctx, file.Dirent, dfo, root, dirCtx, file.Offset())
+}
+
+// Read implements FileOperations.Read.
+func (*DirFileOperations) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
+	return 0, syserror.EISDIR
+}
+
+// Write implements FileOperations.Write.
+func (*DirFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
+	return 0, syserror.EISDIR
+}
diff --git a/pkg/sentry/fs/fsutil/file_range_set.go b/pkg/sentry/fs/fsutil/file_range_set.go
new file mode 100644
index 000000000..da6949ccb
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/file_range_set.go
@@ -0,0 +1,208 @@
+// Copyright 2018 Google Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil + +import ( + "fmt" + "io" + "math" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// FileRangeSet maps offsets into a memmap.Mappable to offsets into a +// platform.File. It is used to implement Mappables that store data in +// sparsely-allocated memory. +// +// type FileRangeSet <generated by go_generics> + +// fileRangeSetFunctions implements segment.Functions for FileRangeSet. +type fileRangeSetFunctions struct{} + +// MinKey implements segment.Functions.MinKey. +func (fileRangeSetFunctions) MinKey() uint64 { + return 0 +} + +// MaxKey implements segment.Functions.MaxKey. +func (fileRangeSetFunctions) MaxKey() uint64 { + return math.MaxUint64 +} + +// ClearValue implements segment.Functions.ClearValue. +func (fileRangeSetFunctions) ClearValue(_ *uint64) { +} + +// Merge implements segment.Functions.Merge. +func (fileRangeSetFunctions) Merge(mr1 memmap.MappableRange, frstart1 uint64, _ memmap.MappableRange, frstart2 uint64) (uint64, bool) { + if frstart1+mr1.Length() != frstart2 { + return 0, false + } + return frstart1, true +} + +// Split implements segment.Functions.Split. 
+func (fileRangeSetFunctions) Split(mr memmap.MappableRange, frstart uint64, split uint64) (uint64, uint64) { + return frstart, frstart + (split - mr.Start) +} + +// FileRange returns the FileRange mapped by seg. +func (seg FileRangeIterator) FileRange() platform.FileRange { + return seg.FileRangeOf(seg.Range()) +} + +// FileRangeOf returns the FileRange mapped by mr. +// +// Preconditions: seg.Range().IsSupersetOf(mr). mr.Length() != 0. +func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) platform.FileRange { + frstart := seg.Value() + (mr.Start - seg.Start()) + return platform.FileRange{frstart, frstart + mr.Length()} +} + +// Fill attempts to ensure that all memmap.Mappable offsets in required are +// mapped to a platform.File offset, by allocating from mem with the given +// memory usage kind and invoking readAt to store data into memory. (If readAt +// returns a successful partial read, Fill will call it repeatedly until all +// bytes have been read.) EOF is handled consistently with the requirements of +// mmap(2): bytes after EOF on the same page are zeroed; pages after EOF are +// invalid. +// +// Fill may read offsets outside of required, but will never read offsets +// outside of optional. It returns a non-nil error if any error occurs, even +// if the error only affects offsets in optional, but not in required. +// +// Preconditions: required.Length() > 0. optional.IsSupersetOf(required). +// required and optional must be page-aligned. +func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, mem platform.Memory, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error { + gap := frs.LowerBoundGap(required.Start) + for gap.Ok() && gap.Start() < required.End { + if gap.Range().Length() == 0 { + gap = gap.NextGap() + continue + } + gr := gap.Range().Intersect(optional) + + // Read data into the gap. 
+ fr, err := platform.AllocateAndFill(mem, gr.Length(), kind, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) { + var done uint64 + for !dsts.IsEmpty() { + n, err := readAt(ctx, dsts, gr.Start+done) + done += n + dsts = dsts.DropFirst64(n) + if err != nil { + if err == io.EOF { + // platform.AllocateAndFill truncates down to a page + // boundary, but FileRangeSet.Fill is supposed to + // zero-fill to the end of the page in this case. + donepgaddr, ok := usermem.Addr(done).RoundUp() + if donepg := uint64(donepgaddr); ok && donepg != done { + dsts.DropFirst64(donepg - done) + done = donepg + if dsts.IsEmpty() { + return done, nil + } + } + } + return done, err + } + } + return done, nil + })) + + // Store anything we managed to read into the cache. + if done := fr.Length(); done != 0 { + gr.End = gr.Start + done + gap = frs.Insert(gap, gr, fr.Start).NextGap() + } + + if err != nil { + return err + } + } + return nil +} + +// Drop removes segments for memmap.Mappable offsets in mr, freeing the +// corresponding platform.FileRanges. +// +// Preconditions: mr must be page-aligned. +func (frs *FileRangeSet) Drop(mr memmap.MappableRange, mem platform.Memory) { + seg := frs.LowerBoundSegment(mr.Start) + for seg.Ok() && seg.Start() < mr.End { + seg = frs.Isolate(seg, mr) + mem.DecRef(seg.FileRange()) + seg = frs.Remove(seg).NextSegment() + } +} + +// DropAll removes all segments in mr, freeing the corresponding +// platform.FileRanges. +func (frs *FileRangeSet) DropAll(mem platform.Memory) { + for seg := frs.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + mem.DecRef(seg.FileRange()) + } + frs.RemoveAll() +} + +// Truncate updates frs to reflect Mappable truncation to the given length: +// bytes after the new EOF on the same page are zeroed, and pages after the new +// EOF are freed. 
+func (frs *FileRangeSet) Truncate(end uint64, mem platform.Memory) { + pgendaddr, ok := usermem.Addr(end).RoundUp() + if ok { + pgend := uint64(pgendaddr) + + // Free truncated pages. + frs.SplitAt(pgend) + seg := frs.LowerBoundSegment(pgend) + for seg.Ok() { + mem.DecRef(seg.FileRange()) + seg = frs.Remove(seg).NextSegment() + } + + if end == pgend { + return + } + } + + // Here we know end < end.RoundUp(). If the new EOF lands in the + // middle of a page that we have, zero out its contents beyond the new + // length. + seg := frs.FindSegment(end) + if seg.Ok() { + fr := seg.FileRange() + fr.Start += end - seg.Start() + ims, err := mem.MapInternal(fr, usermem.Write) + if err != nil { + // There's no good recourse from here. This means + // that we can't keep cached memory consistent with + // the new end of file. The caller may have already + // updated the file size on their backing file system. + // + // We don't want to risk blindly continuing onward, + // so in the extremely rare cases this does happen, + // we abandon ship. + panic(fmt.Sprintf("Failed to map %v: %v", fr, err)) + } + if _, err := safemem.ZeroSeq(ims); err != nil { + panic(fmt.Sprintf("Zeroing %v failed: %v", fr, err)) + } + } +} diff --git a/pkg/sentry/fs/fsutil/frame_ref_set.go b/pkg/sentry/fs/fsutil/frame_ref_set.go new file mode 100644 index 000000000..14dece315 --- /dev/null +++ b/pkg/sentry/fs/fsutil/frame_ref_set.go @@ -0,0 +1,50 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil + +import ( + "math" + + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" +) + +type frameRefSetFunctions struct{} + +// MinKey implements segment.Functions.MinKey. +func (frameRefSetFunctions) MinKey() uint64 { + return 0 +} + +// MaxKey implements segment.Functions.MaxKey. +func (frameRefSetFunctions) MaxKey() uint64 { + return math.MaxUint64 +} + +// ClearValue implements segment.Functions.ClearValue. +func (frameRefSetFunctions) ClearValue(val *uint64) { +} + +// Merge implements segment.Functions.Merge. +func (frameRefSetFunctions) Merge(_ platform.FileRange, val1 uint64, _ platform.FileRange, val2 uint64) (uint64, bool) { + if val1 != val2 { + return 0, false + } + return val1, true +} + +// Split implements segment.Functions.Split. +func (frameRefSetFunctions) Split(_ platform.FileRange, val uint64, _ uint64) (uint64, uint64) { + return val, val +} diff --git a/pkg/sentry/fs/fsutil/fsutil.go b/pkg/sentry/fs/fsutil/fsutil.go new file mode 100644 index 000000000..6fe4ef13d --- /dev/null +++ b/pkg/sentry/fs/fsutil/fsutil.go @@ -0,0 +1,26 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fsutil provides utilities for implementing fs.InodeOperations +// and fs.FileOperations: +// +// - For embeddable utilities, see inode.go and file.go. 
+// +// - For fs.Inodes that require a page cache to be memory mapped, see +// inode_cache.go. +// +// - For fs.Files that implement fs.HandleOps, see handle.go. +// +// - For anon fs.Inodes, see anon.go. +package fsutil diff --git a/pkg/sentry/fs/fsutil/handle.go b/pkg/sentry/fs/fsutil/handle.go new file mode 100644 index 000000000..149c0f84a --- /dev/null +++ b/pkg/sentry/fs/fsutil/handle.go @@ -0,0 +1,126 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// Handle implements FileOperations. +// +// FIXME: Remove Handle entirely in favor of individual fs.File +// implementations using simple generic utilities. +type Handle struct { + NoopRelease `state:"nosave"` + NoIoctl `state:"nosave"` + HandleOperations fs.HandleOperations + + // dirCursor is the directory cursor. + dirCursor string +} + +// NewHandle returns a File backed by the Dirent and FileFlags. 
+func NewHandle(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags, hops fs.HandleOperations) *fs.File { + if !fs.IsPipe(dirent.Inode.StableAttr) && !fs.IsSocket(dirent.Inode.StableAttr) { + // Allow reading/writing at an arbitrary offset for non-pipes + // and non-sockets. + flags.Pread = true + flags.Pwrite = true + } + + return fs.NewFile(ctx, dirent, flags, &Handle{HandleOperations: hops}) +} + +// Readiness implements waiter.Waitable.Readiness. +func (h *Handle) Readiness(mask waiter.EventMask) waiter.EventMask { + return h.HandleOperations.Readiness(mask) +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (h *Handle) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + h.HandleOperations.EventRegister(e, mask) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (h *Handle) EventUnregister(e *waiter.Entry) { + h.HandleOperations.EventUnregister(e) +} + +// Readdir implements FileOperations.Readdir. +func (h *Handle) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) { + root := fs.RootFromContext(ctx) + defer root.DecRef() + dirCtx := &fs.DirCtx{ + Serializer: serializer, + DirCursor: &h.dirCursor, + } + n, err := fs.DirentReaddir(ctx, file.Dirent, h, root, dirCtx, file.Offset()) + return n, err +} + +// Seek implements FileOperations.Seek. +func (h *Handle) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) { + return SeekWithDirCursor(ctx, file, whence, offset, &h.dirCursor) +} + +// IterateDir implements DirIterator.IterateDir. +func (h *Handle) IterateDir(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) { + return h.HandleOperations.DeprecatedReaddir(ctx, dirCtx, offset) +} + +// Read implements FileOperations.Read. 
+func (h *Handle) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + return h.HandleOperations.DeprecatedPreadv(ctx, dst, offset) +} + +// Write implements FileOperations.Write. +func (h *Handle) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + return h.HandleOperations.DeprecatedPwritev(ctx, src, offset) +} + +// Fsync implements FileOperations.Fsync. +func (h *Handle) Fsync(ctx context.Context, file *fs.File, start int64, end int64, syncType fs.SyncType) error { + switch syncType { + case fs.SyncAll, fs.SyncData: + // Write out metadata. + if err := file.Dirent.Inode.WriteOut(ctx); err != nil { + return err + } + fallthrough + case fs.SyncBackingStorage: + // Use DeprecatedFsync to sync disks. + return h.HandleOperations.DeprecatedFsync() + } + panic("invalid sync type") +} + +// Flush implements FileOperations.Flush. +func (h *Handle) Flush(context.Context, *fs.File) error { + return h.HandleOperations.DeprecatedFlush() +} + +// ConfigureMMap implements FileOperations.ConfigureMMap. +func (h *Handle) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + mappable := file.Dirent.Inode.Mappable() + if mappable == nil { + return syserror.ENODEV + } + return GenericConfigureMMap(file, mappable, opts) +} diff --git a/pkg/sentry/fs/fsutil/handle_test.go b/pkg/sentry/fs/fsutil/handle_test.go new file mode 100644 index 000000000..d94c3eb0d --- /dev/null +++ b/pkg/sentry/fs/fsutil/handle_test.go @@ -0,0 +1,227 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil_test + +import ( + "io" + "syscall" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + ramfstest "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs/test" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type testInodeOperations struct { + fs.InodeOperations + fs.InodeType + FileSize int64 + writes uint + reads uint +} + +func (t *testInodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + return fs.UnstableAttr{Size: t.FileSize}, nil +} + +// Check implements InodeOperations.Check. +func (t *testInodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +func (t *testInodeOperations) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + t.reads++ + return t.InodeOperations.DeprecatedPreadv(ctx, dst, offset) +} + +func (t *testInodeOperations) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + t.writes++ + return t.InodeOperations.DeprecatedPwritev(ctx, src, offset) +} + +// testHandle returns a handle for a test node. +// +// The size of the node is fixed at 20 bytes. 
+func testHandle(t *testing.T, flags fs.FileFlags, nt fs.InodeType) (*fs.File, *testInodeOperations) { + ctx := contexttest.Context(t) + m := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}) + n := &testInodeOperations{ + InodeOperations: ramfstest.NewFile(ctx, fs.FilePermissions{User: fs.PermMask{Read: true, Write: true}}), + FileSize: 20, + } + d := fs.NewDirent(fs.NewInode(n, m, fs.StableAttr{Type: nt}), "test") + return fsutil.NewHandle(ctx, d, flags, d.Inode.HandleOps()), n +} + +func TestHandleOps(t *testing.T) { + h, n := testHandle(t, fs.FileFlags{Read: true, Write: true}, fs.RegularFile) + defer h.DecRef() + + // Make sure a write request works. + if n, err := h.Writev(contexttest.Context(t), usermem.BytesIOSequence([]byte("a"))); n != 1 || err != nil { + t.Fatalf("Writev: got (%d, %v), wanted (1, nil)", n, err) + } + if n.writes != 1 { + t.Errorf("found %d writes, expected 1", n.writes) + } + + // Make sure a read request works. + dst := make([]byte, 1) + if n, err := h.Preadv(contexttest.Context(t), usermem.BytesIOSequence(dst), 0); n != 1 || (err != nil && err != io.EOF) { + t.Errorf("Preadv: got (%d, %v), wanted (1, nil or EOF)", n, err) + } + if dst[0] != 'a' { + t.Errorf("Preadv: read %q, wanted 'a'", dst[0]) + } + if n.reads != 1 { + t.Errorf("found %d reads, expected 1", n.reads) + } +} + +type seekTest struct { + whence fs.SeekWhence + offset int64 + result int64 + err error +} + +type seekSuite struct { + nodeType fs.InodeType + cases []seekTest +} + +// FIXME: This is currently missing fs.SeekEnd tests due to the +// fact that NullInodeOperations returns an error on stat. 
+func TestHandleSeek(t *testing.T) { + ts := []seekSuite{ + { + nodeType: fs.RegularFile, + cases: []seekTest{ + {fs.SeekSet, 0, 0, nil}, + {fs.SeekSet, 10, 10, nil}, + {fs.SeekSet, -5, 10, syscall.EINVAL}, + {fs.SeekCurrent, -1, 9, nil}, + {fs.SeekCurrent, 2, 11, nil}, + {fs.SeekCurrent, -12, 11, syscall.EINVAL}, + {fs.SeekEnd, -1, 19, nil}, + {fs.SeekEnd, 0, 20, nil}, + {fs.SeekEnd, 2, 22, nil}, + }, + }, + { + nodeType: fs.Directory, + cases: []seekTest{ + {fs.SeekSet, 0, 0, nil}, + {fs.SeekSet, 10, 0, syscall.EINVAL}, + {fs.SeekSet, -5, 0, syscall.EINVAL}, + {fs.SeekCurrent, 0, 0, nil}, + {fs.SeekCurrent, 11, 0, syscall.EINVAL}, + {fs.SeekCurrent, -6, 0, syscall.EINVAL}, + {fs.SeekEnd, 0, 0, syscall.EINVAL}, + {fs.SeekEnd, -1, 0, syscall.EINVAL}, + {fs.SeekEnd, 2, 0, syscall.EINVAL}, + }, + }, + { + nodeType: fs.Symlink, + cases: []seekTest{ + {fs.SeekSet, 5, 0, syscall.EINVAL}, + {fs.SeekSet, -5, 0, syscall.EINVAL}, + {fs.SeekSet, 0, 0, syscall.EINVAL}, + {fs.SeekCurrent, 5, 0, syscall.EINVAL}, + {fs.SeekCurrent, -5, 0, syscall.EINVAL}, + {fs.SeekCurrent, 0, 0, syscall.EINVAL}, + {fs.SeekEnd, 5, 0, syscall.EINVAL}, + {fs.SeekEnd, -5, 0, syscall.EINVAL}, + {fs.SeekEnd, 0, 0, syscall.EINVAL}, + }, + }, + { + nodeType: fs.Pipe, + cases: []seekTest{ + {fs.SeekSet, 5, 0, syscall.ESPIPE}, + {fs.SeekSet, -5, 0, syscall.ESPIPE}, + {fs.SeekSet, 0, 0, syscall.ESPIPE}, + {fs.SeekCurrent, 5, 0, syscall.ESPIPE}, + {fs.SeekCurrent, -5, 0, syscall.ESPIPE}, + {fs.SeekCurrent, 0, 0, syscall.ESPIPE}, + {fs.SeekEnd, 5, 0, syscall.ESPIPE}, + {fs.SeekEnd, -5, 0, syscall.ESPIPE}, + {fs.SeekEnd, 0, 0, syscall.ESPIPE}, + }, + }, + { + nodeType: fs.Socket, + cases: []seekTest{ + {fs.SeekSet, 5, 0, syscall.ESPIPE}, + {fs.SeekSet, -5, 0, syscall.ESPIPE}, + {fs.SeekSet, 0, 0, syscall.ESPIPE}, + {fs.SeekCurrent, 5, 0, syscall.ESPIPE}, + {fs.SeekCurrent, -5, 0, syscall.ESPIPE}, + {fs.SeekCurrent, 0, 0, syscall.ESPIPE}, + {fs.SeekEnd, 5, 0, syscall.ESPIPE}, + {fs.SeekEnd, -5, 0, 
syscall.ESPIPE}, + {fs.SeekEnd, 0, 0, syscall.ESPIPE}, + }, + }, + { + nodeType: fs.CharacterDevice, + cases: []seekTest{ + {fs.SeekSet, 5, 0, nil}, + {fs.SeekSet, -5, 0, nil}, + {fs.SeekSet, 0, 0, nil}, + {fs.SeekCurrent, 5, 0, nil}, + {fs.SeekCurrent, -5, 0, nil}, + {fs.SeekCurrent, 0, 0, nil}, + {fs.SeekEnd, 5, 0, nil}, + {fs.SeekEnd, -5, 0, nil}, + {fs.SeekEnd, 0, 0, nil}, + }, + }, + { + nodeType: fs.BlockDevice, + cases: []seekTest{ + {fs.SeekSet, 0, 0, nil}, + {fs.SeekSet, 10, 10, nil}, + {fs.SeekSet, -5, 10, syscall.EINVAL}, + {fs.SeekCurrent, -1, 9, nil}, + {fs.SeekCurrent, 2, 11, nil}, + {fs.SeekCurrent, -12, 11, syscall.EINVAL}, + {fs.SeekEnd, -1, 19, nil}, + {fs.SeekEnd, 0, 20, nil}, + {fs.SeekEnd, 2, 22, nil}, + }, + }, + } + + for _, s := range ts { + h, _ := testHandle(t, fs.FileFlags{Read: true, Write: true}, s.nodeType) + defer h.DecRef() + + for _, c := range s.cases { + // Try the given seek. + offset, err := h.Seek(contexttest.Context(t), c.whence, c.offset) + if err != c.err { + t.Errorf("seek(%s, %d) on %s had unexpected error: expected %v, got %v", c.whence, c.offset, s.nodeType, c.err, err) + } + if err == nil && offset != c.result { + t.Errorf("seek(%s, %d) on %s had bad result: expected %v, got %v", c.whence, c.offset, s.nodeType, c.result, offset) + } + } + } +} diff --git a/pkg/sentry/fs/fsutil/host_file_mapper.go b/pkg/sentry/fs/fsutil/host_file_mapper.go new file mode 100644 index 000000000..d0a27fc1c --- /dev/null +++ b/pkg/sentry/fs/fsutil/host_file_mapper.go @@ -0,0 +1,209 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil + +import ( + "fmt" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// HostFileMapper caches mappings of an arbitrary host file descriptor. It is +// used by implementations of memmap.Mappable that represent a host file +// descriptor. +type HostFileMapper struct { + // HostFile conceptually breaks the file into pieces called chunks, of + // size and alignment chunkSize, and caches mappings of the file on a chunk + // granularity. + + refsMu sync.Mutex `state:"nosave"` + + // refs maps chunk start offsets to the sum of reference counts for all + // pages in that chunk. refs is protected by refsMu. + refs map[uint64]int32 + + mapsMu sync.Mutex `state:"nosave"` + + // mappings maps chunk start offsets to mappings of those chunks, + // obtained by calling syscall.Mmap. mappings is protected by + // mapsMu. + mappings map[uint64]mapping `state:"nosave"` +} + +const ( + chunkShift = usermem.HugePageShift + chunkSize = 1 << chunkShift + chunkMask = chunkSize - 1 +) + +func pagesInChunk(mr memmap.MappableRange, chunkStart uint64) int32 { + return int32(mr.Intersect(memmap.MappableRange{chunkStart, chunkStart + chunkSize}).Length() / usermem.PageSize) +} + +type mapping struct { + addr uintptr + writable bool +} + +// NewHostFileMapper returns a HostFileMapper with no references or cached +// mappings. 
+func NewHostFileMapper() *HostFileMapper { + return &HostFileMapper{ + refs: make(map[uint64]int32), + mappings: make(map[uint64]mapping), + } +} + +// IncRefOn increments the reference count on all offsets in mr. +// +// Preconditions: mr.Length() != 0. mr.Start and mr.End must be page-aligned. +func (f *HostFileMapper) IncRefOn(mr memmap.MappableRange) { + f.refsMu.Lock() + defer f.refsMu.Unlock() + for chunkStart := mr.Start &^ chunkMask; chunkStart < mr.End; chunkStart += chunkSize { + refs := f.refs[chunkStart] + pgs := pagesInChunk(mr, chunkStart) + if refs+pgs < refs { + // Would overflow. + panic(fmt.Sprintf("HostFileMapper.IncRefOn(%v): adding %d page references to chunk %#x, which has %d page references", mr, pgs, chunkStart, refs)) + } + f.refs[chunkStart] = refs + pgs + } +} + +// DecRefOn decrements the reference count on all offsets in mr. +// +// Preconditions: mr.Length() != 0. mr.Start and mr.End must be page-aligned. +func (f *HostFileMapper) DecRefOn(mr memmap.MappableRange) { + f.refsMu.Lock() + defer f.refsMu.Unlock() + for chunkStart := mr.Start &^ chunkMask; chunkStart < mr.End; chunkStart += chunkSize { + refs := f.refs[chunkStart] + pgs := pagesInChunk(mr, chunkStart) + switch { + case refs > pgs: + f.refs[chunkStart] = refs - pgs + case refs == pgs: + f.mapsMu.Lock() + delete(f.refs, chunkStart) + if m, ok := f.mappings[chunkStart]; ok { + f.unmapAndRemoveLocked(chunkStart, m) + } + f.mapsMu.Unlock() + case refs < pgs: + panic(fmt.Sprintf("HostFileMapper.DecRefOn(%v): removing %d page references from chunk %#x, which has %d page references", mr, pgs, chunkStart, refs)) + } + } +} + +// MapInternal returns a mapping of offsets in fr from fd. The returned +// safemem.BlockSeq is valid as long as at least one reference is held on all +// offsets in fr or until the next call to UnmapAll. +// +// Preconditions: The caller must hold a reference on all offsets in fr. 
+func (f *HostFileMapper) MapInternal(fr platform.FileRange, fd int, write bool) (safemem.BlockSeq, error) { + chunks := ((fr.End + chunkMask) >> chunkShift) - (fr.Start >> chunkShift) + f.mapsMu.Lock() + defer f.mapsMu.Unlock() + if chunks == 1 { + // Avoid an unnecessary slice allocation. + var seq safemem.BlockSeq + err := f.forEachMappingBlockLocked(fr, fd, write, func(b safemem.Block) { + seq = safemem.BlockSeqOf(b) + }) + return seq, err + } + blocks := make([]safemem.Block, 0, chunks) + err := f.forEachMappingBlockLocked(fr, fd, write, func(b safemem.Block) { + blocks = append(blocks, b) + }) + return safemem.BlockSeqFromSlice(blocks), err +} + +// Preconditions: f.mapsMu must be locked. +func (f *HostFileMapper) forEachMappingBlockLocked(fr platform.FileRange, fd int, write bool, fn func(safemem.Block)) error { + prot := syscall.PROT_READ + if write { + prot |= syscall.PROT_WRITE + } + for chunkStart := fr.Start &^ chunkMask; chunkStart < fr.End; chunkStart += chunkSize { + m, ok := f.mappings[chunkStart] + if !ok { + addr, _, errno := syscall.Syscall6( + syscall.SYS_MMAP, + 0, + chunkSize, + uintptr(prot), + syscall.MAP_SHARED, + uintptr(fd), + uintptr(chunkStart)) + if errno != 0 { + return errno + } + m = mapping{addr, write} + f.mappings[chunkStart] = m + } else if write && !m.writable { + addr, _, errno := syscall.Syscall6( + syscall.SYS_MMAP, + m.addr, + chunkSize, + uintptr(prot), + syscall.MAP_SHARED|syscall.MAP_FIXED, + uintptr(fd), + uintptr(chunkStart)) + if errno != 0 { + return errno + } + m = mapping{addr, write} + f.mappings[chunkStart] = m + } + var startOff uint64 + if chunkStart < fr.Start { + startOff = fr.Start - chunkStart + } + endOff := uint64(chunkSize) + if chunkStart+chunkSize > fr.End { + endOff = fr.End - chunkStart + } + fn(f.unsafeBlockFromChunkMapping(m.addr).TakeFirst64(endOff).DropFirst64(startOff)) + } + return nil +} + +// UnmapAll unmaps all cached mappings. 
Callers are responsible for
+// synchronization with mappings returned by previous calls to MapInternal.
+func (f *HostFileMapper) UnmapAll() {
+	f.mapsMu.Lock()
+	defer f.mapsMu.Unlock()
+	for chunkStart, m := range f.mappings {
+		f.unmapAndRemoveLocked(chunkStart, m)
+	}
+}
+
+// unmapAndRemoveLocked munmaps the cached host mapping m and removes it from
+// f.mappings.
+//
+// Preconditions: f.mapsMu must be locked. f.mappings[chunkStart] == m.
+func (f *HostFileMapper) unmapAndRemoveLocked(chunkStart uint64, m mapping) {
+	if _, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, m.addr, chunkSize, 0); errno != 0 {
+		// This leaks address space and is unexpected, but is otherwise
+		// harmless, so complain but don't panic.
+		log.Warningf("HostFileMapper: failed to unmap mapping %#x for chunk %#x: %v", m.addr, chunkStart, errno)
+	}
+	delete(f.mappings, chunkStart)
+}
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper_state.go b/pkg/sentry/fs/fsutil/host_file_mapper_state.go
new file mode 100644
index 000000000..57705decd
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/host_file_mapper_state.go
@@ -0,0 +1,20 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+// afterLoad is invoked by stateify. 
+func (f *HostFileMapper) afterLoad() {
+	// Host mappings do not survive save/restore; start with an empty cache
+	// and remap lazily. (refs is saved, so reference counts persist.)
+	f.mappings = make(map[uint64]mapping)
+}
diff --git a/pkg/sentry/fs/fsutil/host_file_mapper_unsafe.go b/pkg/sentry/fs/fsutil/host_file_mapper_unsafe.go
new file mode 100644
index 000000000..790f3a5a6
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/host_file_mapper_unsafe.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+// unsafeBlockFromChunkMapping returns a safemem.Block spanning the
+// chunk-sized host mapping that starts at addr.
+func (*HostFileMapper) unsafeBlockFromChunkMapping(addr uintptr) safemem.Block {
+	// We don't control the host file's length, so touching its mappings may
+	// raise SIGBUS. Thus accesses to it must use safecopy.
+	return safemem.BlockFromUnsafePointer((unsafe.Pointer)(addr), chunkSize)
+}
diff --git a/pkg/sentry/fs/fsutil/inode.go b/pkg/sentry/fs/fsutil/inode.go
new file mode 100644
index 000000000..e1ad07df2
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/inode.go
@@ -0,0 +1,380 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+import (
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+	ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix"
+	"gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+// NewSimpleInodeOperations constructs fs.InodeOperations from InodeSimpleAttributes.
+func NewSimpleInodeOperations(i InodeSimpleAttributes) fs.InodeOperations {
+	return &simpleInodeOperations{InodeSimpleAttributes: i}
+}
+
+// simpleInodeOperations is a simple implementation of Inode. All non-trivial
+// operations are stubbed out via the embedded InodeNot*/Deprecated* types;
+// only the attribute accessors from InodeSimpleAttributes do real work.
+type simpleInodeOperations struct {
+	DeprecatedFileOperations  `state:"nosave"`
+	InodeNotDirectory         `state:"nosave"`
+	InodeNotSocket            `state:"nosave"`
+	InodeNotRenameable        `state:"nosave"`
+	InodeNotOpenable          `state:"nosave"`
+	InodeNotVirtual           `state:"nosave"`
+	InodeNotSymlink           `state:"nosave"`
+	InodeNoExtendedAttributes `state:"nosave"`
+	NoMappable                `state:"nosave"`
+	NoopWriteOut              `state:"nosave"`
+
+	InodeSimpleAttributes
+}
+
+// InodeSimpleAttributes implements a subset of the Inode interface. It provides
+// read-only access to attributes.
type InodeSimpleAttributes struct {
+	// FSType is the filesystem type reported by StatFS.
+	FSType uint64
+
+	// UAttr are the unstable attributes of the Inode.
+	UAttr fs.UnstableAttr
+}
+
+// Release implements fs.InodeOperations.Release.
+func (i *InodeSimpleAttributes) Release(context.Context) {}
+
+// StatFS implements fs.InodeOperations.StatFS.
+func (i *InodeSimpleAttributes) StatFS(context.Context) (fs.Info, error) {
+	return fs.Info{Type: i.FSType}, nil
+}
+
+// UnstableAttr implements fs.InodeOperations.UnstableAttr. 
+func (i *InodeSimpleAttributes) UnstableAttr(context.Context, *fs.Inode) (fs.UnstableAttr, error) {
+	return i.UAttr, nil
+}
+
+// Check implements fs.InodeOperations.Check.
+func (i *InodeSimpleAttributes) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {
+	return fs.ContextCanAccessFile(ctx, inode, p)
+}
+
+// AddLink implements fs.InodeOperations.AddLink.
+func (*InodeSimpleAttributes) AddLink() {}
+
+// DropLink implements fs.InodeOperations.DropLink.
+func (*InodeSimpleAttributes) DropLink() {}
+
+// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange.
+func (i *InodeSimpleAttributes) NotifyStatusChange(ctx context.Context) {
+	i.UAttr.StatusChangeTime = ktime.NowFromContext(ctx)
+}
+
+// SetPermissions implements fs.InodeOperations.SetPermissions.
+// Attributes are read-only here, so the change is always refused.
+func (*InodeSimpleAttributes) SetPermissions(context.Context, *fs.Inode, fs.FilePermissions) bool {
+	return false
+}
+
+// SetOwner implements fs.InodeOperations.SetOwner.
+func (*InodeSimpleAttributes) SetOwner(context.Context, *fs.Inode, fs.FileOwner) error {
+	return syserror.EINVAL
+}
+
+// SetTimestamps implements fs.InodeOperations.SetTimestamps.
+func (*InodeSimpleAttributes) SetTimestamps(context.Context, *fs.Inode, fs.TimeSpec) error {
+	return syserror.EINVAL
+}
+
+// Truncate implements fs.InodeOperations.Truncate.
+func (*InodeSimpleAttributes) Truncate(context.Context, *fs.Inode, int64) error {
+	return syserror.EINVAL
+}
+
+// InMemoryAttributes implements utilities for updating in-memory unstable
+// attributes and extended attributes. It is not thread-safe.
+//
+// Users need not initialize Xattrs to non-nil (it will be initialized
+// when the first extended attribute is set).
+type InMemoryAttributes struct {
+	Unstable fs.UnstableAttr
+	Xattrs   map[string][]byte
+}
+
+// SetPermissions updates the permissions to p. 
+func (i *InMemoryAttributes) SetPermissions(ctx context.Context, p fs.FilePermissions) bool {
+	i.Unstable.Perms = p
+	i.Unstable.StatusChangeTime = ktime.NowFromContext(ctx)
+	return true
+}
+
+// SetOwner updates the file owner to owner. Only the UID/GID components that
+// are set (Ok) in owner are applied; it always returns nil.
+//
+// NOTE(review): unlike SetPermissions, this does not update
+// StatusChangeTime — callers appear responsible for that; confirm.
+func (i *InMemoryAttributes) SetOwner(ctx context.Context, owner fs.FileOwner) error {
+	if owner.UID.Ok() {
+		i.Unstable.Owner.UID = owner.UID
+	}
+	if owner.GID.Ok() {
+		i.Unstable.Owner.GID = owner.GID
+	}
+	return nil
+}
+
+// SetTimestamps sets the timestamps to ts, honoring the Omit and
+// SetSystemTime flags (utimensat-style semantics). Status change time is
+// always refreshed unless both times are omitted.
+func (i *InMemoryAttributes) SetTimestamps(ctx context.Context, ts fs.TimeSpec) error {
+	if ts.ATimeOmit && ts.MTimeOmit {
+		return nil
+	}
+
+	now := ktime.NowFromContext(ctx)
+	if !ts.ATimeOmit {
+		if ts.ATimeSetSystemTime {
+			i.Unstable.AccessTime = now
+		} else {
+			i.Unstable.AccessTime = ts.ATime
+		}
+	}
+	if !ts.MTimeOmit {
+		if ts.MTimeSetSystemTime {
+			i.Unstable.ModificationTime = now
+		} else {
+			i.Unstable.ModificationTime = ts.MTime
+		}
+	}
+	i.Unstable.StatusChangeTime = now
+	return nil
+}
+
+// TouchAccessTime updates access time to the current time.
+func (i *InMemoryAttributes) TouchAccessTime(ctx context.Context) {
+	i.Unstable.AccessTime = ktime.NowFromContext(ctx)
+}
+
+// TouchModificationTime updates modification and status change
+// time to the current time.
+func (i *InMemoryAttributes) TouchModificationTime(ctx context.Context) {
+	now := ktime.NowFromContext(ctx)
+	i.Unstable.ModificationTime = now
+	i.Unstable.StatusChangeTime = now
+}
+
+// TouchStatusChangeTime updates status change time to the current time.
+func (i *InMemoryAttributes) TouchStatusChangeTime(ctx context.Context) {
+	i.Unstable.StatusChangeTime = ktime.NowFromContext(ctx)
+}
+
+// Getxattr returns the extended attribute at name or ENOATTR if
+// it isn't set. 
+func (i *InMemoryAttributes) Getxattr(name string) ([]byte, error) {
+	if value, ok := i.Xattrs[name]; ok {
+		return value, nil
+	}
+	return nil, syserror.ENOATTR
+}
+
+// Setxattr sets the extended attribute at name to value, lazily allocating
+// the Xattrs map on first use.
+func (i *InMemoryAttributes) Setxattr(name string, value []byte) error {
+	if i.Xattrs == nil {
+		i.Xattrs = make(map[string][]byte)
+	}
+	i.Xattrs[name] = value
+	return nil
+}
+
+// Listxattr returns the set of all currently set extended attributes.
+func (i *InMemoryAttributes) Listxattr() (map[string]struct{}, error) {
+	names := make(map[string]struct{}, len(i.Xattrs))
+	for name := range i.Xattrs {
+		names[name] = struct{}{}
+	}
+	return names, nil
+}
+
+// NoMappable returns a nil memmap.Mappable.
+type NoMappable struct{}
+
+// Mappable implements fs.InodeOperations.Mappable.
+func (NoMappable) Mappable(*fs.Inode) memmap.Mappable {
+	return nil
+}
+
+// NoopWriteOut is a no-op implementation of Inode.WriteOut.
+type NoopWriteOut struct{}
+
+// WriteOut is a no-op.
+func (NoopWriteOut) WriteOut(context.Context, *fs.Inode) error {
+	return nil
+}
+
+// InodeNotDirectory can be used by Inodes that are not directories.
+// Every directory operation fails with ENOTDIR.
+type InodeNotDirectory struct{}
+
+// Lookup implements fs.InodeOperations.Lookup.
+func (InodeNotDirectory) Lookup(context.Context, *fs.Inode, string) (*fs.Dirent, error) {
+	return nil, syserror.ENOTDIR
+}
+
+// Create implements fs.InodeOperations.Create.
+func (InodeNotDirectory) Create(context.Context, *fs.Inode, string, fs.FileFlags, fs.FilePermissions) (*fs.File, error) {
+	return nil, syserror.ENOTDIR
+}
+
+// CreateLink implements fs.InodeOperations.CreateLink.
+func (InodeNotDirectory) CreateLink(context.Context, *fs.Inode, string, string) error {
+	return syserror.ENOTDIR
+}
+
+// CreateHardLink implements fs.InodeOperations.CreateHardLink. 
+func (InodeNotDirectory) CreateHardLink(context.Context, *fs.Inode, *fs.Inode, string) error {
+	return syserror.ENOTDIR
+}
+
+// CreateDirectory implements fs.InodeOperations.CreateDirectory.
+func (InodeNotDirectory) CreateDirectory(context.Context, *fs.Inode, string, fs.FilePermissions) error {
+	return syserror.ENOTDIR
+}
+
+// Bind implements fs.InodeOperations.Bind.
+func (InodeNotDirectory) Bind(context.Context, *fs.Inode, string, unix.BoundEndpoint, fs.FilePermissions) error {
+	return syserror.ENOTDIR
+}
+
+// CreateFifo implements fs.InodeOperations.CreateFifo.
+func (InodeNotDirectory) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error {
+	return syserror.ENOTDIR
+}
+
+// Remove implements fs.InodeOperations.Remove.
+func (InodeNotDirectory) Remove(context.Context, *fs.Inode, string) error {
+	return syserror.ENOTDIR
+}
+
+// RemoveDirectory implements fs.InodeOperations.RemoveDirectory.
+func (InodeNotDirectory) RemoveDirectory(context.Context, *fs.Inode, string) error {
+	return syserror.ENOTDIR
+}
+
+// InodeNotSocket can be used by Inodes that are not sockets.
+type InodeNotSocket struct{}
+
+// BoundEndpoint implements fs.InodeOperations.BoundEndpoint.
+func (InodeNotSocket) BoundEndpoint(*fs.Inode, string) unix.BoundEndpoint {
+	return nil
+}
+
+// InodeNotRenameable can be used by Inodes that cannot be renamed.
+type InodeNotRenameable struct{}
+
+// Rename implements fs.InodeOperations.Rename.
+func (InodeNotRenameable) Rename(context.Context, *fs.Inode, string, *fs.Inode, string) error {
+	return syserror.EINVAL
+}
+
+// InodeNotOpenable can be used by Inodes that cannot be opened.
+type InodeNotOpenable struct{}
+
+// GetFile implements fs.InodeOperations.GetFile.
+func (InodeNotOpenable) GetFile(context.Context, *fs.Dirent, fs.FileFlags) (*fs.File, error) {
+	return nil, syserror.EIO
+}
+
+// InodeNotVirtual can be used by Inodes that are not virtual. 
+type InodeNotVirtual struct{}
+
+// IsVirtual implements fs.InodeOperations.IsVirtual.
+func (InodeNotVirtual) IsVirtual() bool {
+	return false
+}
+
+// InodeNotSymlink can be used by Inodes that are not symlinks.
+type InodeNotSymlink struct{}
+
+// Readlink implements fs.InodeOperations.Readlink.
+func (InodeNotSymlink) Readlink(context.Context, *fs.Inode) (string, error) {
+	return "", syserror.ENOLINK
+}
+
+// Getlink implements fs.InodeOperations.Getlink.
+func (InodeNotSymlink) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) {
+	return nil, syserror.ENOLINK
+}
+
+// InodeNoExtendedAttributes can be used by Inodes that do not support
+// extended attributes.
+type InodeNoExtendedAttributes struct{}
+
+// Getxattr implements fs.InodeOperations.Getxattr.
+func (InodeNoExtendedAttributes) Getxattr(*fs.Inode, string) ([]byte, error) {
+	return nil, syserror.EOPNOTSUPP
+}
+
+// Setxattr implements fs.InodeOperations.Setxattr.
+func (InodeNoExtendedAttributes) Setxattr(*fs.Inode, string, []byte) error {
+	return syserror.EOPNOTSUPP
+}
+
+// Listxattr implements fs.InodeOperations.Listxattr.
+func (InodeNoExtendedAttributes) Listxattr(*fs.Inode) (map[string]struct{}, error) {
+	return nil, syserror.EOPNOTSUPP
+}
+
+// DeprecatedFileOperations panics if any deprecated Inode method is called.
+// Embed it in InodeOperations implementations that must never reach these
+// legacy entry points.
+type DeprecatedFileOperations struct{}
+
+// Readiness implements fs.InodeOperations.Waitable.Readiness.
+func (DeprecatedFileOperations) Readiness(waiter.EventMask) waiter.EventMask {
+	panic("not implemented")
+}
+
+// EventRegister implements fs.InodeOperations.Waitable.EventRegister.
+func (DeprecatedFileOperations) EventRegister(*waiter.Entry, waiter.EventMask) {
+	panic("not implemented")
+}
+
+// EventUnregister implements fs.InodeOperations.Waitable.EventUnregister.
+func (DeprecatedFileOperations) EventUnregister(*waiter.Entry) {
+	panic("not implemented")
+}
+
+// DeprecatedPreadv implements fs.InodeOperations.DeprecatedPreadv. 
+func (DeprecatedFileOperations) DeprecatedPreadv(context.Context, usermem.IOSequence, int64) (int64, error) {
+	panic("not implemented")
+}
+
+// DeprecatedPwritev implements fs.InodeOperations.DeprecatedPwritev.
+func (DeprecatedFileOperations) DeprecatedPwritev(context.Context, usermem.IOSequence, int64) (int64, error) {
+	panic("not implemented")
+}
+
+// DeprecatedReaddir implements fs.InodeOperations.DeprecatedReaddir.
+func (DeprecatedFileOperations) DeprecatedReaddir(context.Context, *fs.DirCtx, int) (int, error) {
+	panic("not implemented")
+}
+
+// DeprecatedFsync implements fs.InodeOperations.DeprecatedFsync.
+func (DeprecatedFileOperations) DeprecatedFsync() error {
+	panic("not implemented")
+}
+
+// DeprecatedFlush implements fs.InodeOperations.DeprecatedFlush.
+func (DeprecatedFileOperations) DeprecatedFlush() error {
+	panic("not implemented")
+}
+
+// DeprecatedMappable implements fs.InodeOperations.DeprecatedMappable.
+func (DeprecatedFileOperations) DeprecatedMappable(context.Context, *fs.Inode) (memmap.Mappable, bool) {
+	panic("not implemented")
+}
diff --git a/pkg/sentry/fs/fsutil/inode_cached.go b/pkg/sentry/fs/fsutil/inode_cached.go
new file mode 100644
index 000000000..484668735
--- /dev/null
+++ b/pkg/sentry/fs/fsutil/inode_cached.go
@@ -0,0 +1,845 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fsutil
+
+import (
+	"fmt"
+	"io"
+	"sync"
+
+	"gvisor.googlesource.com/gvisor/pkg/log"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+	ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// Lock order (compare the lock order model in mm/mm.go):
+//
+// CachingInodeOperations.attrMu ("fs locks")
+//   CachingInodeOperations.mapsMu ("memmap.Mappable locks not taken by Translate")
+//     CachingInodeOperations.dataMu ("memmap.Mappable locks taken by Translate")
+//       CachedFileObject locks
+
+// CachingInodeOperations caches the metadata and content of a CachedFileObject.
+// It implements a subset of InodeOperations. As a utility it can be used to
+// implement the full set of InodeOperations. Generally it should not be
+// embedded to avoid unexpected inherited behavior.
+//
+// CachingInodeOperations implements Mappable for the CachedFileObject:
+//
+// - If CachedFileObject.FD returns a value >= 0 and the current platform shares
+// a host fd table with the sentry, then the value of CachedFileObject.FD
+// will be memory mapped on the host.
+//
+// - Otherwise, the contents of CachedFileObject are buffered into memory
+// managed by the CachingInodeOperations.
+//
+// Implementations of FileOperations for a CachedFileObject must read and
+// write through CachingInodeOperations using Read and Write respectively.
+//
+// Implementations of InodeOperations.WriteOut must call Sync to write out
+// in-memory modifications of data and metadata to the CachedFileObject.
+type CachingInodeOperations struct {
+	// backingFile is a handle to a cached file object.
+	backingFile CachedFileObject
+
+	// platform is used to allocate memory that caches backingFile's contents.
+	platform platform.Platform
+
+	// forcePageCache indicates the sentry page cache should be used regardless
+	// of whether the platform supports host mapped I/O or not. This must not be
+	// modified after inode creation.
+	forcePageCache bool
+
+	attrMu sync.Mutex `state:"nosave"`
+
+	// attr is unstable cached metadata.
+	//
+	// attr is protected by attrMu. attr.Size is protected by both attrMu and
+	// dataMu; reading it requires locking either mutex, while mutating it
+	// requires locking both.
+	attr fs.UnstableAttr
+
+	// dirtyAttr is metadata that was updated in-place but hasn't yet
+	// been successfully written out.
+	//
+	// dirtyAttr is protected by attrMu.
+	dirtyAttr fs.AttrMask
+
+	mapsMu sync.Mutex `state:"nosave"`
+
+	// mappings tracks mappings of the cached file object into
+	// memmap.MappingSpaces.
+	//
+	// mappings is protected by mapsMu.
+	mappings memmap.MappingSet
+
+	dataMu sync.RWMutex `state:"nosave"`
+
+	// cache maps offsets into the cached file to offsets into
+	// platform.Memory() that store the file's data.
+	//
+	// cache is protected by dataMu.
+	cache FileRangeSet
+
+	// dirty tracks dirty segments in cache.
+	//
+	// dirty is protected by dataMu.
+	dirty DirtySet
+
+	// hostFileMapper caches internal mappings of backingFile.FD().
+	hostFileMapper *HostFileMapper
+
+	// refs tracks active references to data in the cache.
+	//
+	// refs is protected by dataMu.
+	refs frameRefSet
+}
+
+// CachedFileObject is a file that may require caching.
+type CachedFileObject interface {
+	// ReadToBlocksAt reads up to dsts.NumBytes() bytes from the file to dsts,
+	// starting at offset, and returns the number of bytes read. ReadToBlocksAt
+	// may return a partial read without an error.
+	ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)
+
+	// WriteFromBlocksAt writes up to srcs.NumBytes() bytes from srcs to the
+	// file, starting at offset, and returns the number of bytes written.
+	// WriteFromBlocksAt may return a partial write without an error.
+	WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)
+
+	// SetMaskedAttributes sets the attributes in attr that are true in mask
+	// on the backing file.
+	//
+	// SetMaskedAttributes may be called at any point, regardless of whether
+	// the file was opened.
+	SetMaskedAttributes(ctx context.Context, mask fs.AttrMask, attr fs.UnstableAttr) error
+
+	// Sync instructs the remote filesystem to sync the file to stable storage.
+	Sync(ctx context.Context) error
+
+	// FD returns a host file descriptor. Return value must be -1 or not -1
+	// for the lifetime of the CachedFileObject.
+	//
+	// FD is called iff the file has been memory mapped. This implies that
+	// the file was opened (see fs.InodeOperations.GetFile).
+	//
+	// FIXME: This interface seems to be
+	// fundamentally broken. We should clarify CachingInodeOperation's
+	// behavior with metadata.
+	FD() int
+}
+
+// NewCachingInodeOperations returns a new CachingInodeOperations backed by
+// a CachedFileObject and its initial unstable attributes.
+//
+// It panics if ctx does not carry a platform, since the platform's memory
+// is required to cache file contents.
+func NewCachingInodeOperations(ctx context.Context, backingFile CachedFileObject, uattr fs.UnstableAttr, forcePageCache bool) *CachingInodeOperations {
+	p := platform.FromContext(ctx)
+	if p == nil {
+		panic(fmt.Sprintf("context.Context %T lacks non-nil value for key %T", ctx, platform.CtxPlatform))
+	}
+	return &CachingInodeOperations{
+		backingFile:    backingFile,
+		platform:       p,
+		forcePageCache: forcePageCache,
+		attr:           uattr,
+		hostFileMapper: NewHostFileMapper(),
+	}
+}
+
+// Release implements fs.InodeOperations.Release. 
+func (c *CachingInodeOperations) Release() {
+	c.mapsMu.Lock()
+	defer c.mapsMu.Unlock()
+	c.dataMu.Lock()
+	defer c.dataMu.Unlock()
+	// The cache should be empty (something has gone terribly wrong if we're
+	// releasing an inode that is still memory-mapped).
+	if !c.mappings.IsEmpty() || !c.cache.IsEmpty() || !c.dirty.IsEmpty() {
+		panic(fmt.Sprintf("Releasing CachingInodeOperations with mappings:\n%s\ncache contents:\n%s\ndirty segments:\n%s", &c.mappings, &c.cache, &c.dirty))
+	}
+}
+
+// UnstableAttr implements fs.InodeOperations.UnstableAttr.
+func (c *CachingInodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
+	c.attrMu.Lock()
+	defer c.attrMu.Unlock()
+	return c.attr, nil
+}
+
+// SetPermissions implements fs.InodeOperations.SetPermissions.
+// The change is written through to the backing file first; the cached
+// attributes are only updated if that succeeds.
+func (c *CachingInodeOperations) SetPermissions(ctx context.Context, inode *fs.Inode, perms fs.FilePermissions) bool {
+	c.attrMu.Lock()
+	defer c.attrMu.Unlock()
+
+	masked := fs.AttrMask{Perms: true}
+	if err := c.backingFile.SetMaskedAttributes(ctx, masked, fs.UnstableAttr{Perms: perms}); err != nil {
+		return false
+	}
+	c.attr.Perms = perms
+	// FIXME: Clarify CachingInodeOperations behavior with metadata.
+	c.dirtyAttr.Perms = true
+	c.touchStatusChangeTimeLocked(ctx)
+	return true
+
+}
+
+// SetOwner implements fs.InodeOperations.SetOwner. Only the UID/GID
+// components that are set (Ok) in owner are applied.
+func (c *CachingInodeOperations) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error {
+	if !owner.UID.Ok() && !owner.GID.Ok() {
+		return nil
+	}
+
+	c.attrMu.Lock()
+	defer c.attrMu.Unlock()
+
+	masked := fs.AttrMask{
+		UID: owner.UID.Ok(),
+		GID: owner.GID.Ok(),
+	}
+	if err := c.backingFile.SetMaskedAttributes(ctx, masked, fs.UnstableAttr{Owner: owner}); err != nil {
+		return err
+	}
+	if owner.UID.Ok() {
+		c.attr.Owner.UID = owner.UID
+		// FIXME: Clarify CachingInodeOperations behavior with metadata.
+		c.dirtyAttr.UID = true
+	}
+	if owner.GID.Ok() {
+		c.attr.Owner.GID = owner.GID
+		// FIXME: Clarify CachingInodeOperations behavior with metadata.
+		c.dirtyAttr.GID = true
+	}
+	c.touchStatusChangeTimeLocked(ctx)
+	return nil
+}
+
+// SetTimestamps implements fs.InodeOperations.SetTimestamps, with
+// utimensat-style Omit/SetSystemTime semantics.
+func (c *CachingInodeOperations) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error {
+	if ts.ATimeOmit && ts.MTimeOmit {
+		return nil
+	}
+
+	c.attrMu.Lock()
+	defer c.attrMu.Unlock()
+
+	// Replace requests to use the "system time" with the current time to
+	// ensure that cached timestamps remain consistent with the remote
+	// filesystem.
+	now := ktime.NowFromContext(ctx)
+	if ts.ATimeSetSystemTime {
+		ts.ATime = now
+	}
+	if ts.MTimeSetSystemTime {
+		ts.MTime = now
+	}
+	masked := fs.AttrMask{
+		AccessTime:       !ts.ATimeOmit,
+		ModificationTime: !ts.MTimeOmit,
+	}
+	if err := c.backingFile.SetMaskedAttributes(ctx, masked, fs.UnstableAttr{AccessTime: ts.ATime, ModificationTime: ts.MTime}); err != nil {
+		return err
+	}
+	if !ts.ATimeOmit {
+		c.attr.AccessTime = ts.ATime
+		// FIXME: Clarify CachingInodeOperations behavior with metadata.
+		c.dirtyAttr.AccessTime = true
+	}
+	if !ts.MTimeOmit {
+		c.attr.ModificationTime = ts.MTime
+		// FIXME: Clarify CachingInodeOperations behavior with metadata.
+		c.dirtyAttr.ModificationTime = true
+	}
+	c.touchStatusChangeTimeLocked(ctx)
+	return nil
+}
+
+// Truncate implements fs.InodeOperations.Truncate.
+func (c *CachingInodeOperations) Truncate(ctx context.Context, inode *fs.Inode, size int64) error {
+	c.attrMu.Lock()
+	defer c.attrMu.Unlock()
+
+	// c.attr.Size is protected by both c.attrMu and c.dataMu.
+	c.dataMu.Lock()
+	if err := c.backingFile.SetMaskedAttributes(ctx, fs.AttrMask{
+		Size: true,
+	}, fs.UnstableAttr{
+		Size: size,
+	}); err != nil {
+		c.dataMu.Unlock()
+		return err
+	}
+	oldSize := c.attr.Size
+	if oldSize != size {
+		c.attr.Size = size
+		// FIXME: Clarify CachingInodeOperations behavior with metadata.
+		c.dirtyAttr.Size = true
+		c.touchModificationTimeLocked(ctx)
+	}
+	// We drop c.dataMu here so that we can lock c.mapsMu and invalidate
+	// mappings below. This allows concurrent calls to Read/Translate/etc.
+	// These functions synchronize with an in-progress Truncate by refusing to
+	// use cache contents beyond the new c.attr.Size. (We are still holding
+	// c.attrMu, so we can't race with Truncate/Write.)
+	c.dataMu.Unlock()
+
+	// Nothing left to do unless shrinking the file.
+	if size >= oldSize {
+		return nil
+	}
+
+	oldpgend := fs.OffsetPageEnd(oldSize)
+	newpgend := fs.OffsetPageEnd(size)
+
+	// Invalidate past translations of truncated pages.
+	if newpgend != oldpgend {
+		c.mapsMu.Lock()
+		c.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{
+			// Compare Linux's mm/truncate.c:truncate_setsize() =>
+			// truncate_pagecache() =>
+			// mm/memory.c:unmap_mapping_range(evencows=1).
+			InvalidatePrivate: true,
+		})
+		c.mapsMu.Unlock()
+	}
+
+	// We are now guaranteed that there are no translations of truncated pages,
+	// and can remove them from the cache. Since truncated pages have been
+	// removed from the backing file, they should be dropped without being
+	// written back.
+	c.dataMu.Lock()
+	defer c.dataMu.Unlock()
+	c.cache.Truncate(uint64(size), c.platform.Memory())
+	c.dirty.KeepClean(memmap.MappableRange{uint64(size), oldpgend})
+
+	return nil
+}
+
+// WriteOut implements fs.InodeOperations.WriteOut. It flushes dirty cached
+// data, then dirty cached attributes, and finally fsyncs the backing file.
+func (c *CachingInodeOperations) WriteOut(ctx context.Context, inode *fs.Inode) error {
+	c.attrMu.Lock()
+
+	// Write dirty pages back.
+	c.dataMu.RLock()
+	err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt)
+	c.dataMu.RUnlock()
+	if err != nil {
+		c.attrMu.Unlock()
+		return err
+	}
+
+	// Write out cached attributes.
+	if err := c.backingFile.SetMaskedAttributes(ctx, c.dirtyAttr, c.attr); err != nil {
+		c.attrMu.Unlock()
+		return err
+	}
+	c.dirtyAttr = fs.AttrMask{}
+
+	c.attrMu.Unlock()
+
+	// Fsync the remote file.
+	return c.backingFile.Sync(ctx)
+}
+
+// IncLinks increases the link count and updates cached modification and
+// status change time.
+func (c *CachingInodeOperations) IncLinks(ctx context.Context) {
+	c.attrMu.Lock()
+	c.attr.Links++
+	c.touchModificationTimeLocked(ctx)
+	c.attrMu.Unlock()
+}
+
+// DecLinks decreases the link count and updates cached modification and
+// status change time.
+func (c *CachingInodeOperations) DecLinks(ctx context.Context) {
+	c.attrMu.Lock()
+	c.attr.Links--
+	c.touchModificationTimeLocked(ctx)
+	c.attrMu.Unlock()
+}
+
+// TouchAccessTime updates the cached access time in-place to the
+// current time. It does not update status change time in-place. See
+// mm/filemap.c:do_generic_file_read -> include/linux/fs.h:file_accessed().
+func (c *CachingInodeOperations) TouchAccessTime(ctx context.Context, inode *fs.Inode) {
+	// Respect the noatime mount option.
+	if inode.MountSource.Flags.NoAtime {
+		return
+	}
+
+	c.attrMu.Lock()
+	c.touchAccessTimeLocked(ctx)
+	c.attrMu.Unlock()
+}
+
+// touchAccessTimeLocked updates the cached access time in-place to the current
+// time.
+//
+// Preconditions: c.attrMu is locked for writing.
+func (c *CachingInodeOperations) touchAccessTimeLocked(ctx context.Context) {
+	c.attr.AccessTime = ktime.NowFromContext(ctx)
+	c.dirtyAttr.AccessTime = true
+}
+
+// TouchModificationTime updates the cached modification and status change time
+// in-place to the current time. 
+func (c *CachingInodeOperations) TouchModificationTime(ctx context.Context) {
+	c.attrMu.Lock()
+	c.touchModificationTimeLocked(ctx)
+	c.attrMu.Unlock()
+}
+
+// touchModificationTimeLocked updates the cached modification and status
+// change time in-place to the current time.
+//
+// Preconditions: c.attrMu is locked for writing.
+func (c *CachingInodeOperations) touchModificationTimeLocked(ctx context.Context) {
+	now := ktime.NowFromContext(ctx)
+	c.attr.ModificationTime = now
+	c.dirtyAttr.ModificationTime = true
+	c.attr.StatusChangeTime = now
+	c.dirtyAttr.StatusChangeTime = true
+}
+
+// touchStatusChangeTimeLocked updates the cached status change time
+// in-place to the current time.
+//
+// Preconditions: c.attrMu is locked for writing.
+func (c *CachingInodeOperations) touchStatusChangeTimeLocked(ctx context.Context) {
+	now := ktime.NowFromContext(ctx)
+	c.attr.StatusChangeTime = now
+	c.dirtyAttr.StatusChangeTime = true
+}
+
+// Read reads from frames and otherwise directly from the backing file
+// into dst starting at offset until dst is full, EOF is reached, or an
+// error is encountered.
+//
+// Read may partially fill dst and return a nil error.
+func (c *CachingInodeOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
+	if dst.NumBytes() == 0 {
+		return 0, nil
+	}
+
+	// Have we reached EOF? We check for this again in
+	// inodeReadWriter.ReadToBlocks to avoid holding c.attrMu (which would
+	// serialize reads) or c.dataMu (which would violate lock ordering), but
+	// check here first (before calling into MM) since reading at EOF is
+	// common: getting a return value of 0 from a read syscall is the only way
+	// to detect EOF.
+	//
+	// TODO: Separate out c.attr.Size and use atomics instead of
+	// c.dataMu.
+	c.dataMu.RLock()
+	size := c.attr.Size
+	c.dataMu.RUnlock()
+	if offset >= size {
+		return 0, io.EOF
+	}
+
+	n, err := dst.CopyOutFrom(ctx, &inodeReadWriter{ctx, c, offset})
+	// Compare Linux's mm/filemap.c:do_generic_file_read() => file_accessed().
+	c.TouchAccessTime(ctx, file.Dirent.Inode)
+	return n, err
+}
+
+// Write writes to frames and otherwise directly to the backing file
+// from src starting at offset and until src is empty or an error is
+// encountered.
+//
+// If Write partially fills src, a non-nil error is returned.
+func (c *CachingInodeOperations) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
+	if src.NumBytes() == 0 {
+		return 0, nil
+	}
+
+	c.attrMu.Lock()
+	defer c.attrMu.Unlock()
+	// Compare Linux's mm/filemap.c:__generic_file_write_iter() => file_update_time().
+	c.touchModificationTimeLocked(ctx)
+	return src.CopyInTo(ctx, &inodeReadWriter{ctx, c, offset})
+}
+
+// inodeReadWriter adapts a CachingInodeOperations plus a starting offset to
+// the safemem.Reader/safemem.Writer interfaces. offset advances as data is
+// transferred.
+type inodeReadWriter struct {
+	ctx    context.Context
+	c      *CachingInodeOperations
+	offset int64
+}
+
+// ReadToBlocks implements safemem.Reader.ReadToBlocks. Cached ranges are
+// copied from the page cache; gaps are read directly from the backing file
+// (without populating the cache).
+func (rw *inodeReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
+	rw.c.dataMu.RLock()
+	defer rw.c.dataMu.RUnlock()
+
+	// Compute the range to read.
+	if rw.offset >= rw.c.attr.Size {
+		return 0, io.EOF
+	}
+	end := fs.ReadEndOffset(rw.offset, int64(dsts.NumBytes()), rw.c.attr.Size)
+	if end == rw.offset { // dsts.NumBytes() == 0?
+		return 0, nil
+	}
+
+	mem := rw.c.platform.Memory()
+	var done uint64
+	seg, gap := rw.c.cache.Find(uint64(rw.offset))
+	for rw.offset < end {
+		mr := memmap.MappableRange{uint64(rw.offset), uint64(end)}
+		switch {
+		case seg.Ok():
+			// Get internal mappings from the cache.
+			ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
+			if err != nil {
+				return done, err
+			}
+
+			// Copy from internal mappings.
+			n, err := safemem.CopySeq(dsts, ims)
+			done += n
+			rw.offset += int64(n)
+			dsts = dsts.DropFirst64(n)
+			if err != nil {
+				return done, err
+			}
+
+			// Continue.
+			seg, gap = seg.NextNonEmpty()
+
+		case gap.Ok():
+			// Read directly from the backing file.
+			gapmr := gap.Range().Intersect(mr)
+			dst := dsts.TakeFirst64(gapmr.Length())
+			n, err := rw.c.backingFile.ReadToBlocksAt(rw.ctx, dst, gapmr.Start)
+			done += n
+			rw.offset += int64(n)
+			dsts = dsts.DropFirst64(n)
+			// Partial reads are fine. But we must stop reading.
+			if n != dst.NumBytes() || err != nil {
+				return done, err
+			}
+
+			// Continue.
+			seg, gap = gap.NextSegment(), FileRangeGapIterator{}
+
+		default:
+			// NOTE(review): this bare break exits only the switch, not the
+			// for loop; if this case were reachable the loop would spin.
+			// It appears unreachable (Find/NextNonEmpty always yield a valid
+			// seg or gap) — confirm before relying on it.
+			break
+		}
+	}
+	return done, nil
+}
+
+// WriteFromBlocks implements safemem.Writer.WriteFromBlocks. Cached ranges
+// are written into the page cache and marked dirty; gaps are written directly
+// to the backing file. Growing the file updates cached Size/Usage.
+//
+// Preconditions: rw.c.attrMu must be locked.
+func (rw *inodeReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {
+	rw.c.dataMu.Lock()
+	defer rw.c.dataMu.Unlock()
+
+	// Compute the range to write.
+	end := fs.WriteEndOffset(rw.offset, int64(srcs.NumBytes()))
+	if end == rw.offset { // srcs.NumBytes() == 0?
+		return 0, nil
+	}
+
+	defer func() {
+		// If the write ends beyond the file's previous size, it causes the
+		// file to grow.
+		if rw.offset > rw.c.attr.Size {
+			rw.c.attr.Size = rw.offset
+			rw.c.dirtyAttr.Size = true
+		}
+		if rw.offset > rw.c.attr.Usage {
+			// This is incorrect if CachingInodeOperations is caching a sparse
+			// file. (In Linux, keeping inode::i_blocks up to date is the
+			// filesystem's responsibility.)
+			rw.c.attr.Usage = rw.offset
+			rw.c.dirtyAttr.Usage = true
+		}
+	}()
+
+	mem := rw.c.platform.Memory()
+	var done uint64
+	seg, gap := rw.c.cache.Find(uint64(rw.offset))
+	for rw.offset < end {
+		mr := memmap.MappableRange{uint64(rw.offset), uint64(end)}
+		switch {
+		case seg.Ok() && seg.Start() < mr.End:
+			// Get internal mappings from the cache.
+			segMR := seg.Range().Intersect(mr)
+			ims, err := mem.MapInternal(seg.FileRangeOf(segMR), usermem.Write)
+			if err != nil {
+				return done, err
+			}
+
+			// Copy to internal mappings.
+			n, err := safemem.CopySeq(ims, srcs)
+			done += n
+			rw.offset += int64(n)
+			srcs = srcs.DropFirst64(n)
+			rw.c.dirty.MarkDirty(segMR)
+			if err != nil {
+				return done, err
+			}
+
+			// Continue.
+			seg, gap = seg.NextNonEmpty()
+
+		case gap.Ok() && gap.Start() < mr.End:
+			// Write directly to the backing file.
+			gapmr := gap.Range().Intersect(mr)
+			src := srcs.TakeFirst64(gapmr.Length())
+			n, err := rw.c.backingFile.WriteFromBlocksAt(rw.ctx, src, gapmr.Start)
+			done += n
+			rw.offset += int64(n)
+			srcs = srcs.DropFirst64(n)
+			// Partial writes are fine. But we must stop writing.
+			if n != src.NumBytes() || err != nil {
+				return done, err
+			}
+
+			// Continue.
+			seg, gap = gap.NextSegment(), FileRangeGapIterator{}
+
+		default:
+			// NOTE(review): bare break exits only the switch (see
+			// ReadToBlocks above); appears unreachable — confirm.
+			break
+		}
+	}
+	return done, nil
+}
+
+// AddMapping implements memmap.Mappable.AddMapping.
+func (c *CachingInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error {
+	c.mapsMu.Lock()
+	defer c.mapsMu.Unlock()
+	mapped := c.mappings.AddMapping(ms, ar, offset)
+	// Do this unconditionally since whether we have c.backingFile.FD() >= 0
+	// can change across save/restore.
+	for _, r := range mapped {
+		c.hostFileMapper.IncRefOn(r)
+	}
+	if !usage.IncrementalMappedAccounting && !c.forcePageCache && c.backingFile.FD() >= 0 {
+		for _, r := range mapped {
+			usage.MemoryAccounting.Inc(r.Length(), usage.Mapped)
+		}
+	}
+	return nil
+}
+
+// RemoveMapping implements memmap.Mappable.RemoveMapping. 
+func (c *CachingInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) { + c.mapsMu.Lock() + defer c.mapsMu.Unlock() + unmapped := c.mappings.RemoveMapping(ms, ar, offset) + for _, r := range unmapped { + c.hostFileMapper.DecRefOn(r) + } + if !c.forcePageCache && c.backingFile.FD() >= 0 { + if !usage.IncrementalMappedAccounting { + for _, r := range unmapped { + usage.MemoryAccounting.Dec(r.Length(), usage.Mapped) + } + } + return + } + + // Writeback dirty mapped memory now that there are no longer any + // mappings that reference it. This is our naive memory eviction + // strategy. + mem := c.platform.Memory() + c.dataMu.Lock() + defer c.dataMu.Unlock() + for _, r := range unmapped { + if err := SyncDirty(ctx, r, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt); err != nil { + log.Warningf("Failed to writeback cached data %v: %v", r, err) + } + c.cache.Drop(r, mem) + c.dirty.KeepClean(r) + } +} + +// CopyMapping implements memmap.Mappable.CopyMapping. +func (c *CachingInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error { + return c.AddMapping(ctx, ms, dstAR, offset) +} + +// Translate implements memmap.Mappable.Translate. +func (c *CachingInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { + if !c.forcePageCache && c.backingFile.FD() >= 0 { + return []memmap.Translation{ + { + Source: optional, + File: c, + Offset: optional.Start, + }, + }, nil + } + + c.dataMu.Lock() + defer c.dataMu.Unlock() + + // Constrain translations to c.attr.Size (rounded up) to prevent + // translation to pages that may be concurrently truncated. 
+ pgend := fs.OffsetPageEnd(c.attr.Size) + var beyondEOF bool + if required.End > pgend { + if required.Start >= pgend { + return nil, &memmap.BusError{io.EOF} + } + beyondEOF = true + required.End = pgend + } + if optional.End > pgend { + optional.End = pgend + } + + mem := c.platform.Memory() + cerr := c.cache.Fill(ctx, required, maxFillRange(required, optional), mem, usage.PageCache, c.backingFile.ReadToBlocksAt) + + var ts []memmap.Translation + var translatedEnd uint64 + for seg := c.cache.FindSegment(required.Start); seg.Ok() && seg.Start() < required.End; seg, _ = seg.NextNonEmpty() { + segMR := seg.Range().Intersect(optional) + ts = append(ts, memmap.Translation{ + Source: segMR, + File: mem, + Offset: seg.FileRangeOf(segMR).Start, + }) + if at.Write { + // From this point forward, this memory can be dirtied through the + // mapping at any time. + c.dirty.KeepDirty(segMR) + } + translatedEnd = segMR.End + } + + // Don't return the error returned by c.cache.Fill if it occurred outside + // of required. + if translatedEnd < required.End && cerr != nil { + return ts, &memmap.BusError{cerr} + } + if beyondEOF { + return ts, &memmap.BusError{io.EOF} + } + return ts, nil +} + +func maxFillRange(required, optional memmap.MappableRange) memmap.MappableRange { + const maxReadahead = 64 << 10 // 64 KB, chosen arbitrarily + if required.Length() >= maxReadahead { + return required + } + if optional.Length() <= maxReadahead { + return optional + } + optional.Start = required.Start + if optional.Length() <= maxReadahead { + return optional + } + optional.End = optional.Start + maxReadahead + return optional +} + +// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable. +func (c *CachingInodeOperations) InvalidateUnsavable(ctx context.Context) error { + // Whether we have a host fd (and consequently what platform.File is + // mapped) can change across save/restore, so invalidate all translations + // unconditionally. 
+ c.mapsMu.Lock() + defer c.mapsMu.Unlock() + c.mappings.InvalidateAll(memmap.InvalidateOpts{}) + + // Sync the cache's contents so that if we have a host fd after restore, + // the remote file's contents are coherent. + c.dataMu.Lock() + defer c.dataMu.Unlock() + if err := SyncDirtyAll(ctx, &c.cache, &c.dirty, uint64(c.attr.Size), c.platform.Memory(), c.backingFile.WriteFromBlocksAt); err != nil { + return err + } + + // Discard the cache so that it's not stored in saved state. This is safe + // because per InvalidateUnsavable invariants, no new translations can have + // been returned after we invalidated all existing translations above. + c.cache.DropAll(c.platform.Memory()) + c.dirty.RemoveAll() + + return nil +} + +// MapInto implements platform.File.MapInto. This is used when we directly map +// an underlying host fd and CachingInodeOperations is used as the platform.File +// during translation. +func (c *CachingInodeOperations) MapInto(as platform.AddressSpace, addr usermem.Addr, fr platform.FileRange, at usermem.AccessType, precommit bool) error { + return as.MapFile(addr, c.backingFile.FD(), fr, at, precommit) +} + +// MapInternal implements platform.File.MapInternal. This is used when we +// directly map an underlying host fd and CachingInodeOperations is used as the +// platform.File during translation. +func (c *CachingInodeOperations) MapInternal(fr platform.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) { + return c.hostFileMapper.MapInternal(fr, c.backingFile.FD(), at.Write) +} + +// IncRef implements platform.File.IncRef. This is used when we directly map an +// underlying host fd and CachingInodeOperations is used as the platform.File +// during translation. 
+func (c *CachingInodeOperations) IncRef(fr platform.FileRange) { + c.dataMu.Lock() + defer c.dataMu.Unlock() + + seg, gap := c.refs.Find(fr.Start) + for { + switch { + case seg.Ok() && seg.Start() < fr.End: + seg = c.refs.Isolate(seg, fr) + seg.SetValue(seg.Value() + 1) + seg, gap = seg.NextNonEmpty() + case gap.Ok() && gap.Start() < fr.End: + newRange := gap.Range().Intersect(fr) + if usage.IncrementalMappedAccounting { + usage.MemoryAccounting.Inc(newRange.Length(), usage.Mapped) + } + seg, gap = c.refs.InsertWithoutMerging(gap, newRange, 1).NextNonEmpty() + default: + c.refs.MergeAdjacent(fr) + return + } + } +} + +// DecRef implements platform.File.DecRef. This is used when we directly map an +// underlying host fd and CachingInodeOperations is used as the platform.File +// during translation. +func (c *CachingInodeOperations) DecRef(fr platform.FileRange) { + c.dataMu.Lock() + defer c.dataMu.Unlock() + + seg := c.refs.FindSegment(fr.Start) + + for seg.Ok() && seg.Start() < fr.End { + seg = c.refs.Isolate(seg, fr) + if old := seg.Value(); old == 1 { + if usage.IncrementalMappedAccounting { + usage.MemoryAccounting.Dec(seg.Range().Length(), usage.Mapped) + } + seg = c.refs.Remove(seg).NextSegment() + } else { + seg.SetValue(old - 1) + seg = seg.NextSegment() + } + } + c.refs.MergeAdjacent(fr) +} diff --git a/pkg/sentry/fs/fsutil/inode_cached_test.go b/pkg/sentry/fs/fsutil/inode_cached_test.go new file mode 100644 index 000000000..996c91849 --- /dev/null +++ b/pkg/sentry/fs/fsutil/inode_cached_test.go @@ -0,0 +1,403 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fsutil + +import ( + "bytes" + "io" + "reflect" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type noopBackingFile struct{} + +func (noopBackingFile) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) { + return dsts.NumBytes(), nil +} + +func (noopBackingFile) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) { + return srcs.NumBytes(), nil +} + +func (noopBackingFile) SetMaskedAttributes(context.Context, fs.AttrMask, fs.UnstableAttr) error { + return nil +} + +func (noopBackingFile) Sync(context.Context) error { + return nil +} + +func (noopBackingFile) FD() int { + return -1 +} + +func TestSetPermissions(t *testing.T) { + ctx := contexttest.Context(t) + + uattr := fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Perms: fs.FilePermsFromMode(0444), + }) + iops := NewCachingInodeOperations(ctx, noopBackingFile{}, uattr, false /*forcePageCache*/) + defer iops.Release() + + perms := fs.FilePermsFromMode(0777) + if !iops.SetPermissions(ctx, nil, perms) { + t.Fatalf("SetPermissions failed, want success") + } + + // Did permissions change? 
+ if !iops.dirtyAttr.Perms { + t.Fatalf("got perms not dirty, want dirty") + } + if iops.attr.Perms != perms { + t.Fatalf("got perms +%v, want +%v", iops.attr.Perms, perms) + } + + // Did status change time change? + if !iops.dirtyAttr.StatusChangeTime { + t.Fatalf("got status change time not dirty, want dirty") + } + if iops.attr.StatusChangeTime.Equal(uattr.StatusChangeTime) { + t.Fatalf("got status change time unchanged") + } +} + +func TestSetTimestamps(t *testing.T) { + ctx := contexttest.Context(t) + for _, test := range []struct { + desc string + ts fs.TimeSpec + wantDirty fs.AttrMask + }{ + { + desc: "noop", + ts: fs.TimeSpec{ + ATimeOmit: true, + MTimeOmit: true, + }, + wantDirty: fs.AttrMask{}, + }, + { + desc: "access time only", + ts: fs.TimeSpec{ + ATime: ktime.NowFromContext(ctx), + MTimeOmit: true, + }, + wantDirty: fs.AttrMask{ + AccessTime: true, + StatusChangeTime: true, + }, + }, + { + desc: "modification time only", + ts: fs.TimeSpec{ + ATimeOmit: true, + MTime: ktime.NowFromContext(ctx), + }, + wantDirty: fs.AttrMask{ + ModificationTime: true, + StatusChangeTime: true, + }, + }, + { + desc: "access and modification time", + ts: fs.TimeSpec{ + ATime: ktime.NowFromContext(ctx), + MTime: ktime.NowFromContext(ctx), + }, + wantDirty: fs.AttrMask{ + AccessTime: true, + ModificationTime: true, + StatusChangeTime: true, + }, + }, + { + desc: "system time access and modification time", + ts: fs.TimeSpec{ + ATimeSetSystemTime: true, + MTimeSetSystemTime: true, + }, + wantDirty: fs.AttrMask{ + AccessTime: true, + ModificationTime: true, + StatusChangeTime: true, + }, + }, + } { + t.Run(test.desc, func(t *testing.T) { + ctx := contexttest.Context(t) + + epoch := ktime.ZeroTime + uattr := fs.UnstableAttr{ + AccessTime: epoch, + ModificationTime: epoch, + StatusChangeTime: epoch, + } + iops := NewCachingInodeOperations(ctx, noopBackingFile{}, uattr, false /*forcePageCache*/) + defer iops.Release() + + if err := iops.SetTimestamps(ctx, nil, test.ts); err != 
nil { + t.Fatalf("SetTimestamps got error %v, want nil", err) + } + if !reflect.DeepEqual(iops.dirtyAttr, test.wantDirty) { + t.Fatalf("dirty got %+v, want %+v", iops.dirtyAttr, test.wantDirty) + } + if iops.dirtyAttr.AccessTime { + if !iops.attr.AccessTime.After(uattr.AccessTime) { + t.Fatalf("diritied access time did not advance, want %v > %v", iops.attr.AccessTime, uattr.AccessTime) + } + if !iops.dirtyAttr.StatusChangeTime { + t.Fatalf("dirty access time requires dirty status change time") + } + if !iops.attr.StatusChangeTime.After(uattr.StatusChangeTime) { + t.Fatalf("dirtied status change time did not advance") + } + } + if iops.dirtyAttr.ModificationTime { + if !iops.attr.ModificationTime.After(uattr.ModificationTime) { + t.Fatalf("diritied modification time did not advance") + } + if !iops.dirtyAttr.StatusChangeTime { + t.Fatalf("dirty modification time requires dirty status change time") + } + if !iops.attr.StatusChangeTime.After(uattr.StatusChangeTime) { + t.Fatalf("dirtied status change time did not advance") + } + } + }) + } +} + +func TestTruncate(t *testing.T) { + ctx := contexttest.Context(t) + + uattr := fs.UnstableAttr{ + Size: 0, + } + iops := NewCachingInodeOperations(ctx, noopBackingFile{}, uattr, false /*forcePageCache*/) + defer iops.Release() + + if err := iops.Truncate(ctx, nil, uattr.Size); err != nil { + t.Fatalf("Truncate got error %v, want nil", err) + } + if iops.dirtyAttr.Size { + t.Fatalf("Truncate caused size to be dirtied") + } + var size int64 = 4096 + if err := iops.Truncate(ctx, nil, size); err != nil { + t.Fatalf("Truncate got error %v, want nil", err) + } + if !iops.dirtyAttr.Size { + t.Fatalf("Truncate caused size to not be dirtied") + } + if iops.attr.Size != size { + t.Fatalf("Truncate got %d, want %d", iops.attr.Size, size) + } + if !iops.dirtyAttr.ModificationTime || !iops.dirtyAttr.StatusChangeTime { + t.Fatalf("Truncate did not dirty modification and status change time") + } + if 
!iops.attr.ModificationTime.After(uattr.ModificationTime) { + t.Fatalf("dirtied modification time did not change") + } + if !iops.attr.StatusChangeTime.After(uattr.StatusChangeTime) { + t.Fatalf("dirtied status change time did not change") + } +} + +type sliceBackingFile struct { + data []byte +} + +func newSliceBackingFile(data []byte) *sliceBackingFile { + return &sliceBackingFile{data} +} + +func (f *sliceBackingFile) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) { + r := safemem.BlockSeqReader{safemem.BlockSeqOf(safemem.BlockFromSafeSlice(f.data)).DropFirst64(offset)} + return r.ReadToBlocks(dsts) +} + +func (f *sliceBackingFile) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) { + w := safemem.BlockSeqWriter{safemem.BlockSeqOf(safemem.BlockFromSafeSlice(f.data)).DropFirst64(offset)} + return w.WriteFromBlocks(srcs) +} + +func (*sliceBackingFile) SetMaskedAttributes(context.Context, fs.AttrMask, fs.UnstableAttr) error { + return nil +} + +func (*sliceBackingFile) Sync(context.Context) error { + return nil +} + +func (*sliceBackingFile) FD() int { + return -1 +} + +type noopMappingSpace struct{} + +// Invalidate implements memmap.MappingSpace.Invalidate. +func (noopMappingSpace) Invalidate(ar usermem.AddrRange, opts memmap.InvalidateOpts) { +} + +func anonInode(ctx context.Context) *fs.Inode { + return fs.NewInode(NewSimpleInodeOperations(InodeSimpleAttributes{ + UAttr: fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: fs.FileOwnerFromContext(ctx), + Perms: fs.FilePermissions{ + User: fs.PermMask{Read: true, Write: true}, + }, + Links: 1, + }), + }), fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}), fs.StableAttr{ + Type: fs.Anonymous, + BlockSize: usermem.PageSize, + }) +} + +func pagesOf(bs ...byte) []byte { + buf := make([]byte, 0, len(bs)*usermem.PageSize) + for _, b := range bs { + buf = append(buf, bytes.Repeat([]byte{b}, usermem.PageSize)...) 
+ } + return buf +} + +func TestRead(t *testing.T) { + ctx := contexttest.Context(t) + + // Construct a 3-page file. + buf := pagesOf('a', 'b', 'c') + file := fs.NewFile(ctx, fs.NewDirent(anonInode(ctx), "anon"), fs.FileFlags{}, nil) + uattr := fs.UnstableAttr{ + Size: int64(len(buf)), + } + iops := NewCachingInodeOperations(ctx, newSliceBackingFile(buf), uattr, false /*forcePageCache*/) + defer iops.Release() + + // Expect the cache to be initially empty. + if cached := iops.cache.Span(); cached != 0 { + t.Errorf("Span got %d, want 0", cached) + } + + // Create a memory mapping of the second page (as CachingInodeOperations + // expects to only cache mapped pages), then call Translate to force it to + // be cached. + var ms noopMappingSpace + ar := usermem.AddrRange{usermem.PageSize, 2 * usermem.PageSize} + if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize); err != nil { + t.Fatalf("AddMapping got %v, want nil", err) + } + mr := memmap.MappableRange{usermem.PageSize, 2 * usermem.PageSize} + if _, err := iops.Translate(ctx, mr, mr, usermem.Read); err != nil { + t.Fatalf("Translate got %v, want nil", err) + } + if cached := iops.cache.Span(); cached != usermem.PageSize { + t.Errorf("SpanRange got %d, want %d", cached, usermem.PageSize) + } + + // Try to read 4 pages. The first and third pages should be read directly + // from the "file", the second page should be read from the cache, and only + // 3 pages (the size of the file) should be readable. + rbuf := make([]byte, 4*usermem.PageSize) + dst := usermem.BytesIOSequence(rbuf) + n, err := iops.Read(ctx, file, dst, 0) + if n != 3*usermem.PageSize || (err != nil && err != io.EOF) { + t.Fatalf("Read got (%d, %v), want (%d, nil or EOF)", n, err, 3*usermem.PageSize) + } + rbuf = rbuf[:3*usermem.PageSize] + + // Did we get the bytes we expect? 
+ if !bytes.Equal(rbuf, buf) { + t.Errorf("Read back bytes %v, want %v", rbuf, buf) + } + + // Delete the memory mapping and expect it to cause the cached page to be + // uncached. + iops.RemoveMapping(ctx, ms, ar, usermem.PageSize) + if cached := iops.cache.Span(); cached != 0 { + t.Fatalf("Span got %d, want 0", cached) + } +} + +func TestWrite(t *testing.T) { + ctx := contexttest.Context(t) + + // Construct a 4-page file. + buf := pagesOf('a', 'b', 'c', 'd') + orig := append([]byte(nil), buf...) + inode := anonInode(ctx) + uattr := fs.UnstableAttr{ + Size: int64(len(buf)), + } + iops := NewCachingInodeOperations(ctx, newSliceBackingFile(buf), uattr, false /*forcePageCache*/) + defer iops.Release() + + // Expect the cache to be initially empty. + if cached := iops.cache.Span(); cached != 0 { + t.Errorf("Span got %d, want 0", cached) + } + + // Create a memory mapping of the second and third pages (as + // CachingInodeOperations expects to only cache mapped pages), then call + // Translate to force them to be cached. + var ms noopMappingSpace + ar := usermem.AddrRange{usermem.PageSize, 3 * usermem.PageSize} + if err := iops.AddMapping(ctx, ms, ar, usermem.PageSize); err != nil { + t.Fatalf("AddMapping got %v, want nil", err) + } + defer iops.RemoveMapping(ctx, ms, ar, usermem.PageSize) + mr := memmap.MappableRange{usermem.PageSize, 3 * usermem.PageSize} + if _, err := iops.Translate(ctx, mr, mr, usermem.Read); err != nil { + t.Fatalf("Translate got %v, want nil", err) + } + if cached := iops.cache.Span(); cached != 2*usermem.PageSize { + t.Errorf("SpanRange got %d, want %d", cached, 2*usermem.PageSize) + } + + // Write to the first 2 pages. + wbuf := pagesOf('e', 'f') + src := usermem.BytesIOSequence(wbuf) + n, err := iops.Write(ctx, src, 0) + if n != 2*usermem.PageSize || err != nil { + t.Fatalf("Write got (%d, %v), want (%d, nil)", n, err, 2*usermem.PageSize) + } + + // The first page should have been written directly, since it was not cached. 
+ want := append([]byte(nil), orig...) + copy(want, pagesOf('e')) + if !bytes.Equal(buf, want) { + t.Errorf("File contents are %v, want %v", buf, want) + } + + // Sync back to the "backing file". + if err := iops.WriteOut(ctx, inode); err != nil { + t.Errorf("Sync got %v, want nil", err) + } + + // Now the second page should have been written as well. + copy(want[usermem.PageSize:], pagesOf('f')) + if !bytes.Equal(buf, want) { + t.Errorf("File contents are %v, want %v", buf, want) + } +} diff --git a/pkg/sentry/fs/g3doc/inotify.md b/pkg/sentry/fs/g3doc/inotify.md new file mode 100644 index 000000000..1e99a3357 --- /dev/null +++ b/pkg/sentry/fs/g3doc/inotify.md @@ -0,0 +1,122 @@ +# Inotify + +Inotify implements the like-named filesystem event notification system for the +sentry, see `inotify(7)`. + +## Architecture + +For the most part, the sentry implementation of inotify mirrors the Linux +architecture. Inotify instances (i.e. the fd returned by inotify_init(2)) are +backed by a pseudo-filesystem. Events are generated from various places in the +sentry, including the [syscall layer][syscall_dir], the [vfs layer][dirent] and +the [process fd table][fd_map]. Watches are stored in inodes and generated +events are queued to the inotify instance owning the watches for delivery to the +user. + +## Objects + +Here is a brief description of the existing and new objects involved in the +sentry inotify mechanism, and how they interact: + +### [`fs.Inotify`][inotify] + +- An inotify instances, created by inotify_init(2)/inotify_init1(2). +- The inotify fd has a `fs.Dirent`, supports filesystem syscalls to read + events. +- Has multiple `fs.Watch`es, with at most one watch per target inode, per + inotify instance. +- Has an instance `id` which is globally unique. This is *not* the fd number + for this instance, since the fd can be duped. This `id` is not externally + visible. 
+ +### [`fs.Watch`][watch] + +- An inotify watch, created/deleted by + inotify_add_watch(2)/inotify_rm_watch(2). +- Owned by an `fs.Inotify` instance; each watch keeps a pointer to the + `owner`. +- Associated with a single `fs.Inode`, which is the watch `target`. While the + watch is active, it indirectly pins `target` to memory. See the "Reference + Model" section for a detailed explanation. +- Filesystem operations on `target` generate `fs.Event`s. + +### [`fs.Event`][event] + +- A simple struct encapsulating all the fields for an inotify event. +- Generated by `fs.Watch`es and forwarded to the watches' `owner`s. +- Serialized to the user during read(2) syscalls on the associated + `fs.Inotify`'s fd. + +### [`fs.Dirent`][dirent] + +- Many inotify events are generated inside dirent methods. Events are + generated in the dirent methods rather than `fs.Inode` methods because some + events carry the name of the subject node, and node names are generally + unavailable in an `fs.Inode`. +- Dirents do not directly contain state for any watches. Instead, they forward + notifications to the underlying `fs.Inode`. + +### [`fs.Inode`][inode] + +- Interacts with inotify through `fs.Watch`es. +- Inodes contain a map of all active `fs.Watch`es on them. +- An `fs.Inotify` instance can have at most one `fs.Watch` per inode. + `fs.Watch`es on an inode are indexed by their `owner`'s `id`. +- All inotify logic is encapsulated in the [`Watches`][inode_watches] struct + in an inode. Logically, `Watches` is the set of inotify watches on the + inode. + +## Reference Model + +The sentry inotify implementation has a complex reference model. An inotify +watch observes a single inode. For efficient lookup, the state for a watch is +stored directly on the target inode. This state needs to be persistent for the +lifetime of the watch. 
Unlike usual filesystem metadata, the watch state has no +"on-disk" representation, so it cannot be reconstructed by the filesystem if +the inode is flushed from memory. This effectively means we need to keep any +inodes with active watches pinned to memory. + +We can't just hold an extra ref on the inode to pin it to memory because some +filesystems (such as gofer-based filesystems) don't have persistent inodes. In +such a filesystem, if we just pin the inode, nothing prevents the enclosing +dirent from being GCed. Once the dirent is GCed, the pinned inode is +unreachable -- these filesystems generate a new inode by re-reading the node +state on the next walk. Incidentally, hardlinks also don't work on these +filesystems for this reason. + +To prevent the above scenario, when a new watch is added on an inode, we *pin* +the dirent we used to reach the inode. Note that due to hardlinks, this dirent +may not be the only dirent pointing to the inode. Attempting to set an inotify +watch via multiple hardlinks to the same file results in the same watch being +returned for both links. However, for each new dirent we use to reach the same +inode, we add a new pin. We need a new pin for each new dirent used to reach the +inode because we have no guarantees about the deletion order of the different +links to the inode. + +## Lock Ordering + +There are 4 locks related to the inotify implementation: + +- `Inotify.mu`: the inotify instance lock. +- `Inotify.evMu`: the inotify event queue lock. +- `Watch.mu`: the watch lock, used to protect pins. +- `fs.Watches.mu`: the inode watch set mutex, used to protect the collection of + watches on the inode. + +The correct lock ordering for inotify code is: + +`Inotify.mu` -> `fs.Watches.mu` -> `Watch.mu` -> `Inotify.evMu`. + +We need a distinct lock for the event queue because by the time a goroutine +attempts to queue a new event, it is already holding `fs.Watches.mu`. 
If we used +`Inotify.mu` to also protect the event queue, this would violate the above lock +ordering. + +[dirent]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/fs/dirent.go +[event]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/fs/inotify_event.go +[fd_map]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/kernel/fd_map.go +[inode]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/fs/inode.go +[inode_watches]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/fs/inode_inotify.go +[inotify]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/fs/inotify.go +[syscall_dir]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/syscalls/linux/ +[watch]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/fs/inotify_watch.go diff --git a/pkg/sentry/fs/gofer/BUILD b/pkg/sentry/fs/gofer/BUILD new file mode 100644 index 000000000..ca42b0a54 --- /dev/null +++ b/pkg/sentry/fs/gofer/BUILD @@ -0,0 +1,90 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "gofer_state", + srcs = [ + "file.go", + "file_state.go", + "fs.go", + "inode.go", + "inode_state.go", + "session.go", + "session_state.go", + ], + out = "gofer_state.go", + package = "gofer", +) + +go_library( + name = "gofer", + srcs = [ + "attr.go", + "context_file.go", + "device.go", + "file.go", + "file_state.go", + "fs.go", + "gofer_state.go", + "handles.go", + "inode.go", + "inode_state.go", + "path.go", + "session.go", + "session_state.go", + "socket.go", + "util.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/gofer", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/amutex", + "//pkg/fd", + "//pkg/log", + "//pkg/metric", + "//pkg/p9", + "//pkg/refs", + "//pkg/secio", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + 
"//pkg/sentry/fs/fdpipe", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/host", + "//pkg/sentry/fs/lock", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/platform", + "//pkg/sentry/safemem", + "//pkg/sentry/uniqueid", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/tcpip", + "//pkg/tcpip/transport/unix", + "//pkg/unet", + "//pkg/waiter", + ], +) + +go_test( + name = "gofer_test", + size = "small", + srcs = ["gofer_test.go"], + embed = [":gofer"], + deps = [ + "//pkg/log", + "//pkg/p9", + "//pkg/p9/p9test", + "//pkg/sentry/context", + "//pkg/sentry/context/contexttest", + "//pkg/sentry/fs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/usermem", + "//pkg/unet", + ], +) diff --git a/pkg/sentry/fs/gofer/attr.go b/pkg/sentry/fs/gofer/attr.go new file mode 100644 index 000000000..5e24767f9 --- /dev/null +++ b/pkg/sentry/fs/gofer/attr.go @@ -0,0 +1,162 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// getattr returns the 9p attributes of the p9.File. 
On success, Mode, Size, and RDev +// are guaranteed to be masked as valid. +func getattr(ctx context.Context, file contextFile) (p9.QID, p9.AttrMask, p9.Attr, error) { + // Retrieve attributes over the wire. + qid, valid, attr, err := file.getAttr(ctx, p9.AttrMaskAll()) + if err != nil { + return qid, valid, attr, err + } + + // Require mode, size, and raw device id. + if !valid.Mode || !valid.Size || !valid.RDev { + return qid, valid, attr, syscall.EIO + } + + return qid, valid, attr, nil +} + +func unstable(ctx context.Context, valid p9.AttrMask, pattr p9.Attr, mounter fs.FileOwner, client *p9.Client) fs.UnstableAttr { + return fs.UnstableAttr{ + Size: int64(pattr.Size), + Usage: int64(pattr.Size), + Perms: perms(valid, pattr, client), + Owner: owner(mounter, valid, pattr), + AccessTime: atime(ctx, valid, pattr), + ModificationTime: mtime(ctx, valid, pattr), + StatusChangeTime: ctime(ctx, valid, pattr), + Links: links(valid, pattr), + } +} + +func perms(valid p9.AttrMask, pattr p9.Attr, client *p9.Client) fs.FilePermissions { + if pattr.Mode.IsDir() && !p9.VersionSupportsMultiUser(client.Version()) { + // If user and group permissions bits are not supplied, use + // "other" bits to supplement them. + // + // Older Gofer's fake directories only have "other" permission, + // but will often be accessed via user or group permissions. + if pattr.Mode&0770 == 0 { + other := pattr.Mode & 07 + pattr.Mode = pattr.Mode | other<<3 | other<<6 + } + } + return fs.FilePermsFromP9(pattr.Mode) +} + +func owner(mounter fs.FileOwner, valid p9.AttrMask, pattr p9.Attr) fs.FileOwner { + // Unless the file returned its UID and GID, it belongs to the mounting + // task's EUID/EGID. + owner := mounter + if valid.UID { + owner.UID = auth.KUID(pattr.UID) + } + if valid.GID { + owner.GID = auth.KGID(pattr.GID) + } + return owner +} + +// bsize returns a block size from 9p attributes. 
+func bsize(pattr p9.Attr) int64 { + if pattr.BlockSize > 0 { + return int64(pattr.BlockSize) + } + // Some files may have no clue of their block size. Better not to report + // something misleading or buggy and have a safe default. + return usermem.PageSize +} + +// ntype returns an fs.InodeType from 9p attributes. +func ntype(pattr p9.Attr) fs.InodeType { + switch { + case pattr.Mode.IsNamedPipe(): + return fs.Pipe + case pattr.Mode.IsDir(): + return fs.Directory + case pattr.Mode.IsSymlink(): + return fs.Symlink + case pattr.Mode.IsCharacterDevice(): + return fs.CharacterDevice + case pattr.Mode.IsBlockDevice(): + return fs.BlockDevice + case pattr.Mode.IsSocket(): + return fs.Socket + case pattr.Mode.IsRegular(): + fallthrough + default: + return fs.RegularFile + } +} + +// ctime returns a change time from 9p attributes. +func ctime(ctx context.Context, valid p9.AttrMask, pattr p9.Attr) ktime.Time { + if valid.CTime { + return ktime.FromUnix(int64(pattr.CTimeSeconds), int64(pattr.CTimeNanoSeconds)) + } + // Approximate ctime with mtime if ctime isn't available. + return mtime(ctx, valid, pattr) +} + +// atime returns an access time from 9p attributes. +func atime(ctx context.Context, valid p9.AttrMask, pattr p9.Attr) ktime.Time { + if valid.ATime { + return ktime.FromUnix(int64(pattr.ATimeSeconds), int64(pattr.ATimeNanoSeconds)) + } + return ktime.NowFromContext(ctx) +} + +// mtime returns a modification time from 9p attributes. +func mtime(ctx context.Context, valid p9.AttrMask, pattr p9.Attr) ktime.Time { + if valid.MTime { + return ktime.FromUnix(int64(pattr.MTimeSeconds), int64(pattr.MTimeNanoSeconds)) + } + return ktime.NowFromContext(ctx) +} + +// links returns a hard link count from 9p attributes. +func links(valid p9.AttrMask, pattr p9.Attr) uint64 { + // For gofer file systems that support link count (such as a local file gofer), + // we return the link count reported by the underlying file system. 
+ if valid.NLink { + return pattr.NLink + } + + // This node is likely backed by a file system that doesn't support links. + // We could readdir() and count children directories to provide an accurate + // link count. However this may be expensive since the gofer may be backed by remote + // storage. Instead, simply return 2 links for directories and 1 for everything else + // since no one relies on an accurate link count for gofer-based file systems. + switch ntype(pattr) { + case fs.Directory: + return 2 + default: + return 1 + } +} diff --git a/pkg/sentry/fs/gofer/context_file.go b/pkg/sentry/fs/gofer/context_file.go new file mode 100644 index 000000000..d4b6f6eb7 --- /dev/null +++ b/pkg/sentry/fs/gofer/context_file.go @@ -0,0 +1,190 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" +) + +// contextFile is a wrapper around p9.File that notifies the context that +// it's about to sleep before calling the Gofer over P9. 
+type contextFile struct { + file p9.File +} + +func (c *contextFile) walk(ctx context.Context, names []string) ([]p9.QID, contextFile, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + q, f, err := c.file.Walk(names) + if err != nil { + return nil, contextFile{}, err + } + return q, contextFile{file: f}, nil +} + +func (c *contextFile) statFS(ctx context.Context) (p9.FSStat, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.StatFS() +} + +func (c *contextFile) getAttr(ctx context.Context, req p9.AttrMask) (p9.QID, p9.AttrMask, p9.Attr, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.GetAttr(req) +} + +func (c *contextFile) setAttr(ctx context.Context, valid p9.SetAttrMask, attr p9.SetAttr) error { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.SetAttr(valid, attr) +} + +func (c *contextFile) remove(ctx context.Context) error { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Remove() +} + +func (c *contextFile) rename(ctx context.Context, directory contextFile, name string) error { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Rename(directory.file, name) +} + +func (c *contextFile) close(ctx context.Context) error { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Close() +} + +func (c *contextFile) open(ctx context.Context, mode p9.OpenFlags) (*fd.FD, p9.QID, uint32, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Open(mode) +} + +func (c *contextFile) readAt(ctx context.Context, p []byte, offset uint64) (int, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + 
return c.file.ReadAt(p, offset) +} + +func (c *contextFile) writeAt(ctx context.Context, p []byte, offset uint64) (int, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.WriteAt(p, offset) +} + +func (c *contextFile) fsync(ctx context.Context) error { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.FSync() +} + +func (c *contextFile) create(ctx context.Context, name string, flags p9.OpenFlags, permissions p9.FileMode, uid p9.UID, gid p9.GID) (*fd.FD, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + fd, _, _, _, err := c.file.Create(name, flags, permissions, uid, gid) + return fd, err +} + +func (c *contextFile) mkdir(ctx context.Context, name string, permissions p9.FileMode, uid p9.UID, gid p9.GID) (p9.QID, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Mkdir(name, permissions, uid, gid) +} + +func (c *contextFile) symlink(ctx context.Context, oldName string, newName string, uid p9.UID, gid p9.GID) (p9.QID, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Symlink(oldName, newName, uid, gid) +} + +func (c *contextFile) link(ctx context.Context, target *contextFile, newName string) error { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Link(target.file, newName) +} + +func (c *contextFile) mknod(ctx context.Context, name string, permissions p9.FileMode, major uint32, minor uint32, uid p9.UID, gid p9.GID) (p9.QID, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Mknod(name, permissions, major, minor, uid, gid) +} + +func (c *contextFile) unlinkAt(ctx context.Context, name string, flags uint32) error { + ctx.UninterruptibleSleepStart(false) + defer 
ctx.UninterruptibleSleepFinish(false) + + return c.file.UnlinkAt(name, flags) +} + +func (c *contextFile) readdir(ctx context.Context, offset uint64, count uint32) ([]p9.Dirent, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Readdir(offset, count) +} + +func (c *contextFile) readlink(ctx context.Context) (string, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Readlink() +} + +func (c *contextFile) flush(ctx context.Context) error { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Flush() +} + +func (c *contextFile) walkGetAttr(ctx context.Context, names []string) ([]p9.QID, contextFile, p9.AttrMask, p9.Attr, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + q, f, m, a, err := c.file.WalkGetAttr(names) + if err != nil { + return nil, contextFile{}, p9.AttrMask{}, p9.Attr{}, err + } + return q, contextFile{file: f}, m, a, nil +} + +func (c *contextFile) connect(ctx context.Context, flags p9.ConnectFlags) (*fd.FD, error) { + ctx.UninterruptibleSleepStart(false) + defer ctx.UninterruptibleSleepFinish(false) + + return c.file.Connect(flags) +} diff --git a/pkg/sentry/fs/gofer/device.go b/pkg/sentry/fs/gofer/device.go new file mode 100644 index 000000000..fac7306d4 --- /dev/null +++ b/pkg/sentry/fs/gofer/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import "gvisor.googlesource.com/gvisor/pkg/sentry/device" + +// goferDevice is the gofer virtual device. +var goferDevice = device.NewAnonMultiDevice() diff --git a/pkg/sentry/fs/gofer/file.go b/pkg/sentry/fs/gofer/file.go new file mode 100644 index 000000000..07c9bf01d --- /dev/null +++ b/pkg/sentry/fs/gofer/file.go @@ -0,0 +1,255 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/metric" + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +var openedWX = metric.MustCreateNewUint64Metric("/gofer/opened_write_execute_file", true /* sync */, "Number of times a writable+executable file was opened from a gofer.") + +// fileOperations implements fs.FileOperations for a remote file system. 
+type fileOperations struct { + fsutil.NoIoctl `state:"nosave"` + waiter.AlwaysReady `state:"nosave"` + + // inodeOperations is the inodeOperations backing the file. It is protected + // by a reference held by File.Dirent.Inode which is stable until + // FileOperations.Release is called. + inodeOperations *inodeOperations `state:"wait"` + + // dirCursor is the directory cursor. + dirCursor string + + // handles are the opened remote file system handles, which may + // be shared with other files. + handles *handles `state:"nosave"` + + // flags are the flags used to open handles. + flags fs.FileFlags `state:"wait"` +} + +// fileOperations implements fs.FileOperations. +var _ fs.FileOperations = (*fileOperations)(nil) + +// NewFile returns a file. NewFile is not appropriate with host pipes and sockets. +func NewFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags, i *inodeOperations, handles *handles) *fs.File { + // Remote file systems enforce readability/writability at an offset, + // see fs/9p/vfs_inode.c:v9fs_vfs_atomic_open -> fs/open.c:finish_open. + flags.Pread = true + flags.Pwrite = true + + f := &fileOperations{ + inodeOperations: i, + handles: handles, + flags: flags, + } + if flags.Write { + if err := dirent.Inode.CheckPermission(ctx, fs.PermMask{Execute: true}); err == nil { + name, _ := dirent.FullName(fs.RootFromContext(ctx)) + openedWX.Increment() + log.Warningf("Opened a writable executable: %q", name) + } + } + return fs.NewFile(ctx, dirent, flags, f) +} + +// Release implements fs.FileOpeations.Release. +func (f *fileOperations) Release() { + f.handles.DecRef() +} + +// Readdir implements fs.FileOperations.Readdir. 
+func (f *fileOperations) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) { + root := fs.RootFromContext(ctx) + defer root.DecRef() + + dirCtx := &fs.DirCtx{ + Serializer: serializer, + DirCursor: &f.dirCursor, + } + n, err := fs.DirentReaddir(ctx, file.Dirent, f, root, dirCtx, file.Offset()) + if f.inodeOperations.session().cachePolicy != cacheNone { + f.inodeOperations.cachingInodeOps.TouchAccessTime(ctx, file.Dirent.Inode) + } + return n, err +} + +// IterateDir implements fs.DirIterator.IterateDir. +func (f *fileOperations) IterateDir(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) { + f.inodeOperations.readdirMu.Lock() + defer f.inodeOperations.readdirMu.Unlock() + + // Fetch directory entries if needed. + if f.inodeOperations.readdirCache == nil || f.inodeOperations.session().cachePolicy == cacheNone { + entries, err := f.readdirAll(ctx) + if err != nil { + return offset, err + } + + // Cache the readdir result. + f.inodeOperations.readdirCache = fs.NewSortedDentryMap(entries) + } + + // Serialize the entries. + n, err := fs.GenericReaddir(dirCtx, f.inodeOperations.readdirCache) + return offset + n, err +} + +// readdirAll fetches fs.DentAttrs for f, using the attributes of g. +func (f *fileOperations) readdirAll(ctx context.Context) (map[string]fs.DentAttr, error) { + entries := make(map[string]fs.DentAttr) + var readOffset uint64 + for { + // We choose some arbitrary high number of directory entries (64k) and call + // Readdir until we've exhausted them all. + dirents, err := f.handles.File.readdir(ctx, readOffset, 64*1024) + if err != nil { + return nil, err + } + if len(dirents) == 0 { + // We're done, we reached EOF. + break + } + + // The last dirent contains the offset into the next set of dirents. The gofer + // returns the offset as an index into directories, not as a byte offset, because + // converting a byte offset to an index into directories entries is a huge pain. 
+ // But everything is fine if we're consistent. + readOffset = dirents[len(dirents)-1].Offset + + for _, dirent := range dirents { + if dirent.Name == "." || dirent.Name == ".." { + // These must not be included in Readdir results. + continue + } + + // Find a best approximation of the type. + var nt fs.InodeType + switch dirent.Type { + case p9.TypeDir: + nt = fs.Directory + case p9.TypeSymlink: + nt = fs.Symlink + default: + nt = fs.RegularFile + } + + // Install the DentAttr. + entries[dirent.Name] = fs.DentAttr{ + Type: nt, + // Construct the key to find the virtual inode. + // Directory entries reside on the same Device + // and SecondaryDevice as their parent. + InodeID: goferDevice.Map(device.MultiDeviceKey{ + Device: f.inodeOperations.fileState.key.Device, + SecondaryDevice: f.inodeOperations.fileState.key.SecondaryDevice, + Inode: dirent.QID.Path, + }), + } + } + } + + return entries, nil +} + +// Write implements fs.FileOperations.Write. +func (f *fileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + if fs.IsDir(file.Dirent.Inode.StableAttr) { + // Not all remote file systems enforce this so this client does. + return 0, syserror.EISDIR + } + + // Do cached IO for regular files only. Some character devices expect no caching. + isFile := fs.IsFile(file.Dirent.Inode.StableAttr) + if f.inodeOperations.session().cachePolicy == cacheNone || !isFile { + return src.CopyInTo(ctx, f.handles.readWriterAt(ctx, offset)) + } + return f.inodeOperations.cachingInodeOps.Write(ctx, src, offset) +} + +// Read implements fs.FileOperations.Read. +func (f *fileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + if fs.IsDir(file.Dirent.Inode.StableAttr) { + // Not all remote file systems enforce this so this client does. + return 0, syserror.EISDIR + } + + // Do cached IO for regular files only. Some character devices expect no caching. 
+ isFile := fs.IsFile(file.Dirent.Inode.StableAttr) + if f.inodeOperations.session().cachePolicy == cacheNone || !isFile { + return dst.CopyOutFrom(ctx, f.handles.readWriterAt(ctx, offset)) + } + return f.inodeOperations.cachingInodeOps.Read(ctx, file, dst, offset) +} + +// Fsync implements fs.FileOperations.Fsync. +func (f *fileOperations) Fsync(ctx context.Context, file *fs.File, start int64, end int64, syncType fs.SyncType) error { + switch syncType { + case fs.SyncAll, fs.SyncData: + if err := file.Dirent.Inode.WriteOut(ctx); err != nil { + return err + } + fallthrough + case fs.SyncBackingStorage: + // Sync remote caches. + if f.handles.Host != nil { + // Sync the host fd directly. + return syscall.Fsync(f.handles.Host.FD()) + } + // Otherwise sync on the p9.File handle. + return f.handles.File.fsync(ctx) + } + panic("invalid sync type") +} + +// Flush implements fs.FileOperations.Flush. +func (f *fileOperations) Flush(ctx context.Context, file *fs.File) error { + // If this file is not opened writable then there is nothing to flush. + // We do this because some p9 server implementations of Flush are + // over-zealous. + // + // FIXME: weaken these implementations and remove this check. + if !file.Flags().Write { + return nil + } + // Execute the flush. + return f.handles.File.flush(ctx) +} + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (f *fileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + if !isFileCachable(f.inodeOperations.session(), file.Dirent.Inode) { + return syserror.ENODEV + } + return fsutil.GenericConfigureMMap(file, f.inodeOperations.cachingInodeOps, opts) +} + +// Seek implements fs.FileOperations.Seek. 
+func (f *fileOperations) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) { + return fsutil.SeekWithDirCursor(ctx, file, whence, offset, &f.dirCursor) +} diff --git a/pkg/sentry/fs/gofer/file_state.go b/pkg/sentry/fs/gofer/file_state.go new file mode 100644 index 000000000..1d63e33ec --- /dev/null +++ b/pkg/sentry/fs/gofer/file_state.go @@ -0,0 +1,37 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// afterLoad is invoked by stateify. +func (f *fileOperations) afterLoad() { + load := func() { + f.inodeOperations.fileState.waitForLoad() + + // Manually load the open handles. + var err error + // TODO: Context is not plumbed to save/restore. + f.handles, err = newHandles(context.Background(), f.inodeOperations.fileState.file, f.flags) + if err != nil { + panic("failed to re-open handle: " + err.Error()) + } + f.inodeOperations.fileState.setHandlesForCachedIO(f.flags, f.handles) + } + fs.Async(load) +} diff --git a/pkg/sentry/fs/gofer/fs.go b/pkg/sentry/fs/gofer/fs.go new file mode 100644 index 000000000..0a1a49bbd --- /dev/null +++ b/pkg/sentry/fs/gofer/fs.go @@ -0,0 +1,252 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package gofer implements a remote 9p filesystem. +package gofer + +import ( + "errors" + "fmt" + "strconv" + + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// The following are options defined by the Linux 9p client that we support, +// see Documentation/filesystems/9p.txt. +const ( + // The transport method. + transportKey = "trans" + + // The file tree to access when the file server + // is exporting several file systems. Stands for "attach name". + anameKey = "aname" + + // The caching policy. + cacheKey = "cache" + + // The file descriptor for reading with trans=fd. + readFDKey = "rfdno" + + // The file descriptor for writing with trans=fd. + writeFDKey = "wfdno" + + // The number of bytes to use for a 9p packet payload. + msizeKey = "msize" + + // The 9p protocol version. + versionKey = "version" + + // If set to true allows the creation of unix domain sockets inside the + // sandbox using files backed by the gofer. If set to false, unix sockets + // cannot be bound to gofer files without an overlay on top. + privateUnixSocketKey = "privateunixsocket" +) + +// cachePolicy is a 9p cache policy. +type cachePolicy string + +const ( + // Use virtual file system cache. + cacheAll cachePolicy = "fscache" + + // TODO: fully support cache=none. + cacheNone cachePolicy = "none" + + // defaultCache is cacheAll. Note this diverges from the 9p Linux + // client whose default is "none". See TODO above. 
+ defaultCache = cacheAll +) + +// defaultAname is the default attach name. +const defaultAname = "/" + +// defaultMSize is the message size used for chunking large read and write requests. +// This has been tested to give good enough performance up to 64M. +const defaultMSize = 1024 * 1024 // 1M + +// defaultVersion is the default 9p protocol version. Will negotiate downwards with +// file server if needed. +var defaultVersion = p9.HighestVersionString() + +// Number of names of non-children to cache, preventing unneeded walks. 64 is +// plenty for nodejs, which seems to stat about 4 children on every require(). +const nonChildrenCacheSize = 64 + +var ( + // ErrNoTransport is returned when there is no 'trans' option. + ErrNoTransport = errors.New("missing required option: 'trans='") + + // ErrNoReadFD is returned when there is no 'rfdno' option. + ErrNoReadFD = errors.New("missing required option: 'rfdno='") + + // ErrNoWriteFD is returned when there is no 'wfdno' option. + ErrNoWriteFD = errors.New("missing required option: 'wfdno='") +) + +// filesystem is a 9p client. +type filesystem struct{} + +func init() { + fs.RegisterFilesystem(&filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +// The name matches fs/9p/vfs_super.c:v9fs_fs_type.name. +const FilesystemName = "9p" + +// Name is the name of the filesystem. +func (*filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount prohibits users from using mount(2) with this file system. +func (*filesystem) AllowUserMount() bool { + return false +} + +// Flags returns that there is nothing special about this file system. +// +// The 9p Linux client returns FS_RENAME_DOES_D_MOVE, see fs/9p/vfs_super.c. +func (*filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns an attached 9p client that can be positioned in the vfs. 
+func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string) (*fs.Inode, error) { + // Parse and validate the mount options. + o, err := options(data) + if err != nil { + return nil, err + } + + // Construct the 9p root to mount. We intentionally diverge from Linux in that + // the first Tversion and Tattach requests are done lazily. + return Root(ctx, device, f, flags, o) +} + +// opts are parsed 9p mount options. +type opts struct { + fd int + aname string + policy cachePolicy + msize uint32 + version string + privateunixsocket bool +} + +// options parses mount(2) data into structured options. +func options(data string) (opts, error) { + var o opts + + // Parse generic comma-separated key=value options, this file system expects them. + options := fs.GenericMountSourceOptions(data) + + // Check for the required 'trans=fd' option. + trans, ok := options[transportKey] + if !ok { + return o, ErrNoTransport + } + if trans != "fd" { + return o, fmt.Errorf("unsupported transport: 'trans=%s'", trans) + } + delete(options, transportKey) + + // Check for the required 'rfdno=' option. + srfd, ok := options[readFDKey] + if !ok { + return o, ErrNoReadFD + } + delete(options, readFDKey) + + // Check for the required 'wfdno=' option. + swfd, ok := options[writeFDKey] + if !ok { + return o, ErrNoWriteFD + } + delete(options, writeFDKey) + + // Parse the read fd. + rfd, err := strconv.Atoi(srfd) + if err != nil { + return o, fmt.Errorf("invalid fd for 'rfdno=%s': %v", srfd, err) + } + + // Parse the write fd. + wfd, err := strconv.Atoi(swfd) + if err != nil { + return o, fmt.Errorf("invalid fd for 'wfdno=%s': %v", swfd, err) + } + + // Require that the read and write fd are the same. + if rfd != wfd { + return o, fmt.Errorf("fd in 'rfdno=%d' and 'wfdno=%d' must match", rfd, wfd) + } + o.fd = rfd + + // Parse the attach name. 
+ o.aname = defaultAname + if an, ok := options[anameKey]; ok { + o.aname = an + delete(options, anameKey) + } + + // Parse the cache policy. Reject unsupported policies. + o.policy = cacheAll + if cp, ok := options[cacheKey]; ok { + if cachePolicy(cp) != cacheAll && cachePolicy(cp) != cacheNone { + return o, fmt.Errorf("unsupported cache mode: 'cache=%s'", cp) + } + o.policy = cachePolicy(cp) + delete(options, cacheKey) + } + + // Parse the message size. Reject malformed options. + o.msize = uint32(defaultMSize) + if m, ok := options[msizeKey]; ok { + i, err := strconv.ParseUint(m, 10, 32) + if err != nil { + return o, fmt.Errorf("invalid message size for 'msize=%s': %v", m, err) + } + o.msize = uint32(i) + delete(options, msizeKey) + } + + // Parse the protocol version. + o.version = defaultVersion + if v, ok := options[versionKey]; ok { + o.version = v + delete(options, versionKey) + } + + // Parse the unix socket policy. Reject non-booleans. + if v, ok := options[privateUnixSocketKey]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return o, fmt.Errorf("invalid boolean value for '%s=%s': %v", privateUnixSocketKey, v, err) + } + o.privateunixsocket = b + delete(options, privateUnixSocketKey) + } + + // Fail to attach if the caller wanted us to do something that we + // don't support. + if len(options) > 0 { + return o, fmt.Errorf("unsupported mount options: %v", options) + } + + return o, nil +} diff --git a/pkg/sentry/fs/gofer/gofer_test.go b/pkg/sentry/fs/gofer/gofer_test.go new file mode 100644 index 000000000..58a2e2ef5 --- /dev/null +++ b/pkg/sentry/fs/gofer/gofer_test.go @@ -0,0 +1,776 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "errors" + "fmt" + "io" + "syscall" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/p9/p9test" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +// A errMock is an error that comes from bad usage of the mock. +var errMock = errors.New("mock error") + +// goodMockFile returns a file that can be Walk'ed to and created. +func goodMockFile(mode p9.FileMode, size uint64) *p9test.FileMock { + return &p9test.FileMock{ + GetAttrMock: p9test.GetAttrMock{ + Valid: p9.AttrMask{Mode: true, Size: true, RDev: true}, + Attr: p9.Attr{Mode: mode, Size: size, RDev: 0}, + }, + } +} + +func newClosedSocket() (*unet.Socket, error) { + fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + return nil, err + } + + s, err := unet.NewSocket(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + + return s, s.Close() +} + +// root returns a p9 file mock and an fs.InodeOperations created from that file. Any +// functions performed on fs.InodeOperations will use the p9 file mock. 
// root builds a mock-backed root inode for tests: a dummy session wrapping a
// closed socket, a goodMockFile with the given mode/size, and the resulting
// fs.Inode whose InodeOperations delegate to the mock.
func root(ctx context.Context, mode p9.FileMode, size uint64) (*p9test.FileMock, *fs.Inode, error) {
	sock, err := newClosedSocket()
	if err != nil {
		return nil, nil, err
	}

	// Construct a dummy session that we can destruct.
	s := &session{
		conn:        sock,
		mounter:     fs.RootOwner,
		cachePolicy: cacheNone,
	}

	rootFile := goodMockFile(mode, size)
	sattr, rootInodeOperations := newInodeOperations(ctx, s, contextFile{file: rootFile}, p9.QID{}, rootFile.GetAttrMock.Valid, rootFile.GetAttrMock.Attr)
	m := fs.NewMountSource(s, &filesystem{}, fs.MountSourceFlags{})
	return rootFile, fs.NewInode(rootInodeOperations, m, sattr), nil
}

// TestLookup verifies that Lookup delegates to the mock's WalkGetAttr and
// propagates both success and failure.
//
// NOTE(review): the failure messages below say "TestWalk" but this is
// TestLookup — likely a stale copy; confirm before renaming the strings.
func TestLookup(t *testing.T) {
	// Test parameters.
	type lookupTest struct {
		// Name of the test.
		name string

		// Function input parameters.
		fileName string

		// Expected return value.
		want error
	}

	tests := []lookupTest{
		{
			name:     "mock Walk passes (function succeeds)",
			fileName: "ppp",
			want:     nil,
		},
		{
			name:     "mock Walk fails (function fails)",
			fileName: "ppp",
			want:     syscall.ENOENT,
		},
	}

	ctx := contexttest.Context(t)
	for _, test := range tests {
		// Set up mock. The mock's WalkGetAttr error is set to the expected
		// error so that the failure case flows through Lookup unchanged.
		rootFile, rootInode, err := root(ctx, p9.PermissionsMask, 0)
		if err != nil {
			t.Errorf("TestWalk %s failed: root error got %v, want nil", test.name, err)
		}

		rootFile.WalkGetAttrMock.QIDs = []p9.QID{{}}
		rootFile.WalkGetAttrMock.Err = test.want
		rootFile.WalkGetAttrMock.File = goodMockFile(p9.PermissionsMask, 0)

		// Call function.
		dirent, err := rootInode.Lookup(ctx, test.fileName)

		// Unwrap the InodeOperations. A negative dirent is normalized to
		// ENOENT so it can be compared against the expected error.
		var newInodeOperations fs.InodeOperations
		if dirent != nil {
			if dirent.IsNegative() {
				err = syscall.ENOENT
			} else {
				newInodeOperations = dirent.Inode.InodeOperations
			}
		}

		// Check return values.
+ if err != test.want { + t.Errorf("TestWalk %s failed: got %v, want %v", test.name, err, test.want) + } + if err == nil && newInodeOperations == nil { + t.Errorf("TestWalk %s failed: expected either non-nil err or non-nil node, but both are nil", test.name) + } + + // Check mock parameters. + if !rootFile.WalkGetAttrMock.Called { + t.Errorf("TestWalk %s failed: GetAttr not called; error: %v", test.name, err) + } else if rootFile.WalkGetAttrMock.Names[0] != test.fileName { + t.Errorf("TestWalk %s failed: file name not set", test.name) + } + } +} + +func TestSetTimestamps(t *testing.T) { + // Test parameters. + type setTimestampsTest struct { + // Name of the test. + name string + + // Function input parameters. + ts fs.TimeSpec + } + + ctx := contexttest.Context(t) + now := ktime.NowFromContext(ctx) + tests := []setTimestampsTest{ + { + name: "mock SetAttr passes (function succeeds)", + ts: fs.TimeSpec{ + ATime: now, + MTime: now, + }, + }, + { + name: "mock SetAttr passes, times are 0 (function succeeds)", + ts: fs.TimeSpec{}, + }, + { + name: "mock SetAttr passes, times are 0 and not system time (function succeeds)", + ts: fs.TimeSpec{ + ATimeSetSystemTime: false, + MTimeSetSystemTime: false, + }, + }, + { + name: "mock SetAttr passes, times are set to system time (function succeeds)", + ts: fs.TimeSpec{ + ATimeSetSystemTime: true, + MTimeSetSystemTime: true, + }, + }, + { + name: "mock SetAttr passes, times are omitted (function succeeds)", + ts: fs.TimeSpec{ + ATimeOmit: true, + MTimeOmit: true, + }, + }, + } + + for _, test := range tests { + // Set up mock. + rootFile, rootInode, err := root(ctx, p9.PermissionsMask, 0) + if err != nil { + t.Errorf("TestSetTimestamps %s failed: root error got %v, want nil", test.name, err) + } + + // Call function. + err = rootInode.SetTimestamps(ctx, nil /* Dirent */, test.ts) + + // Check return values. 
+ if err != nil { + t.Errorf("TestSetTimestamps %s failed: got %v, want nil", test.name, err) + } + + // Check mock parameters. + if !(test.ts.ATimeOmit && test.ts.MTimeOmit) && !rootFile.SetAttrMock.Called { + t.Errorf("TestSetTimestamps %s failed: SetAttr not called", test.name) + continue + } + + // Check what was passed to the mock function. + attr := rootFile.SetAttrMock.Attr + atimeGiven := ktime.FromUnix(int64(attr.ATimeSeconds), int64(attr.ATimeNanoSeconds)) + if test.ts.ATimeOmit { + if rootFile.SetAttrMock.Valid.ATime { + t.Errorf("TestSetTimestamps %s failed: ATime got set true in mask, wanted false", test.name) + } + } else { + if got, want := rootFile.SetAttrMock.Valid.ATimeNotSystemTime, !test.ts.ATimeSetSystemTime; got != want { + t.Errorf("TestSetTimestamps %s failed: got ATimeNotSystemTime %v, want %v", test.name, got, want) + } + if !test.ts.ATimeSetSystemTime && !test.ts.ATime.Equal(atimeGiven) { + t.Errorf("TestSetTimestamps %s failed: ATime got %v, want %v", test.name, atimeGiven, test.ts.ATime) + } + } + + mtimeGiven := ktime.FromUnix(int64(attr.MTimeSeconds), int64(attr.MTimeNanoSeconds)) + if test.ts.MTimeOmit { + if rootFile.SetAttrMock.Valid.MTime { + t.Errorf("TestSetTimestamps %s failed: MTime got set true in mask, wanted false", test.name) + } + } else { + if got, want := rootFile.SetAttrMock.Valid.MTimeNotSystemTime, !test.ts.MTimeSetSystemTime; got != want { + t.Errorf("TestSetTimestamps %s failed: got MTimeNotSystemTime %v, want %v", test.name, got, want) + } + if !test.ts.MTimeSetSystemTime && !test.ts.MTime.Equal(mtimeGiven) { + t.Errorf("TestSetTimestamps %s failed: MTime got %v, want %v", test.name, mtimeGiven, test.ts.MTime) + } + } + + } +} + +func TestSetPermissions(t *testing.T) { + // Test parameters. + type setPermissionsTest struct { + // Name of the test. + name string + + // SetPermissions input parameters. + perms fs.FilePermissions + + // Error that SetAttr mock should return. 
+ setAttrErr error + + // Expected return value. + want bool + } + + tests := []setPermissionsTest{ + { + name: "SetAttr mock succeeds (function succeeds)", + perms: fs.FilePermissions{User: fs.PermMask{Read: true, Write: true, Execute: true}}, + want: true, + setAttrErr: nil, + }, + { + name: "SetAttr mock fails (function fails)", + perms: fs.FilePermissions{User: fs.PermMask{Read: true, Write: true}}, + want: false, + setAttrErr: syscall.ENOENT, + }, + } + + ctx := contexttest.Context(t) + for _, test := range tests { + // Set up mock. + rootFile, rootInode, err := root(ctx, 0, 0) + if err != nil { + t.Errorf("TestSetPermissions %s failed: root error got %v, want nil", test.name, err) + } + rootFile.SetAttrMock.Err = test.setAttrErr + + ok := rootInode.SetPermissions(ctx, nil /* Dirent */, test.perms) + + // Check return value. + if ok != test.want { + t.Errorf("TestSetPermissions %s failed: got %v, want %v", test.name, ok, test.want) + } + + // Check mock parameters. + pattr := rootFile.SetAttrMock.Attr + if !rootFile.SetAttrMock.Called { + t.Errorf("TestSetPermissions %s failed: SetAttr not called", test.name) + continue + } + if !rootFile.SetAttrMock.Valid.Permissions { + t.Errorf("TestSetPermissions %s failed: SetAttr did not get right request (got false, expected SetAttrMask.Permissions true)", + test.name) + } + if got := fs.FilePermsFromP9(pattr.Permissions); got != test.perms { + t.Errorf("TestSetPermissions %s failed: SetAttr did not get right permissions -- got %v, want %v", + test.name, got, test.perms) + } + } +} + +func TestClose(t *testing.T) { + ctx := contexttest.Context(t) + // Set up mock. + rootFile, rootInode, err := root(ctx, p9.PermissionsMask, 0) + if err != nil { + t.Errorf("TestClose failed: root error got %v, want nil", err) + } + + // Call function. + rootInode.InodeOperations.Release(ctx) + + // Check mock parameters. 
+ if !rootFile.CloseMock.Called { + t.Errorf("TestClose failed: Close not called") + } +} + +func TestRename(t *testing.T) { + // Test parameters. + type renameTest struct { + // Name of the test. + name string + + // Input parameters. + newParent *fs.Inode + newName string + + // Rename mock parameters. + renameErr error + renameCalled bool + + // Error want to return given the parameters. (Same as what + // we expect and tell rename to return.) + want error + } + ctx := contexttest.Context(t) + rootFile, rootInode, err := root(ctx, p9.PermissionsMask, 0) + if err != nil { + t.Errorf("TestRename failed: root error got %v, want nil", err) + } + + tests := []renameTest{ + { + name: "mock Rename succeeds (function succeeds)", + newParent: rootInode, + newName: "foo2", + want: nil, + renameErr: nil, + renameCalled: true, + }, + { + name: "mock Rename fails (function fails)", + newParent: rootInode, + newName: "foo2", + want: syscall.ENOENT, + renameErr: syscall.ENOENT, + renameCalled: true, + }, + { + name: "newParent is not inodeOperations but should be (function fails)", + newParent: fs.NewMockInode(ctx, fs.NewMockMountSource(nil), fs.StableAttr{Type: fs.Directory}), + newName: "foo2", + want: syscall.EXDEV, + renameErr: nil, + renameCalled: false, + }, + } + + for _, test := range tests { + mockFile := goodMockFile(p9.PermissionsMask, 0) + rootFile.WalkGetAttrMock.QIDs = []p9.QID{{}} + rootFile.WalkGetAttrMock.File = mockFile + + dirent, err := rootInode.Lookup(ctx, "foo") + if err != nil { + t.Fatalf("root.Walk failed: %v", err) + } + mockFile.RenameMock.Err = test.renameErr + mockFile.RenameMock.Called = false + + // Use a dummy oldParent to acquire write access to that directory. + oldParent := &inodeOperations{ + readdirCache: fs.NewSortedDentryMap(nil), + } + oldInode := fs.NewInode(oldParent, fs.NewMockMountSource(nil), fs.StableAttr{Type: fs.Directory}) + + // Call function. 
+ err = dirent.Inode.InodeOperations.Rename(ctx, oldInode, "", test.newParent, test.newName) + + // Check return value. + if err != test.want { + t.Errorf("TestRename %s failed: got %v, want %v", test.name, err, test.want) + } + + // Check mock parameters. + if got, want := mockFile.RenameMock.Called, test.renameCalled; got != want { + t.Errorf("TestRename %s failed: renameCalled got %v want %v", test.name, got, want) + } + } +} + +// This file is read from in TestPreadv. +type readAtFileFake struct { + p9test.FileMock + + // Parameters for faking ReadAt. + FileLength int + Err error + ChunkSize int + Called bool + LengthRead int +} + +func (r *readAtFileFake) ReadAt(p []byte, offset uint64) (int, error) { + r.Called = true + log.Warningf("ReadAt fake: length read so far = %d, len(p) = %d, offset = %d", r.LengthRead, len(p), offset) + if int(offset) != r.LengthRead { + return 0, fmt.Errorf("offset got %d; expected %d", offset, r.LengthRead) + } + + if r.Err != nil { + return 0, r.Err + } + + if r.LengthRead >= r.FileLength { + return 0, io.EOF + } + + // Read at most ChunkSize and read at most what's left in the file. + toBeRead := len(p) + if r.LengthRead+toBeRead >= r.FileLength { + toBeRead = r.FileLength - int(offset) + } + if toBeRead > r.ChunkSize { + toBeRead = r.ChunkSize + } + + r.LengthRead += toBeRead + if r.LengthRead == r.FileLength { + return toBeRead, io.EOF + } + return toBeRead, nil +} + +func TestPreadv(t *testing.T) { + // Test parameters. + type preadvTest struct { + // Name of the test. + name string + + // Mock parameters + mode p9.FileMode + + // Buffer to read into. + buffer [512]byte + sliceSize int + + // How much readAt returns at a time. + chunkSize int + + // Whether or not we expect ReadAt to be called. + readAtCalled bool + readAtErr error + + // Expected return values. 
+ want error + } + + tests := []preadvTest{ + { + name: "fake ReadAt succeeds, 512 bytes requested, 512 byte chunks (function succeeds)", + want: nil, + readAtErr: nil, + mode: p9.PermissionsMask, + readAtCalled: true, + sliceSize: 512, + chunkSize: 512, + }, + { + name: "fake ReadAt succeeds, 512 bytes requested, 200 byte chunks (function succeeds)", + want: nil, + readAtErr: nil, + mode: p9.PermissionsMask, + readAtCalled: true, + sliceSize: 512, + chunkSize: 200, + }, + { + name: "fake ReadAt succeeds, 0 bytes requested (function succeeds)", + want: nil, + readAtErr: nil, + mode: p9.PermissionsMask, + readAtCalled: false, + sliceSize: 0, + chunkSize: 100, + }, + { + name: "fake ReadAt returns 0 bytes and EOF (function fails)", + want: io.EOF, + readAtErr: io.EOF, + mode: p9.PermissionsMask, + readAtCalled: true, + sliceSize: 512, + chunkSize: 512, + }, + } + + ctx := contexttest.Context(t) + for _, test := range tests { + // Set up mock. + rootFile, rootInode, err := root(ctx, test.mode, 1024) + if err != nil { + t.Errorf("TestPreadv %s failed: root error got %v, want nil", test.name, err) + } + + // Set up the read buffer. + dst := usermem.BytesIOSequence(test.buffer[:test.sliceSize]) + + // This file will be read from. + openFile := &readAtFileFake{ + Err: test.readAtErr, + FileLength: test.sliceSize, + ChunkSize: test.chunkSize, + } + rootFile.WalkGetAttrMock.File = openFile + rootFile.WalkGetAttrMock.Attr.Mode = test.mode + rootFile.WalkGetAttrMock.Valid.Mode = true + + f := NewFile( + ctx, + fs.NewDirent(rootInode, ""), + fs.FileFlags{Read: true}, + rootInode.InodeOperations.(*inodeOperations), + &handles{File: contextFile{file: openFile}}, + ) + + // Call function. + _, err = f.Preadv(ctx, dst, 0) + + // Check return value. + if err != test.want { + t.Errorf("TestPreadv %s failed: got %v, want %v", test.name, err, test.want) + } + + // Check mock parameters. 
+ if test.readAtCalled != openFile.Called { + t.Errorf("TestPreadv %s failed: ReadAt called: %v, but expected opposite", test.name, openFile.Called) + } + } +} + +func TestReadlink(t *testing.T) { + // Test parameters. + type readlinkTest struct { + // Name of the test. + name string + + // Mock parameters + mode p9.FileMode + + // Whether or not we expect ReadAt to be called and what error + // it shall return. + readlinkCalled bool + readlinkErr error + + // Expected return values. + want error + } + + tests := []readlinkTest{ + { + name: "file is not symlink (function fails)", + want: syscall.ENOLINK, + mode: p9.PermissionsMask, + readlinkCalled: false, + readlinkErr: nil, + }, + { + name: "mock Readlink succeeds (function succeeds)", + want: nil, + mode: p9.PermissionsMask | p9.ModeSymlink, + readlinkCalled: true, + readlinkErr: nil, + }, + { + name: "mock Readlink fails (function fails)", + want: syscall.ENOENT, + mode: p9.PermissionsMask | p9.ModeSymlink, + readlinkCalled: true, + readlinkErr: syscall.ENOENT, + }, + } + + ctx := contexttest.Context(t) + for _, test := range tests { + // Set up mock. + rootFile, rootInode, err := root(ctx, test.mode, 0) + if err != nil { + t.Errorf("TestReadlink %s failed: root error got %v, want nil", test.name, err) + } + + openFile := goodMockFile(test.mode, 0) + rootFile.WalkMock.File = openFile + rootFile.ReadlinkMock.Err = test.readlinkErr + + // Call function. + _, err = rootInode.Readlink(ctx) + + // Check return value. + if err != test.want { + t.Errorf("TestReadlink %s failed: got %v, want %v", test.name, err, test.want) + } + + // Check mock parameters. + if test.readlinkCalled && !rootFile.ReadlinkMock.Called { + t.Errorf("TestReadlink %s failed: Readlink not called", test.name) + } + } +} + +// This file is write from in TestPwritev. +type writeAtFileFake struct { + p9test.FileMock + + // Parameters for faking WriteAt. 
+ Err error + ChunkSize int + Called bool + LengthWritten int +} + +func (r *writeAtFileFake) WriteAt(p []byte, offset uint64) (int, error) { + r.Called = true + log.Warningf("WriteAt fake: length written so far = %d, len(p) = %d, offset = %d", r.LengthWritten, len(p), offset) + if int(offset) != r.LengthWritten { + return 0, fmt.Errorf("offset got %d; want %d", offset, r.LengthWritten) + } + + if r.Err != nil { + return 0, r.Err + } + + // Write at most ChunkSize. + toBeWritten := len(p) + if toBeWritten > r.ChunkSize { + toBeWritten = r.ChunkSize + } + r.LengthWritten += toBeWritten + return toBeWritten, nil +} + +func TestPwritev(t *testing.T) { + // Test parameters. + type pwritevTest struct { + // Name of the test. + name string + + // Mock parameters + mode p9.FileMode + + allowWrite bool + + // Buffer to write into. + buffer [512]byte + sliceSize int + chunkSize int + + // Whether or not we expect writeAt to be called. + writeAtCalled bool + writeAtErr error + + // Expected return values. 
+ want error + } + + tests := []pwritevTest{ + { + name: "fake writeAt succeeds, one chunk (function succeeds)", + want: nil, + writeAtErr: nil, + mode: p9.PermissionsMask, + allowWrite: true, + writeAtCalled: true, + sliceSize: 512, + chunkSize: 512, + }, + { + name: "fake writeAt fails, short write (function fails)", + want: io.ErrShortWrite, + writeAtErr: nil, + mode: p9.PermissionsMask, + allowWrite: true, + writeAtCalled: true, + sliceSize: 512, + chunkSize: 200, + }, + { + name: "fake writeAt succeeds, len 0 (function succeeds)", + want: nil, + writeAtErr: nil, + mode: p9.PermissionsMask, + allowWrite: true, + writeAtCalled: false, + sliceSize: 0, + chunkSize: 0, + }, + { + name: "writeAt can still write despite file permissions read only (function succeeds)", + want: nil, + writeAtErr: nil, + mode: p9.PermissionsMask, + allowWrite: false, + writeAtCalled: true, + sliceSize: 512, + chunkSize: 512, + }, + } + + ctx := contexttest.Context(t) + for _, test := range tests { + // Set up mock. + _, rootInode, err := root(ctx, test.mode, 0) + if err != nil { + t.Errorf("TestPwritev %s failed: root error got %v, want nil", test.name, err) + } + + src := usermem.BytesIOSequence(test.buffer[:test.sliceSize]) + + // This is the file that will be used for writing. + openFile := &writeAtFileFake{ + Err: test.writeAtErr, + ChunkSize: test.chunkSize, + } + + f := NewFile( + ctx, + fs.NewDirent(rootInode, ""), + fs.FileFlags{Write: true}, + rootInode.InodeOperations.(*inodeOperations), + &handles{File: contextFile{file: openFile}}, + ) + + // Call function. + _, err = f.Pwritev(ctx, src, 0) + + // Check return value. + if err != test.want { + t.Errorf("TestPwritev %s failed: got %v, want %v", test.name, err, test.want) + } + + // Check mock parameters. 
+ if test.writeAtCalled != openFile.Called { + t.Errorf("TestPwritev %s failed: WriteAt called: %v, but expected opposite", test.name, openFile.Called) + continue + } + if openFile.Called && test.writeAtErr != nil && openFile.LengthWritten != test.sliceSize { + t.Errorf("TestPwritev %s failed: wrote %d bytes, expected %d bytes written", test.name, openFile.LengthWritten, test.sliceSize) + } + } +} diff --git a/pkg/sentry/fs/gofer/handles.go b/pkg/sentry/fs/gofer/handles.go new file mode 100644 index 000000000..a660c9230 --- /dev/null +++ b/pkg/sentry/fs/gofer/handles.go @@ -0,0 +1,144 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "io" + + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/secio" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" +) + +// handles are the open handles of a gofer file. They are reference counted to +// support open handle sharing between files for read only filesystems. +// +// If Host != nil then it will be used exclusively over File. +type handles struct { + refs.AtomicRefCount + + // File is a p9.File handle. Must not be nil. + File contextFile + + // Host is an *fd.FD handle. May be nil. 
+ Host *fd.FD +} + +// DecRef drops a reference on handles. +func (h *handles) DecRef() { + h.DecRefWithDestructor(func() { + if h.Host != nil { + if err := h.Host.Close(); err != nil { + log.Warningf("error closing host file: %v", err) + } + } + // FIXME: Context is not plumbed here. + if err := h.File.close(context.Background()); err != nil { + log.Warningf("error closing p9 file: %v", err) + } + }) +} + +func newHandles(ctx context.Context, file contextFile, flags fs.FileFlags) (*handles, error) { + _, newFile, err := file.walk(ctx, nil) + if err != nil { + return nil, err + } + + switch { + case flags.Read && flags.Write: + hostFile, _, _, err := newFile.open(ctx, p9.ReadWrite) + if err != nil { + newFile.close(ctx) + return nil, err + } + h := &handles{ + File: newFile, + Host: hostFile, + } + return h, nil + case flags.Read && !flags.Write: + hostFile, _, _, err := newFile.open(ctx, p9.ReadOnly) + if err != nil { + newFile.close(ctx) + return nil, err + } + h := &handles{ + File: newFile, + Host: hostFile, + } + return h, nil + case !flags.Read && flags.Write: + hostFile, _, _, err := newFile.open(ctx, p9.WriteOnly) + if err != nil { + newFile.close(ctx) + return nil, err + } + h := &handles{ + File: newFile, + Host: hostFile, + } + return h, nil + default: + panic("impossible fs.FileFlags") + } +} + +type handleReadWriter struct { + ctx context.Context + h *handles + off int64 +} + +func (h *handles) readWriterAt(ctx context.Context, offset int64) *handleReadWriter { + return &handleReadWriter{ctx, h, offset} +} + +// ReadToBlocks implements safemem.Reader.ReadToBlocks. 
func (rw *handleReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
	// Prefer the donated host FD when available; otherwise fall back to p9
	// RPCs through the gofer connection.
	var r io.Reader
	if rw.h.Host != nil {
		r = secio.NewOffsetReader(rw.h.Host, rw.off)
	} else {
		r = &p9.ReadWriterFile{File: rw.h.File.file, Offset: uint64(rw.off)}
	}

	// Mark the task uninterruptible while blocked on external I/O so the
	// sentry accounts the sleep correctly.
	rw.ctx.UninterruptibleSleepStart(false)
	defer rw.ctx.UninterruptibleSleepFinish(false)
	n, err := safemem.FromIOReader{r}.ReadToBlocks(dsts)
	// Advance the offset so sequential calls continue where this one ended.
	rw.off += int64(n)
	return n, err
}

// WriteFromBlocks implements safemem.Writer.WriteFromBlocks.
func (rw *handleReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {
	// Mirror of ReadToBlocks: host FD if donated, else p9 RPCs.
	var w io.Writer
	if rw.h.Host != nil {
		w = secio.NewOffsetWriter(rw.h.Host, rw.off)
	} else {
		w = &p9.ReadWriterFile{File: rw.h.File.file, Offset: uint64(rw.off)}
	}

	rw.ctx.UninterruptibleSleepStart(false)
	defer rw.ctx.UninterruptibleSleepFinish(false)
	n, err := safemem.FromIOWriter{w}.WriteFromBlocks(srcs)
	rw.off += int64(n)
	return n, err
}
diff --git a/pkg/sentry/fs/gofer/inode.go b/pkg/sentry/fs/gofer/inode.go
new file mode 100644
index 000000000..454242923
--- /dev/null
+++ b/pkg/sentry/fs/gofer/inode.go
@@ -0,0 +1,554 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gofer

import (
	"errors"
	"sync"
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/fd"
	"gvisor.googlesource.com/gvisor/pkg/log"
	"gvisor.googlesource.com/gvisor/pkg/p9"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/device"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/fdpipe"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/host"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// inodeOperations implements fs.InodeOperations.
type inodeOperations struct {
	fsutil.InodeNotVirtual           `state:"nosave"`
	fsutil.InodeNoExtendedAttributes `state:"nosave"`
	fsutil.DeprecatedFileOperations  `state:"nosave"`

	// fileState implements fs.CachedFileObject. It exists
	// to break a circular load dependency between inodeOperations
	// and cachingInodeOps (below).
	fileState *inodeFileState `state:"wait"`

	// cachingInodeOps implement memmap.Mappable for inodeOperations.
	cachingInodeOps *fsutil.CachingInodeOperations

	// readdirMu protects readdirCache and concurrent Readdirs.
	readdirMu sync.Mutex `state:"nosave"`

	// readdirCache is a cache of readdir results in the form of
	// a fs.SortedDentryMap.
	//
	// Starts out as nil, and is initialized under readdirMu lazily;
	// invalidating the cache means setting it to nil.
	readdirCache *fs.SortedDentryMap `state:"nosave"`
}

// inodeFileState implements fs.CachedFileObject and otherwise fully
// encapsulates state that needs to be manually loaded on restore for
// this file object.
//
// This unfortunate structure exists because fs.CachingInodeOperations
// defines afterLoad and therefore cannot be lazily loaded (to break a
// circular load dependency between it and inodeOperations). Even with
// lazy loading, this approach defines the dependencies between objects
// and the expected load behavior more concretely.
type inodeFileState struct {
	// s is common file system state for Gofers.
	s *session `state:"wait"`

	// MultiDeviceKey consists of:
	//
	// * Device: file system device from a specific gofer.
	// * SecondaryDevice: unique identifier of the attach point.
	// * Inode: the inode of this resource, unique per Device.
	//
	// These fields combined enable consistent hashing of virtual inodes
	// on goferDevice.
	key device.MultiDeviceKey `state:"nosave"`

	// file is the p9 file that contains a single unopened fid.
	file contextFile `state:"nosave"`

	// sattr caches the stable attributes.
	sattr fs.StableAttr `state:"wait"`

	// handlesMu protects the below fields.
	handlesMu sync.RWMutex `state:"nosave"`

	// Do minimal open handle caching: only for read only filesystems.
	readonly *handles `state:"nosave"`

	// Maintain readthrough handles for populating page caches.
	readthrough *handles `state:"nosave"`

	// Maintain writeback handles for syncing from page caches.
	writeback *handles `state:"nosave"`

	// writebackRW indicates whether writeback is opened read-write. If
	// it is not and a read-write handle could replace writeback (above),
	// then writeback is replaced with the read-write handle. This
	// ensures that files that were first opened write-only and then
	// later are opened read-write to be mapped can in fact be mapped.
	writebackRW bool

	// loading is acquired when the inodeFileState begins an asynchronous
	// load. It releases when the load is complete. Callers that require all
	// state to be available should call waitForLoad() to ensure that.
	loading sync.Mutex `state:".(struct{})"`

	// savedUAttr is only allocated during S/R. It points to the save-time
	// unstable attributes and is used to validate restore-time ones.
	//
	// Note that these unstable attributes are only used to detect cross-S/R
	// external file system metadata changes. They may differ from the
	// cached unstable attributes in cachingInodeOps, as that might differ
	// from the external file system attributes if there had been WriteOut
	// failures. S/R is transparent to Sentry and the latter will continue
	// using its cached values after restore.
	savedUAttr *fs.UnstableAttr
}

// Release releases file handles: the unopened fid and any cached
// readonly/readthrough/writeback handles that were installed.
func (i *inodeFileState) Release(ctx context.Context) {
	i.file.close(ctx)
	if i.readonly != nil {
		i.readonly.DecRef()
	}
	if i.readthrough != nil {
		i.readthrough.DecRef()
	}
	if i.writeback != nil {
		i.writeback.DecRef()
	}
}

// setHandlesForCachedIO installs file handles for reading and writing
// through fs.CachingInodeOperations.
//
// A read-write handle may upgrade a previously write-only writeback handle
// (see writebackRW) so that mappings are possible later.
func (i *inodeFileState) setHandlesForCachedIO(flags fs.FileFlags, h *handles) {
	i.handlesMu.Lock()
	defer i.handlesMu.Unlock()

	if flags.Read {
		if i.readthrough == nil {
			h.IncRef()
			i.readthrough = h
		}
	}
	if flags.Write {
		if i.writeback == nil {
			h.IncRef()
			i.writeback = h
		} else if !i.writebackRW && flags.Read {
			// Upgrade: replace the write-only writeback handle with
			// this read-write one.
			i.writeback.DecRef()
			h.IncRef()
			i.writeback = h
		}
		if flags.Read {
			i.writebackRW = true
		}
	}
}

// getCachedHandles returns any cached handles which would accelerate
// performance generally. These handles should only be used if the mount
// supports caching. This is distinct from fs.CachingInodeOperations
// which is used for a limited set of file types (those that can be mapped).
// getCachedHandles returns a shared read-only handle when the mount is
// read-only and the open is read-only; in all other cases it reports false
// and the caller must open its own handles. The cached handle is created
// lazily on first use.
func (i *inodeFileState) getCachedHandles(ctx context.Context, flags fs.FileFlags, msrc *fs.MountSource) (*handles, bool) {
	i.handlesMu.Lock()
	defer i.handlesMu.Unlock()

	if flags.Read && !flags.Write && msrc.Flags.ReadOnly {
		if i.readonly != nil {
			i.readonly.IncRef()
			return i.readonly, true
		}
		h, err := newHandles(ctx, i.file, flags)
		if err != nil {
			return nil, false
		}
		i.readonly = h
		// Hand out an extra reference; the cache keeps its own.
		i.readonly.IncRef()
		return i.readonly, true
	}

	return nil, false
}

// ReadToBlocksAt implements fsutil.CachedFileObject.ReadToBlocksAt.
//
// NOTE(review): this dereferences i.readthrough without a nil check; it
// appears callers guarantee a readthrough handle is installed first — confirm.
func (i *inodeFileState) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) {
	i.handlesMu.RLock()
	defer i.handlesMu.RUnlock()
	return i.readthrough.readWriterAt(ctx, int64(offset)).ReadToBlocks(dsts)
}

// WriteFromBlocksAt implements fsutil.CachedFileObject.WriteFromBlocksAt.
func (i *inodeFileState) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) {
	i.handlesMu.RLock()
	defer i.handlesMu.RUnlock()
	return i.writeback.readWriterAt(ctx, int64(offset)).WriteFromBlocks(srcs)
}

// SetMaskedAttributes implements fsutil.CachedFileObject.SetMaskedAttributes.
func (i *inodeFileState) SetMaskedAttributes(ctx context.Context, mask fs.AttrMask, attr fs.UnstableAttr) error {
	if mask.Empty() {
		return nil
	}
	as, ans := attr.AccessTime.Unix()
	ms, mns := attr.ModificationTime.Unix()
	// An update of status change time is implied by mask.AccessTime
	// or mask.ModificationTime. Updating status change time to a
	// time earlier than the system time is not possible.
	// Forward the masked attributes to the gofer via p9 setattr; times are
	// set explicitly (NotSystemTime) from the sentry's cached values.
	return i.file.setAttr(
		ctx,
		p9.SetAttrMask{
			Permissions:        mask.Perms,
			Size:               mask.Size,
			UID:                mask.UID,
			GID:                mask.GID,
			ATime:              mask.AccessTime,
			ATimeNotSystemTime: true,
			MTime:              mask.ModificationTime,
			MTimeNotSystemTime: true,
		}, p9.SetAttr{
			Permissions:      p9.FileMode(attr.Perms.LinuxMode()),
			UID:              p9.UID(attr.Owner.UID),
			GID:              p9.GID(attr.Owner.GID),
			Size:             uint64(attr.Size),
			ATimeSeconds:     uint64(as),
			ATimeNanoSeconds: uint64(ans),
			MTimeSeconds:     uint64(ms),
			MTimeNanoSeconds: uint64(mns),
		})
}

// Sync implements fsutil.CachedFileObject.Sync. Syncing is a no-op when no
// writeback handle exists, since nothing could have been written.
func (i *inodeFileState) Sync(ctx context.Context) error {
	i.handlesMu.RLock()
	defer i.handlesMu.RUnlock()
	if i.writeback == nil {
		return nil
	}
	return i.writeback.File.fsync(ctx)
}

// FD implements fsutil.CachedFileObject.FD.
//
// FD meets the requirements of fsutil.CachedFileObject.FD because p9.File.Open
// returns a host file descriptor to back _both_ readthrough and writeback or
// not at all (e.g. both are nil).
func (i *inodeFileState) FD() int {
	i.handlesMu.RLock()
	defer i.handlesMu.RUnlock()

	// Assert that the file was actually opened.
	if i.writeback == nil && i.readthrough == nil {
		panic("cannot get host FD for a file that was never opened")
	}
	// If this file is mapped, then it must have been opened
	// read-write and i.writeback was upgraded to a read-write
	// handle. Prefer that to map.
	if i.writeback != nil {
		if i.writeback.Host == nil {
			return -1
		}
		return int(i.writeback.Host.FD())
	}
	// Otherwise the file may only have been opened readable
	// so far. That's the only way it can be accessed.
	if i.readthrough.Host == nil {
		return -1
	}
	return int(i.readthrough.Host.FD())
}

// waitForLoad makes sure any restore-issued loading is done.
func (i *inodeFileState) waitForLoad() {
	// This is not a no-op. The loading mutex is held upon restore until
	// all loading actions are done: Lock blocks until the load completes,
	// and the immediate Unlock leaves the mutex reusable.
	i.loading.Lock()
	i.loading.Unlock()
}

// unstableAttr fetches fresh unstable attributes from the gofer, bypassing
// any sentry-side caching.
func (i *inodeFileState) unstableAttr(ctx context.Context) (fs.UnstableAttr, error) {
	_, valid, pattr, err := getattr(ctx, i.file)
	if err != nil {
		return fs.UnstableAttr{}, err
	}
	return unstable(ctx, valid, pattr, i.s.mounter, i.s.client), nil
}

// session extracts the gofer's session from the MountSource.
func (i *inodeOperations) session() *session {
	return i.fileState.s
}

// Release implements fs.InodeOperations.Release.
func (i *inodeOperations) Release(ctx context.Context) {
	i.fileState.Release(ctx)
	i.cachingInodeOps.Release()
}

// Mappable implements fs.InodeOperations.Mappable. Only regular files on
// caching mounts are mappable.
func (i *inodeOperations) Mappable(inode *fs.Inode) memmap.Mappable {
	if i.session().cachePolicy == cacheNone || !fs.IsFile(inode.StableAttr) {
		return nil
	}
	return i.cachingInodeOps
}

// isCachable reports whether attributes for inode (a file or directory) may
// be served from the sentry cache under session's cache policy.
func isCachable(session *session, inode *fs.Inode) bool {
	return session.cachePolicy != cacheNone && (fs.IsFile(inode.StableAttr) || fs.IsDir(inode.StableAttr))
}

// isFileCachable is like isCachable but restricted to regular files.
func isFileCachable(session *session, inode *fs.Inode) bool {
	return session.cachePolicy != cacheNone && fs.IsFile(inode.StableAttr)
}

// UnstableAttr implements fs.InodeOperations.UnstableAttr. Cached attributes
// are used when the cache policy allows; otherwise the gofer is queried.
func (i *inodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
	if isCachable(i.session(), inode) {
		return i.cachingInodeOps.UnstableAttr(ctx, inode)
	}
	return i.fileState.unstableAttr(ctx)
}

// Check implements fs.InodeOperations.Check.
func (i *inodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool {
	return fs.ContextCanAccessFile(ctx, inode, p)
}

// GetFile implements fs.InodeOperations.GetFile.
+func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + switch d.Inode.StableAttr.Type { + case fs.Socket: + return i.getFileSocket(ctx, d, flags) + case fs.Pipe: + return i.getFilePipe(ctx, d, flags) + default: + return i.getFileDefault(ctx, d, flags) + } +} + +func (i *inodeOperations) getFileSocket(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + f, err := i.fileState.file.connect(ctx, p9.AnonymousSocket) + if err != nil { + return nil, syscall.EIO + } + fsf, err := host.NewSocketWithDirent(ctx, d, f, flags) + if err != nil { + f.Close() + return nil, err + } + return fsf, nil +} + +func (i *inodeOperations) getFilePipe(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + // Try to open as a host pipe. + if pipeOps, err := fdpipe.Open(ctx, i, flags); err != errNotHostFile { + return fs.NewFile(ctx, d, flags, pipeOps), err + } + + // If the error is due to the fact that this was never a host pipe, then back + // this file with its dirent. + h, err := newHandles(ctx, i.fileState.file, flags) + if err != nil { + return nil, err + } + return NewFile(ctx, d, flags, i, h), nil +} + +// errNotHostFile indicates that the file is not a host file. +var errNotHostFile = errors.New("not a host file") + +// NonBlockingOpen implements fdpipe.NonBlockingOpener for opening host named pipes. +func (i *inodeOperations) NonBlockingOpen(ctx context.Context, p fs.PermMask) (*fd.FD, error) { + i.fileState.waitForLoad() + + // Get a cloned fid which we will open. 
+ _, newFile, err := i.fileState.file.walk(ctx, nil) + if err != nil { + log.Warningf("Open Walk failed: %v", err) + return nil, err + } + defer newFile.close(ctx) + + flags, err := openFlagsFromPerms(p) + if err != nil { + log.Warningf("Open flags %s parsing failed: %v", p, err) + return nil, err + } + hostFile, _, _, err := newFile.open(ctx, flags) + // If the host file returned is nil and the error is nil, + // then this was never a host file to begin with, and should + // be treated like a remote file. + if hostFile == nil && err == nil { + return nil, errNotHostFile + } + return hostFile, err +} + +func (i *inodeOperations) getFileDefault(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + if !isFileCachable(i.session(), d.Inode) { + h, err := newHandles(ctx, i.fileState.file, flags) + if err != nil { + return nil, err + } + return NewFile(ctx, d, flags, i, h), nil + } + + h, ok := i.fileState.getCachedHandles(ctx, flags, d.Inode.MountSource) + if !ok { + var err error + h, err = newHandles(ctx, i.fileState.file, flags) + if err != nil { + return nil, err + } + } + i.fileState.setHandlesForCachedIO(flags, h) + + return NewFile(ctx, d, flags, i, h), nil +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (i *inodeOperations) SetPermissions(ctx context.Context, inode *fs.Inode, p fs.FilePermissions) bool { + if isCachable(i.session(), inode) { + return i.cachingInodeOps.SetPermissions(ctx, inode, p) + } + + mask := p9.SetAttrMask{Permissions: true} + pattr := p9.SetAttr{Permissions: p9.FileMode(p.LinuxMode())} + // Execute the chmod. + return i.fileState.file.setAttr(ctx, mask, pattr) == nil +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (i *inodeOperations) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error { + // Save the roundtrip. 
+	if !owner.UID.Ok() && !owner.GID.Ok() {
+		return nil
+	}
+
+	if isCachable(i.session(), inode) {
+		return i.cachingInodeOps.SetOwner(ctx, inode, owner)
+	}
+
+	// Only set the fields the caller asked for; leave the rest unmasked.
+	var mask p9.SetAttrMask
+	var attr p9.SetAttr
+	if owner.UID.Ok() {
+		mask.UID = true
+		attr.UID = p9.UID(owner.UID)
+	}
+	if owner.GID.Ok() {
+		mask.GID = true
+		attr.GID = p9.GID(owner.GID)
+	}
+	return i.fileState.file.setAttr(ctx, mask, attr)
+}
+
+// SetTimestamps implements fs.InodeOperations.SetTimestamps.
+func (i *inodeOperations) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error {
+	if isCachable(i.session(), inode) {
+		return i.cachingInodeOps.SetTimestamps(ctx, inode, ts)
+	}
+
+	return utimes(ctx, i.fileState.file, ts)
+}
+
+// Truncate implements fs.InodeOperations.Truncate.
+func (i *inodeOperations) Truncate(ctx context.Context, inode *fs.Inode, length int64) error {
+	// This can only be called for files anyway.
+	if isFileCachable(i.session(), inode) {
+		return i.cachingInodeOps.Truncate(ctx, inode, length)
+	}
+
+	return i.fileState.file.setAttr(ctx, p9.SetAttrMask{Size: true}, p9.SetAttr{Size: uint64(length)})
+}
+
+// WriteOut implements fs.InodeOperations.WriteOut.
+func (i *inodeOperations) WriteOut(ctx context.Context, inode *fs.Inode) error {
+	if !isCachable(i.session(), inode) {
+		// Nothing is cached, so there is nothing to write back.
+		return nil
+	}
+
+	return i.cachingInodeOps.WriteOut(ctx, inode)
+}
+
+// Readlink implements fs.InodeOperations.Readlink.
+func (i *inodeOperations) Readlink(ctx context.Context, inode *fs.Inode) (string, error) {
+	if !fs.IsSymlink(inode.StableAttr) {
+		return "", syscall.ENOLINK
+	}
+	return i.fileState.file.readlink(ctx)
+}
+
+// Getlink implements fs.InodeOperations.Getlink.
+func (i *inodeOperations) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) {
+	if !fs.IsSymlink(i.fileState.sattr) {
+		return nil, syserror.ENOLINK
+	}
+	return nil, fs.ErrResolveViaReadlink
+}
+
+// StatFS makes a StatFS request.
+func (i *inodeOperations) StatFS(ctx context.Context) (fs.Info, error) { + fsstat, err := i.fileState.file.statFS(ctx) + if err != nil { + return fs.Info{}, err + } + + info := fs.Info{ + // This is primarily for distinguishing a gofer file system in + // tests. Testing is important, so instead of defining + // something completely random, use a standard value. + Type: linux.V9FS_MAGIC, + TotalBlocks: fsstat.Blocks, + FreeBlocks: fsstat.BlocksFree, + TotalFiles: fsstat.Files, + FreeFiles: fsstat.FilesFree, + } + + // If blocks available is non-zero, prefer that. + if fsstat.BlocksAvailable != 0 { + info.FreeBlocks = fsstat.BlocksAvailable + } + + return info, nil +} + +func init() { + syserror.AddErrorUnwrapper(func(err error) (syscall.Errno, bool) { + if _, ok := err.(p9.ErrSocket); ok { + // Treat as an I/O error. + return syscall.EIO, true + } + return 0, false + }) +} + +// AddLink implements InodeOperations.AddLink, but is currently a noop. +// FIXME: Remove this from InodeOperations altogether. +func (*inodeOperations) AddLink() {} + +// DropLink implements InodeOperations.DropLink, but is currently a noop. +// FIXME: Remove this from InodeOperations altogether. +func (*inodeOperations) DropLink() {} + +// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange. +// FIXME: Remove this from InodeOperations altogether. +func (i *inodeOperations) NotifyStatusChange(ctx context.Context) {} diff --git a/pkg/sentry/fs/gofer/inode_state.go b/pkg/sentry/fs/gofer/inode_state.go new file mode 100644 index 000000000..997a7d1c1 --- /dev/null +++ b/pkg/sentry/fs/gofer/inode_state.go @@ -0,0 +1,141 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "fmt" + "strings" + + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" +) + +// Some fs implementations may not support atime, ctime, or mtime in getattr. +// The unstable() logic would try to use clock time for them. However, we do not +// want to use such time during S/R as that would cause restore timestamp +// checking failure. Hence a dummy stable-time clock is needed. +// +// Note that application-visible UnstableAttrs either come from CachingInodeOps +// (in which case they are saved), or they are requested from the gofer on each +// stat (for non-caching), so the dummy time only affects the modification +// timestamp check. +type dummyClock struct { + time.Clock +} + +// Now returns a stable dummy time. +func (d *dummyClock) Now() time.Time { + return time.Time{} +} + +type dummyClockContext struct { + context.Context +} + +// Value implements context.Context +func (d *dummyClockContext) Value(key interface{}) interface{} { + switch key { + case time.CtxRealtimeClock: + return &dummyClock{} + default: + return d.Context.Value(key) + } +} + +// beforeSave is invoked by stateify. +func (i *inodeFileState) beforeSave() { + if _, ok := i.s.inodeMappings[i.sattr.InodeID]; !ok { + panic(fmt.Sprintf("failed to find path for inode number %d. 
Device %s contains %s", i.sattr.InodeID, i.s.connID, fs.InodeMappings(i.s.inodeMappings))) + } + if i.sattr.Type == fs.RegularFile { + uattr, err := i.unstableAttr(&dummyClockContext{context.Background()}) + if err != nil { + panic(fmt.Sprintf("failed to get unstable atttribute of %s: %v", i.s.inodeMappings[i.sattr.InodeID], err)) + } + i.savedUAttr = &uattr + } +} + +// saveLoading is invoked by stateify. +func (i *inodeFileState) saveLoading() struct{} { + return struct{}{} +} + +// loadLoading is invoked by stateify. +func (i *inodeFileState) loadLoading(_ struct{}) { + i.loading.Lock() +} + +// afterLoad is invoked by stateify. +func (i *inodeFileState) afterLoad() { + load := func() { + // See comment on i.loading(). + defer i.loading.Unlock() + + // Manually restore the p9.File. + name, ok := i.s.inodeMappings[i.sattr.InodeID] + if !ok { + // This should be impossible, see assertion in + // beforeSave. + panic(fmt.Sprintf("failed to find path for inode number %d. Device %s contains %s", i.sattr.InodeID, i.s.connID, fs.InodeMappings(i.s.inodeMappings))) + } + // TODO: Context is not plumbed to save/restore. + ctx := &dummyClockContext{context.Background()} + var err error + _, i.file, err = i.s.attach.walk(ctx, strings.Split(name, "/")) + if err != nil { + panic(fmt.Sprintf("failed to walk to %q: %v", name, err)) + } + + // Remap the saved inode number into the gofer device using the + // actual device and actual inode that exists in our new + // environment. 
+ qid, mask, attrs, err := i.file.getAttr(ctx, p9.AttrMaskAll()) + if err != nil { + panic(fmt.Sprintf("failed to get file attributes of %s: %v", name, err)) + } + if !mask.RDev { + panic(fmt.Sprintf("file %s lacks device", name)) + } + i.key = device.MultiDeviceKey{ + Device: attrs.RDev, + SecondaryDevice: i.s.connID, + Inode: qid.Path, + } + if !goferDevice.Load(i.key, i.sattr.InodeID) { + panic(fmt.Sprintf("gofer device %s -> %d conflict in gofer device mappings: %s", i.key, i.sattr.InodeID, goferDevice)) + } + + if i.sattr.Type == fs.RegularFile { + env, ok := fs.CurrentRestoreEnvironment() + if !ok { + panic("missing restore environment") + } + uattr := unstable(ctx, mask, attrs, i.s.mounter, i.s.client) + if env.ValidateFileSize && uattr.Size != i.savedUAttr.Size { + panic(fmt.Errorf("file size has changed for %s: previously %d, now %d", i.s.inodeMappings[i.sattr.InodeID], i.savedUAttr.Size, uattr.Size)) + } + if env.ValidateFileTimestamp && uattr.ModificationTime != i.savedUAttr.ModificationTime { + panic(fmt.Errorf("file modification time has changed for %s: previously %v, now %v", i.s.inodeMappings[i.sattr.InodeID], i.savedUAttr.ModificationTime, uattr.ModificationTime)) + } + i.savedUAttr = nil + } + } + + fs.Async(load) +} diff --git a/pkg/sentry/fs/gofer/path.go b/pkg/sentry/fs/gofer/path.go new file mode 100644 index 000000000..d696f1561 --- /dev/null +++ b/pkg/sentry/fs/gofer/path.go @@ -0,0 +1,331 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "fmt" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +// Lookup loads an Inode at name into a Dirent based on the session's cache +// policy. +func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) { + if i.session().cachePolicy != cacheNone { + // Check to see if we have readdirCache that indicates the + // child does not exist. Avoid holding readdirMu longer than + // we need to. + i.readdirMu.Lock() + if i.readdirCache != nil && !i.readdirCache.Contains(name) { + // No such child. Return a negative dirent. + i.readdirMu.Unlock() + return fs.NewNegativeDirent(name), nil + } + i.readdirMu.Unlock() + } + + // Get a p9.File for name. + qids, newFile, mask, p9attr, err := i.fileState.file.walkGetAttr(ctx, []string{name}) + if err != nil { + if err == syscall.ENOENT { + if i.session().cachePolicy != cacheNone { + // Return a negative Dirent. It will stay cached until something + // is created over it. + return fs.NewNegativeDirent(name), nil + } + return nil, syserror.ENOENT + } + return nil, err + } + + // Construct the Inode operations. + sattr, node := newInodeOperations(ctx, i.fileState.s, newFile, qids[0], mask, p9attr) + + // Construct a positive Dirent. + return fs.NewDirent(fs.NewInode(node, dir.MountSource, sattr), name), nil +} + +// Creates a new Inode at name and returns its File based on the session's cache policy. +// +// Ownership is currently ignored. 
+func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) { + // Create replaces the directory fid with the newly created/opened + // file, so clone this directory so it doesn't change out from under + // this node. + _, newFile, err := i.fileState.file.walk(ctx, nil) + if err != nil { + return nil, err + } + + // Map the FileFlags to p9 OpenFlags. + var openFlags p9.OpenFlags + switch { + case flags.Read && flags.Write: + openFlags = p9.ReadWrite + case flags.Read: + openFlags = p9.ReadOnly + case flags.Write: + openFlags = p9.WriteOnly + default: + panic(fmt.Sprintf("Create called with unknown or unset open flags: %v", flags)) + } + + owner := fs.FileOwnerFromContext(ctx) + hostFile, err := newFile.create(ctx, name, openFlags, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)) + if err != nil { + // Could not create the file. + return nil, err + } + + i.touchModificationTime(ctx) + + // Get the attributes of the file. + qid, mask, p9attr, err := getattr(ctx, newFile) + if err != nil { + newFile.close(ctx) + return nil, err + } + + // Get an unopened p9.File for the file we created so that it can be + // cloned and re-opened multiple times after creation. + _, unopened, err := i.fileState.file.walk(ctx, []string{name}) + if err != nil { + newFile.close(ctx) + return nil, err + } + + // Construct the InodeOperations. + sattr, iops := newInodeOperations(ctx, i.fileState.s, unopened, qid, mask, p9attr) + + // Construct the positive Dirent. + d := fs.NewDirent(fs.NewInode(iops, dir.MountSource, sattr), name) + defer d.DecRef() + + // Construct the new file, caching the handles if allowed. 
+	h := &handles{
+		File: newFile,
+		Host: hostFile,
+	}
+	if isFileCachable(iops.session(), d.Inode) {
+		iops.fileState.setHandlesForCachedIO(flags, h)
+	}
+	return NewFile(ctx, d, flags, iops, h), nil
+}
+
+// CreateLink uses Create to create a symlink between oldname and newname.
+func (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname string, newname string) error {
+	owner := fs.FileOwnerFromContext(ctx)
+	if _, err := i.fileState.file.symlink(ctx, oldname, newname, p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {
+		return err
+	}
+	i.touchModificationTime(ctx)
+	return nil
+}
+
+// CreateHardLink implements InodeOperations.CreateHardLink.
+func (i *inodeOperations) CreateHardLink(ctx context.Context, _ *fs.Inode, target *fs.Inode, newName string) error {
+	targetOpts, ok := target.InodeOperations.(*inodeOperations)
+	if !ok {
+		// Cannot hard link across filesystems.
+		return syscall.EXDEV
+	}
+
+	if err := i.fileState.file.link(ctx, &targetOpts.fileState.file, newName); err != nil {
+		return err
+	}
+	// TODO: Don't increase link count because we can't properly account for links
+	// with gofers.
+	i.touchModificationTime(ctx)
+	return nil
+}
+
+// CreateDirectory uses Create to create a directory named s under inodeOperations.
+func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, s string, perm fs.FilePermissions) error {
+	owner := fs.FileOwnerFromContext(ctx)
+	if _, err := i.fileState.file.mkdir(ctx, s, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {
+		return err
+	}
+	if i.session().cachePolicy == cacheAll {
+		// Increase link count.
+		i.cachingInodeOps.IncLinks(ctx)
+
+		// Invalidate readdir cache.
+		i.markDirectoryDirty()
+	}
+	return nil
+}
+
+// Bind implements InodeOperations.
+func (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, ep unix.BoundEndpoint, perm fs.FilePermissions) error { + if i.session().endpoints == nil { + return syscall.EOPNOTSUPP + } + + // Create replaces the directory fid with the newly created/opened + // file, so clone this directory so it doesn't change out from under + // this node. + _, newFile, err := i.fileState.file.walk(ctx, nil) + if err != nil { + return err + } + + // Stabilize the endpoint map while creation is in progress. + unlock := i.session().endpoints.lock() + defer unlock() + + // Create a regular file in the gofer and then mark it as a socket by + // adding this inode key in the 'endpoints' map. + owner := fs.FileOwnerFromContext(ctx) + hostFile, err := newFile.create(ctx, name, p9.ReadWrite, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)) + if err != nil { + return err + } + // We're not going to use this file. + hostFile.Close() + + i.touchModificationTime(ctx) + + // Get the attributes of the file to create inode key. + qid, _, attr, err := getattr(ctx, newFile) + if err != nil { + newFile.close(ctx) + return err + } + + key := device.MultiDeviceKey{ + Device: attr.RDev, + SecondaryDevice: i.session().connID, + Inode: qid.Path, + } + i.session().endpoints.add(key, ep) + + return nil +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. Gofer nodes do not support the +// creation of fifos and always returns EOPNOTSUPP. +func (*inodeOperations) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error { + return syscall.EOPNOTSUPP +} + +// Remove implements InodeOperations.Remove. +func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error { + var key device.MultiDeviceKey + removeSocket := false + if i.session().endpoints != nil { + // Find out if file being deleted is a socket that needs to be + // removed from endpoint map. 
+	if d, err := i.Lookup(ctx, dir, name); err == nil {
+		defer d.DecRef()
+		if fs.IsSocket(d.Inode.StableAttr) {
+			child := d.Inode.InodeOperations.(*inodeOperations)
+			key = child.fileState.key
+			removeSocket = true
+
+			// Stabilize the endpoint map while deletion is in progress.
+			unlock := i.session().endpoints.lock()
+			defer unlock()
+		}
+	}
+	}
+
+	if err := i.fileState.file.unlinkAt(ctx, name, 0); err != nil {
+		return err
+	}
+	if removeSocket {
+		i.session().endpoints.remove(key)
+	}
+	i.touchModificationTime(ctx)
+
+	return nil
+}
+
+// RemoveDirectory implements InodeOperations.RemoveDirectory.
+func (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error {
+	// 0x200 = AT_REMOVEDIR.
+	if err := i.fileState.file.unlinkAt(ctx, name, 0x200); err != nil {
+		return err
+	}
+	if i.session().cachePolicy == cacheAll {
+		// Decrease link count and update atime.
+		i.cachingInodeOps.DecLinks(ctx)
+
+		// Invalidate readdir cache.
+		i.markDirectoryDirty()
+	}
+	return nil
+}
+
+// Rename renames this node.
+func (i *inodeOperations) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error {
+	// Unwrap the new parent to a *inodeOperations.
+	newParentInodeOperations, ok := newParent.InodeOperations.(*inodeOperations)
+	if !ok {
+		// Cannot rename across filesystems.
+		return syscall.EXDEV
+	}
+
+	// Unwrap the old parent to a *inodeOperations.
+	oldParentInodeOperations, ok := oldParent.InodeOperations.(*inodeOperations)
+	if !ok {
+		return syscall.EXDEV
+	}
+
+	// Do the rename.
+	if err := i.fileState.file.rename(ctx, newParentInodeOperations.fileState.file, newName); err != nil {
+		return err
+	}
+
+	// Update cached state.
+	if i.session().cachePolicy == cacheAll {
+		// Is the renamed entity a directory? Fix link counts.
+		if fs.IsDir(i.fileState.sattr) {
+			oldParentInodeOperations.cachingInodeOps.DecLinks(ctx)
+			newParentInodeOperations.cachingInodeOps.IncLinks(ctx)
+		}
+
+		// Mark old directory dirty.
+ oldParentInodeOperations.markDirectoryDirty() + if oldParent != newParent { + // Mark new directory dirty. + newParentInodeOperations.markDirectoryDirty() + } + } + return nil +} + +func (i *inodeOperations) touchModificationTime(ctx context.Context) { + if i.session().cachePolicy == cacheAll { + i.cachingInodeOps.TouchModificationTime(ctx) + + // Invalidate readdir cache. + i.markDirectoryDirty() + } +} + +// markDirectoryDirty marks any cached data dirty for this directory. This is necessary in order +// to ensure that this node does not retain stale state throughout its lifetime across multiple +// open directory handles. +// +// Currently this means invalidating any readdir caches. +func (i *inodeOperations) markDirectoryDirty() { + i.readdirMu.Lock() + defer i.readdirMu.Unlock() + i.readdirCache = nil +} diff --git a/pkg/sentry/fs/gofer/session.go b/pkg/sentry/fs/gofer/session.go new file mode 100644 index 000000000..ab3b964e0 --- /dev/null +++ b/pkg/sentry/fs/gofer/session.go @@ -0,0 +1,251 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gofer + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +type endpointMap struct { + mu sync.RWMutex + m map[device.MultiDeviceKey]unix.BoundEndpoint +} + +// add adds the endpoint to the map. +// +// Precondition: map must have been locked with 'lock'. +func (e *endpointMap) add(key device.MultiDeviceKey, ep unix.BoundEndpoint) { + e.m[key] = ep +} + +// remove deletes the key from the map. +// +// Precondition: map must have been locked with 'lock'. +func (e *endpointMap) remove(key device.MultiDeviceKey) { + delete(e.m, key) +} + +// lock blocks other addition and removal operations from happening while +// the backing file is being created or deleted. Returns a function that unlocks +// the endpoint map. +func (e *endpointMap) lock() func() { + e.mu.Lock() + return func() { e.mu.Unlock() } +} + +func (e *endpointMap) get(key device.MultiDeviceKey) unix.BoundEndpoint { + e.mu.RLock() + ep := e.m[key] + e.mu.RUnlock() + return ep +} + +// session holds state for each 9p session established during sys_mount. +type session struct { + refs.AtomicRefCount + + // conn is a unet.Socket that wraps the readFD/writeFD mount option, + // see fs/gofer/fs.go. + conn *unet.Socket `state:"nosave"` + + // msize is the value of the msize mount option, see fs/gofer/fs.go. + msize uint32 `state:"wait"` + + // version is the value of the version mount option, see fs/gofer/fs.go. + version string `state:"wait"` + + // cachePolicy is the cache policy. It may be either cacheAll or cacheNone. 
+	cachePolicy cachePolicy `state:"wait"`
+
+	// aname is the value of the aname mount option, see fs/gofer/fs.go.
+	aname string `state:"wait"`
+
+	// The client associated with this session. This will be initialized lazily.
+	client *p9.Client `state:"nosave"`
+
+	// The p9.File pointing to attachName via the client. This will be initialized
+	// lazily.
+	attach contextFile `state:"nosave"`
+
+	// Flags provided to the mount.
+	superBlockFlags fs.MountSourceFlags `state:"wait"`
+
+	// connID is a unique identifier for the session connection.
+	connID string `state:"wait"`
+
+	// inodeMappings contains mappings of fs.Inodes associated with this session
+	// to paths relative to the attach point, where inodeMappings is keyed by
+	// Inode.StableAttr.InodeID.
+	inodeMappings map[uint64]string `state:"wait"`
+
+	// mounter is the EUID/EGID that mounted this file system.
+	mounter fs.FileOwner `state:"wait"`
+
+	// endpoints is used to map inodes that represent socket files to their
+	// corresponding endpoint. Socket files are created as regular files in the
+	// gofer and their presence in this map indicate that they should indeed be
+	// socket files. This allows unix domain sockets to be used with paths that
+	// belong to a gofer.
+	//
+	// TODO: there are few possible races with someone stat'ing the
+	// file and another deleting it concurrently, where the file will not be
+	// reported as socket file.
+	endpoints *endpointMap `state:"wait"`
+}
+
+// Destroy tears down the session.
+func (s *session) Destroy() {
+	s.conn.Close()
+}
+
+// Revalidate returns true if the cache policy does not allow for VFS caching.
+func (s *session) Revalidate(*fs.Dirent) bool {
+	return s.cachePolicy == cacheNone
+}
+
+// Keep returns true if the given dirent should be retained in the dirent
+// cache; only file and directory dirents are kept.
+func (s *session) Keep(dirent *fs.Dirent) bool {
+	// NOTE: Only cache files and directories.
+ sattr := dirent.Inode.StableAttr + return s.cachePolicy != cacheNone && (fs.IsFile(sattr) || fs.IsDir(sattr)) +} + +// ResetInodeMappings implements fs.MountSourceOperations.ResetInodeMappings. +func (s *session) ResetInodeMappings() { + s.inodeMappings = make(map[uint64]string) +} + +// SaveInodeMapping implements fs.MountSourceOperations.SaveInodeMapping. +func (s *session) SaveInodeMapping(inode *fs.Inode, path string) { + // This is very unintuitive. We *CANNOT* trust the inode's StableAttrs, + // because overlay copyUp may have changed them out from under us. + // So much for "immutable". + sattr := inode.InodeOperations.(*inodeOperations).fileState.sattr + s.inodeMappings[sattr.InodeID] = path +} + +// newInodeOperations creates a new 9p fs.InodeOperations backed by a p9.File and attributes +// (p9.QID, p9.AttrMask, p9.Attr). +func newInodeOperations(ctx context.Context, s *session, file contextFile, qid p9.QID, valid p9.AttrMask, attr p9.Attr) (fs.StableAttr, *inodeOperations) { + deviceKey := device.MultiDeviceKey{ + Device: attr.RDev, + SecondaryDevice: s.connID, + Inode: qid.Path, + } + + sattr := fs.StableAttr{ + Type: ntype(attr), + DeviceID: goferDevice.DeviceID(), + InodeID: goferDevice.Map(deviceKey), + BlockSize: bsize(attr), + } + + if s.endpoints != nil { + // If unix sockets are allowed on this filesystem, check if this file is + // supposed to be a socket file. + if s.endpoints.get(deviceKey) != nil { + sattr.Type = fs.Socket + } + } + + fileState := &inodeFileState{ + s: s, + file: file, + sattr: sattr, + key: deviceKey, + } + + uattr := unstable(ctx, valid, attr, s.mounter, s.client) + return sattr, &inodeOperations{ + fileState: fileState, + cachingInodeOps: fsutil.NewCachingInodeOperations(ctx, fileState, uattr, s.superBlockFlags.ForcePageCache), + } +} + +// Root returns the root of a 9p mount. This mount is bound to a 9p server +// based on conn. 
Otherwise configuration parameters are: +// +// * dev: connection id +// * filesystem: the filesystem backing the mount +// * superBlockFlags: the mount flags describing general mount options +// * opts: parsed 9p mount options +func Root(ctx context.Context, dev string, filesystem fs.Filesystem, superBlockFlags fs.MountSourceFlags, o opts) (*fs.Inode, error) { + // The mounting EUID/EGID will be cached by this file system. This will + // be used to assign ownership to files that the Gofer owns. + mounter := fs.FileOwnerFromContext(ctx) + + conn, err := unet.NewSocket(o.fd) + if err != nil { + return nil, err + } + + // Construct the session. + s := &session{ + connID: dev, + conn: conn, + msize: o.msize, + version: o.version, + cachePolicy: o.policy, + aname: o.aname, + superBlockFlags: superBlockFlags, + mounter: mounter, + } + + if o.privateunixsocket { + s.endpoints = &endpointMap{m: make(map[device.MultiDeviceKey]unix.BoundEndpoint)} + } + + // Construct the MountSource with the session and superBlockFlags. + m := fs.NewMountSource(s, filesystem, superBlockFlags) + + // Send the Tversion request. + s.client, err = p9.NewClient(s.conn, s.msize, s.version) + if err != nil { + // Drop our reference on the session, it needs to be torn down. + s.DecRef() + return nil, err + } + + // Notify that we're about to call the Gofer and block. + ctx.UninterruptibleSleepStart(false) + // Send the Tattach request. + s.attach.file, err = s.client.Attach(s.aname) + ctx.UninterruptibleSleepFinish(false) + if err != nil { + // Same as above. + s.DecRef() + return nil, err + } + + qid, valid, attr, err := s.attach.getAttr(ctx, p9.AttrMaskAll()) + if err != nil { + s.attach.close(ctx) + // Same as above, but after we execute the Close request. 
		s.DecRef()
		return nil, err
	}

	sattr, iops := newInodeOperations(ctx, s, s.attach, qid, valid, attr)
	return fs.NewInode(iops, m, sattr), nil
}
diff --git a/pkg/sentry/fs/gofer/session_state.go b/pkg/sentry/fs/gofer/session_state.go
new file mode 100644
index 000000000..4d993a219
--- /dev/null
+++ b/pkg/sentry/fs/gofer/session_state.go
@@ -0,0 +1,90 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gofer

import (
	"fmt"

	"gvisor.googlesource.com/gvisor/pkg/p9"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/unet"
)

// afterLoad is invoked by stateify.
//
// It re-establishes the 9p connection for this session from the restore
// environment, panicking (which fails the restore) if the environment has no
// matching mount or the new mount's options disagree with the saved ones.
func (s *session) afterLoad() {
	// The restore environment contains the 9p connection of this mount.
	fsys := filesystem{}
	env, ok := fs.CurrentRestoreEnvironment()
	if !ok {
		panic("failed to find restore environment")
	}
	mounts, ok := env.MountSources[fsys.Name()]
	if !ok {
		panic("failed to find mounts for filesystem type " + fsys.Name())
	}
	// Locate this session's mount by connection ID; the last match wins.
	var args fs.MountArgs
	var found bool
	for _, mount := range mounts {
		if mount.Dev == s.connID {
			args = mount
			found = true
		}
	}
	if !found {
		panic(fmt.Sprintf("no connection for connection id %q", s.connID))
	}

	// Validate the mount flags and options.
	opts, err := options(args.Data)
	if err != nil {
		panic("failed to parse mount options: " + err.Error())
	}
	if opts.msize != s.msize {
		panic(fmt.Sprintf("new message size %v, want %v", opts.msize, s.msize))
	}
	if opts.version != s.version {
		panic(fmt.Sprintf("new version %v, want %v", opts.version, s.version))
	}
	if opts.policy != s.cachePolicy {
		panic(fmt.Sprintf("new cache policy %v, want %v", opts.policy, s.cachePolicy))
	}
	if opts.aname != s.aname {
		panic(fmt.Sprintf("new attach name %v, want %v", opts.aname, s.aname))
	}
	if opts.privateunixsocket != (s.endpoints != nil) {
		panic(fmt.Sprintf("new privateunixsocket option %v, want %v", opts.privateunixsocket, s.endpoints != nil))
	}
	if args.Flags != s.superBlockFlags {
		panic(fmt.Sprintf("new mount flags %v, want %v", args.Flags, s.superBlockFlags))
	}

	// Manually restore the connection.
	s.conn, err = unet.NewSocket(opts.fd)
	if err != nil {
		panic(fmt.Sprintf("failed to create Socket for FD %d: %v", opts.fd, err))
	}

	// Manually restore the client.
	s.client, err = p9.NewClient(s.conn, s.msize, s.version)
	if err != nil {
		panic(fmt.Sprintf("failed to connect client to server: %v", err))
	}

	// Manually restore the attach point.
	s.attach.file, err = s.client.Attach(s.aname)
	if err != nil {
		panic(fmt.Sprintf("failed to attach to aname: %v", err))
	}
}
diff --git a/pkg/sentry/fs/gofer/socket.go b/pkg/sentry/fs/gofer/socket.go
new file mode 100644
index 000000000..954000ef0
--- /dev/null
+++ b/pkg/sentry/fs/gofer/socket.go
@@ -0,0 +1,127 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofer + +import ( + "gvisor.googlesource.com/gvisor/pkg/p9" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/host" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// BoundEndpoint returns a gofer-backed unix.BoundEndpoint. +func (i *inodeOperations) BoundEndpoint(inode *fs.Inode, path string) unix.BoundEndpoint { + if !fs.IsSocket(i.fileState.sattr) { + return nil + } + + if i.session().endpoints != nil { + ep := i.session().endpoints.get(i.fileState.key) + if ep != nil { + return ep + } + + // Not found in endpoints map, it may be a gofer backed unix socket... + } + + inode.IncRef() + return &endpoint{inode, i.fileState.file.file, path} +} + +// endpoint is a Gofer-backed unix.BoundEndpoint. +// +// An endpoint's lifetime is the time between when InodeOperations.BoundEndpoint() +// is called and either BoundEndpoint.BidirectionalConnect or +// BoundEndpoint.UnidirectionalConnect is called. +type endpoint struct { + // inode is the filesystem inode which produced this endpoint. + inode *fs.Inode + + // file is the p9 file that contains a single unopened fid. + file p9.File + + // path is the sentry path where this endpoint is bound. 
+ path string +} + +func unixSockToP9(t unix.SockType) (p9.ConnectFlags, bool) { + switch t { + case unix.SockStream: + return p9.StreamSocket, true + case unix.SockSeqpacket: + return p9.SeqpacketSocket, true + case unix.SockDgram: + return p9.DgramSocket, true + } + return 0, false +} + +// BidirectionalConnect implements ConnectableEndpoint.BidirectionalConnect. +func (e *endpoint) BidirectionalConnect(ce unix.ConnectingEndpoint, returnConnect func(unix.Receiver, unix.ConnectedEndpoint)) *tcpip.Error { + cf, ok := unixSockToP9(ce.Type()) + if !ok { + return tcpip.ErrConnectionRefused + } + + // No lock ordering required as only the ConnectingEndpoint has a mutex. + ce.Lock() + defer ce.Unlock() + + // Check connecting state. + if ce.Connected() { + return tcpip.ErrAlreadyConnected + } + if ce.Listening() { + return tcpip.ErrInvalidEndpointState + } + + hostFile, err := e.file.Connect(cf) + if err != nil { + return tcpip.ErrConnectionRefused + } + + r, c, terr := host.NewConnectedEndpoint(hostFile, ce.WaiterQueue(), e.path) + if terr != nil { + return terr + } + returnConnect(r, c) + return nil +} + +// UnidirectionalConnect implements unix.BoundEndpoint.UnidirectionalConnect. +func (e *endpoint) UnidirectionalConnect() (unix.ConnectedEndpoint, *tcpip.Error) { + hostFile, err := e.file.Connect(p9.DgramSocket) + if err != nil { + return nil, tcpip.ErrConnectionRefused + } + + r, c, terr := host.NewConnectedEndpoint(hostFile, &waiter.Queue{}, e.path) + if terr != nil { + return nil, terr + } + + // We don't need the receiver. + r.CloseRecv() + r.Release() + + return c, nil +} + +// Release implements unix.BoundEndpoint.Release. +func (e *endpoint) Release() { + e.inode.DecRef() +} diff --git a/pkg/sentry/fs/gofer/util.go b/pkg/sentry/fs/gofer/util.go new file mode 100644 index 000000000..d9ed8c81e --- /dev/null +++ b/pkg/sentry/fs/gofer/util.go @@ -0,0 +1,60 @@ +// Copyright 2018 Google Inc. 
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gofer

import (
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/p9"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
)

// utimes applies the access/modification times in ts to file via a 9p
// setattr. It is a no-op when both times are omitted.
func utimes(ctx context.Context, file contextFile, ts fs.TimeSpec) error {
	if ts.ATimeOmit && ts.MTimeOmit {
		return nil
	}
	mask := p9.SetAttrMask{
		ATime:              !ts.ATimeOmit,
		ATimeNotSystemTime: !ts.ATimeSetSystemTime,
		MTime:              !ts.MTimeOmit,
		MTimeNotSystemTime: !ts.MTimeSetSystemTime,
	}
	as, ans := ts.ATime.Unix()
	ms, mns := ts.MTime.Unix()
	attr := p9.SetAttr{
		ATimeSeconds:     uint64(as),
		ATimeNanoSeconds: uint64(ans),
		MTimeSeconds:     uint64(ms),
		MTimeNanoSeconds: uint64(mns),
	}
	// 9p2000.L SetAttr: "If a time bit is set without the corresponding SET bit,
	// the current system time on the server is used instead of the value sent
	// in the request."
	return file.setAttr(ctx, mask, attr)
}

// openFlagsFromPerms converts an fs.PermMask into the matching 9p open mode,
// returning EINVAL when neither read nor write is requested.
func openFlagsFromPerms(p fs.PermMask) (p9.OpenFlags, error) {
	switch {
	case p.Read && p.Write:
		return p9.ReadWrite, nil
	case p.Write:
		return p9.WriteOnly, nil
	case p.Read:
		return p9.ReadOnly, nil
	default:
		return 0, syscall.EINVAL
	}
}
diff --git a/pkg/sentry/fs/host/BUILD b/pkg/sentry/fs/host/BUILD
new file mode 100644
index 000000000..97b64daed
--- /dev/null
+++ b/pkg/sentry/fs/host/BUILD
@@ -0,0 +1,104 @@
package(licenses = ["notice"])  # Apache 2.0

load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("//tools/go_stateify:defs.bzl", "go_stateify")

go_stateify(
    name = "host_state",
    srcs = [
        "control.go",
        "descriptor.go",
        "descriptor_state.go",
        "file.go",
        "fs.go",
        "inode.go",
        "inode_state.go",
        "socket.go",
        "socket_state.go",
    ],
    out = "host_state.go",
    package = "host",
)

go_library(
    name = "host",
    srcs = [
        "control.go",
        "descriptor.go",
        "descriptor_state.go",
        "device.go",
        "file.go",
        "fs.go",
        "host_state.go",
        "inode.go",
        "inode_state.go",
        "ioctl_unsafe.go",
        "socket.go",
        "socket_state.go",
        "socket_unsafe.go",
        "util.go",
        "util_unsafe.go",
    ],
    importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/host",
    visibility = ["//pkg/sentry:internal"],
    deps = [
        "//pkg/abi/linux",
        "//pkg/amutex",
        "//pkg/fd",
        "//pkg/log",
        "//pkg/refs",
        "//pkg/secio",
        "//pkg/sentry/arch",
        "//pkg/sentry/context",
        "//pkg/sentry/device",
        "//pkg/sentry/fs",
        "//pkg/sentry/fs/fsutil",
        "//pkg/sentry/fs/lock",
        "//pkg/sentry/kernel",
        "//pkg/sentry/kernel/auth",
        "//pkg/sentry/kernel/time",
        "//pkg/sentry/memmap",
        "//pkg/sentry/platform",
        "//pkg/sentry/safemem",
        "//pkg/sentry/socket",
        "//pkg/sentry/socket/control",
        "//pkg/sentry/socket/unix",
        "//pkg/sentry/uniqueid",
        "//pkg/sentry/usage",
        "//pkg/sentry/usermem",
        "//pkg/state",
        "//pkg/syserror",
        "//pkg/tcpip",
        "//pkg/tcpip/link/rawfile",
        "//pkg/tcpip/transport/unix",
        "//pkg/unet",
        "//pkg/waiter",
        "//pkg/waiter/fdnotifier",
        "@org_golang_x_sys//unix:go_default_library",
    ],
)

go_test(
    name = "host_test",
    size = "small",
    srcs = [
        "fs_test.go",
        "inode_test.go",
        "socket_test.go",
        "wait_test.go",
    ],
    embed = [":host"],
    deps = [
        "//pkg/fd",
        "//pkg/sentry/context",
        "//pkg/sentry/context/contexttest",
        "//pkg/sentry/fs",
        "//pkg/sentry/socket",
        "//pkg/sentry/usermem",
        "//pkg/syserr",
        "//pkg/tcpip",
        "//pkg/tcpip/transport/unix",
        "//pkg/waiter",
        "//pkg/waiter/fdnotifier",
    ],
)
diff --git a/pkg/sentry/fs/host/control.go b/pkg/sentry/fs/host/control.go
new file mode 100644
index 000000000..d2b007ab2
--- /dev/null
+++ b/pkg/sentry/fs/host/control.go
@@ -0,0 +1,90 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

import (
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/socket/control"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix"
)

// scmRights implements control.SCMRights for host file descriptors received
// via SCM_RIGHTS. It owns the FDs it holds.
type scmRights struct {
	fds []int
}

// newSCMRights wraps fds in a control.SCMRights. Ownership of the FDs is
// transferred to the returned message.
func newSCMRights(fds []int) control.SCMRights {
	return &scmRights{fds}
}

// Files implements control.SCMRights.Files.
+func (c *scmRights) Files(ctx context.Context, max int) control.RightsFiles { + n := max + if l := len(c.fds); n > l { + n = l + } + + rf := control.RightsFiles(fdsToFiles(ctx, c.fds[:n])) + + // Only consume converted FDs (fdsToFiles may convert fewer than n FDs). + c.fds = c.fds[len(rf):] + return rf +} + +// Clone implements unix.RightsControlMessage.Clone. +func (c *scmRights) Clone() unix.RightsControlMessage { + // Host rights never need to be cloned. + return nil +} + +// Release implements unix.RightsControlMessage.Release. +func (c *scmRights) Release() { + for _, fd := range c.fds { + syscall.Close(fd) + } + c.fds = nil +} + +// If an error is encountered, only files created before the error will be +// returned. This is what Linux does. +func fdsToFiles(ctx context.Context, fds []int) []*fs.File { + files := make([]*fs.File, 0, len(fds)) + for _, fd := range fds { + // Get flags. We do it here because they may be modified + // by subsequent functions. + fileFlags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), syscall.F_GETFL, 0) + if errno != 0 { + ctx.Warningf("Error retrieving host FD flags: %v", error(errno)) + break + } + + // Create the file backed by hostFD. + file, err := NewFile(ctx, fd, fs.FileOwnerFromContext(ctx)) + if err != nil { + ctx.Warningf("Error creating file from host FD: %v", err) + break + } + + // Set known flags. + file.SetFlags(fs.SettableFileFlags{ + NonBlocking: fileFlags&syscall.O_NONBLOCK != 0, + }) + + files = append(files, file) + } + return files +} diff --git a/pkg/sentry/fs/host/descriptor.go b/pkg/sentry/fs/host/descriptor.go new file mode 100644 index 000000000..613bd06e8 --- /dev/null +++ b/pkg/sentry/fs/host/descriptor.go @@ -0,0 +1,118 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

import (
	"fmt"
	"path"
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/log"
	"gvisor.googlesource.com/gvisor/pkg/waiter"
	"gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier"
)

// descriptor wraps a host fd.
type descriptor struct {
	// donated is true if the host fd was donated by another process.
	donated bool

	// If origFD >= 0, it is the host fd that this file was
	// originally created from, which must be available at time
	// of restore. Only valid if donated is true.
	origFD int

	// wouldBlock is true if value (below) points to a file that can
	// return EWOULDBLOCK for operations that would block.
	wouldBlock bool

	// value is the wrapped host fd. It is never saved or restored
	// directly. How it is restored depends on whether it was
	// donated and the fs.MountSource it was originally
	// opened/created from.
	value int `state:"nosave"`
}

// newDescriptor returns a wrapped host file descriptor. On success,
// the descriptor is registered for event notifications with queue.
//
// If saveable, the fd is duped so the original remains available at restore
// time; if wouldBlock, the owned fd is switched to non-blocking mode and
// registered with fdnotifier.
func newDescriptor(fd int, donated bool, saveable bool, wouldBlock bool, queue *waiter.Queue) (*descriptor, error) {
	ownedFD := fd
	origFD := -1
	if saveable {
		var err error
		ownedFD, err = syscall.Dup(fd)
		if err != nil {
			return nil, err
		}
		origFD = fd
	}
	if wouldBlock {
		if err := syscall.SetNonblock(ownedFD, true); err != nil {
			return nil, err
		}
		if err := fdnotifier.AddFD(int32(ownedFD), queue); err != nil {
			return nil, err
		}
	}
	return &descriptor{
		donated:    donated,
		origFD:     origFD,
		wouldBlock: wouldBlock,
		value:      ownedFD,
	}, nil
}

// initAfterLoad initializes the value of the descriptor after Load.
//
// Donated descriptors are re-duped from origFD; non-donated ones are
// re-opened by path via the mount's saved inode mappings.
func (d *descriptor) initAfterLoad(mo *superOperations, id uint64, queue *waiter.Queue) error {
	if d.donated {
		var err error
		d.value, err = syscall.Dup(d.origFD)
		if err != nil {
			return fmt.Errorf("failed to dup restored fd %d: %v", d.origFD, err)
		}
	} else {
		name, ok := mo.inodeMappings[id]
		if !ok {
			return fmt.Errorf("failed to find path for inode number %d", id)
		}
		fullpath := path.Join(mo.root, name)

		var err error
		d.value, err = open(nil, fullpath)
		if err != nil {
			return fmt.Errorf("failed to open %q: %v", fullpath, err)
		}
	}
	if d.wouldBlock {
		if err := syscall.SetNonblock(d.value, true); err != nil {
			return err
		}
		if err := fdnotifier.AddFD(int32(d.value), queue); err != nil {
			return err
		}
	}
	return nil
}

// Release releases all resources held by descriptor: it deregisters the fd
// from fdnotifier (if registered) and closes it.
func (d *descriptor) Release() {
	if d.wouldBlock {
		fdnotifier.RemoveFD(int32(d.value))
	}
	if err := syscall.Close(d.value); err != nil {
		log.Warningf("error closing fd %d: %v", d.value, err)
	}
	d.value = -1
}
diff --git a/pkg/sentry/fs/host/descriptor_state.go b/pkg/sentry/fs/host/descriptor_state.go
new file mode 100644
index 000000000..7fb274451
--- /dev/null
+++ b/pkg/sentry/fs/host/descriptor_state.go
@@ -0,0 +1,29 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

// beforeSave is invoked by stateify. A donated descriptor without a valid
// origFD cannot be re-established on restore, so saving it is an error.
func (d *descriptor) beforeSave() {
	if d.donated && d.origFD < 0 {
		panic("donated file descriptor cannot be saved")
	}
}

// afterLoad is invoked by stateify.
func (d *descriptor) afterLoad() {
	// value must be manually restored by the descriptor's parent using
	// initAfterLoad.
	d.value = -1
}
diff --git a/pkg/sentry/fs/host/device.go b/pkg/sentry/fs/host/device.go
new file mode 100644
index 000000000..f2a0b6b15
--- /dev/null
+++ b/pkg/sentry/fs/host/device.go
@@ -0,0 +1,25 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/device"
)

// hostFileDevice is the host file virtual device.
var hostFileDevice = device.NewAnonMultiDevice()

// hostPipeDevice is the host pipe virtual device.
var hostPipeDevice = device.NewAnonDevice()
diff --git a/pkg/sentry/fs/host/file.go b/pkg/sentry/fs/host/file.go
new file mode 100644
index 000000000..bdf844337
--- /dev/null
+++ b/pkg/sentry/fs/host/file.go
@@ -0,0 +1,371 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package host

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/unix"
	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/fd"
	"gvisor.googlesource.com/gvisor/pkg/log"
	"gvisor.googlesource.com/gvisor/pkg/secio"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
	"gvisor.googlesource.com/gvisor/pkg/waiter"
	"gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier"
)

// fileOperations implements fs.FileOperations for a host file descriptor.
type fileOperations struct {
	fsutil.NoopRelease `state:"nosave"`

	// iops are the Inode operations for this file.
	iops *inodeOperations `state:"wait"`

	// dirinfo is a scratch buffer for reading directory entries.
+ dirinfo *dirInfo `state:"nosave"` + + // dirCursor is the directory cursor. + dirCursor string + + // allowIoctl determines whether ioctls should be passed through to the + // host. + allowIoctl bool +} + +// fileOperations implements fs.FileOperations. +var _ fs.FileOperations = (*fileOperations)(nil) + +// NewFile creates a new File backed by the provided host file descriptor. If +// NewFile succeeds, ownership of the fd is transferred to the returned File. +// +// The returned File cannot be saved, since there is no guarantee that the same +// fd will exist or represent the same file at time of restore. If such a +// guarantee does exist, use ImportFile instead. +func NewFile(ctx context.Context, fd int, mounter fs.FileOwner) (*fs.File, error) { + return newFileFromDonatedFD(ctx, fd, mounter, false, false) +} + +// ImportFile creates a new File backed by the provided host file descriptor. +// Unlike NewFile, the file descriptor used by the File is duped from fd to +// ensure that later changes to fd are not reflected by the fs.File. +// +// If the returned file is saved, it will be restored by re-importing the fd +// originally passed to ImportFile. It is the restorer's responsibility to +// ensure that the fd represents the same file. +func ImportFile(ctx context.Context, fd int, mounter fs.FileOwner, allowIoctl bool) (*fs.File, error) { + return newFileFromDonatedFD(ctx, fd, mounter, true, allowIoctl) +} + +// newFileFromDonatedFD returns an fs.File from a donated fd. If the fd is +// saveable, then saveable is true. 
+func newFileFromDonatedFD(ctx context.Context, donated int, mounter fs.FileOwner, saveable, allowIoctl bool) (*fs.File, error) { + var s syscall.Stat_t + if err := syscall.Fstat(donated, &s); err != nil { + return nil, err + } + switch s.Mode & syscall.S_IFMT { + case syscall.S_IFSOCK: + flags, err := fileFlagsFromDonatedFD(donated) + if err != nil { + return nil, err + } + s, err := newSocket(ctx, donated, saveable) + if err != nil { + return nil, err + } + s.SetFlags(fs.SettableFileFlags{ + NonBlocking: flags.NonBlocking, + }) + return s, nil + default: + flags, err := fileFlagsFromDonatedFD(donated) + if err != nil { + return nil, err + } + msrc := newMountSource(ctx, "/", mounter, &Filesystem{}, fs.MountSourceFlags{}, false /* dontTranslateOwnership */) + inode, err := newInode(ctx, msrc, donated, saveable, true /* donated */) + if err != nil { + return nil, err + } + iops := inode.InodeOperations.(*inodeOperations) + + name := fmt.Sprintf("host:[%d]", inode.StableAttr.InodeID) + dirent := fs.NewDirent(inode, name) + defer dirent.DecRef() + + return newFile(ctx, dirent, flags, iops, allowIoctl), nil + } +} + +func fileFlagsFromDonatedFD(donated int) (fs.FileFlags, error) { + flags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(donated), syscall.F_GETFL, 0) + if errno != 0 { + log.Warningf("Failed to get file flags for donated fd %d (errno=%d)", donated, errno) + return fs.FileFlags{}, syscall.EIO + } + accmode := flags & syscall.O_ACCMODE + return fs.FileFlags{ + Direct: flags&syscall.O_DIRECT != 0, + NonBlocking: flags&syscall.O_NONBLOCK != 0, + Sync: flags&syscall.O_SYNC != 0, + Append: flags&syscall.O_APPEND != 0, + Read: accmode == syscall.O_RDONLY || accmode == syscall.O_RDWR, + Write: accmode == syscall.O_WRONLY || accmode == syscall.O_RDWR, + }, nil +} + +// newFile returns a new fs.File. 
+func newFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags, iops *inodeOperations, allowIoctl bool) *fs.File { + if !iops.ReturnsWouldBlock() { + // Allow reading/writing at an arbitrary offset for files + // that support it. + flags.Pread = true + flags.Pwrite = true + } + return fs.NewFile(ctx, dirent, flags, &fileOperations{ + iops: iops, + allowIoctl: allowIoctl, + }) +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (f *fileOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + f.iops.fileState.queue.EventRegister(e, mask) + fdnotifier.UpdateFD(int32(f.iops.fileState.FD())) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (f *fileOperations) EventUnregister(e *waiter.Entry) { + f.iops.fileState.queue.EventUnregister(e) + fdnotifier.UpdateFD(int32(f.iops.fileState.FD())) +} + +// Readiness uses the poll() syscall to check the status of the underlying FD. +func (f *fileOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + return fdnotifier.NonBlockingPoll(int32(f.iops.fileState.FD()), mask) +} + +// Readdir implements fs.FileOperations.Readdir. +func (f *fileOperations) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) { + root := fs.RootFromContext(ctx) + defer root.DecRef() + dirCtx := &fs.DirCtx{ + Serializer: serializer, + DirCursor: &f.dirCursor, + } + return fs.DirentReaddir(ctx, file.Dirent, f, root, dirCtx, file.Offset()) +} + +// IterateDir implements fs.DirIterator.IterateDir. +func (f *fileOperations) IterateDir(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) { + if f.dirinfo == nil { + f.dirinfo = new(dirInfo) + f.dirinfo.buf = make([]byte, usermem.PageSize) + } + entries, err := f.iops.readdirAll(f.dirinfo) + if err != nil { + return offset, err + } + count, err := fs.GenericReaddir(dirCtx, fs.NewSortedDentryMap(entries)) + return offset + count, err +} + +// Write implements fs.FileOperations.Write. 
+func (f *fileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + // Would this file block? + if f.iops.ReturnsWouldBlock() { + // These files can't be memory mapped, assert this. This also + // means that writes do not need to synchronize with memory + // mappings nor metadata cached by this file's fs.Inode. + if canMap(file.Dirent.Inode) { + panic("files that can return EWOULDBLOCK cannot be memory mapped") + } + // Ignore the offset, these files don't support writing at + // an arbitrary offset. + writer := fd.NewReadWriter(f.iops.fileState.FD()) + n, err := src.CopyInTo(ctx, safemem.FromIOWriter{writer}) + if isBlockError(err) { + err = syserror.ErrWouldBlock + } + return n, err + } + if !file.Dirent.Inode.MountSource.Flags.ForcePageCache { + writer := secio.NewOffsetWriter(fd.NewReadWriter(f.iops.fileState.FD()), offset) + return src.CopyInTo(ctx, safemem.FromIOWriter{writer}) + } + return f.iops.cachingInodeOps.Write(ctx, src, offset) +} + +// Read implements fs.FileOperations.Read. +func (f *fileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + // Would this file block? + if f.iops.ReturnsWouldBlock() { + // These files can't be memory mapped, assert this. This also + // means that reads do not need to synchronize with memory + // mappings nor metadata cached by this file's fs.Inode. + if canMap(file.Dirent.Inode) { + panic("files that can return EWOULDBLOCK cannot be memory mapped") + } + // Ignore the offset, these files don't support reading at + // an arbitrary offset. + reader := fd.NewReadWriter(f.iops.fileState.FD()) + n, err := dst.CopyOutFrom(ctx, safemem.FromIOReader{reader}) + if isBlockError(err) { + // If we got any data at all, return it as a "completed" partial read + // rather than retrying until complete. 
			if n != 0 {
				err = nil
			} else {
				err = syserror.ErrWouldBlock
			}
		}
		return n, err
	}
	if !file.Dirent.Inode.MountSource.Flags.ForcePageCache {
		// Bypass the page cache and read directly at offset.
		reader := secio.NewOffsetReader(fd.NewReadWriter(f.iops.fileState.FD()), offset)
		return dst.CopyOutFrom(ctx, safemem.FromIOReader{reader})
	}
	return f.iops.cachingInodeOps.Read(ctx, file, dst, offset)
}

// Fsync implements fs.FileOperations.Fsync. For SyncAll/SyncData, cached
// metadata is written out before the host fsync; SyncBackingStorage syncs
// the host fd only. An unknown syncType is a programmer error and panics.
func (f *fileOperations) Fsync(ctx context.Context, file *fs.File, start int64, end int64, syncType fs.SyncType) error {
	switch syncType {
	case fs.SyncAll, fs.SyncData:
		if err := file.Dirent.Inode.WriteOut(ctx); err != nil {
			return err
		}
		fallthrough
	case fs.SyncBackingStorage:
		return syscall.Fsync(f.iops.fileState.FD())
	}
	panic("invalid sync type")
}

// Flush implements fs.FileOperations.Flush.
func (f *fileOperations) Flush(context.Context, *fs.File) error {
	// This is a no-op because flushing the resource backing this
	// file would mean closing it. We can't do that because other
	// open files may depend on the backing host fd.
	return nil
}

// ConfigureMMap implements fs.FileOperations.ConfigureMMap.
func (f *fileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error {
	if !canMap(file.Dirent.Inode) {
		return syserror.ENODEV
	}
	return fsutil.GenericConfigureMMap(file, f.iops.cachingInodeOps, opts)
}

// Seek implements fs.FileOperations.Seek.
func (f *fileOperations) Seek(ctx context.Context, file *fs.File, whence fs.SeekWhence, offset int64) (int64, error) {
	return fsutil.SeekWithDirCursor(ctx, file, whence, offset, &f.dirCursor)
}

// Ioctl implements fs.FileOperations.Ioctl. Only a small set of tty ioctls
// is supported, and only when allowIoctl is set; everything else is ENOTTY.
func (f *fileOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {
	if !f.allowIoctl {
		return 0, syserror.ENOTTY
	}
	// Ignore arg[0].  This is the real FD:
	fd := f.iops.fileState.FD()
	ioctl := args[1].Uint64()
	switch ioctl {
	case unix.TCGETS:
		termios, err := ioctlGetTermios(fd)
		if err != nil {
			return 0, err
		}
		_, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), termios, usermem.IOOpts{
			AddressSpaceActive: true,
		})
		return 0, err

	case unix.TCSETS, unix.TCSETSW:
		var termios linux.Termios
		if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &termios, usermem.IOOpts{
			AddressSpaceActive: true,
		}); err != nil {
			return 0, err
		}
		err := ioctlSetTermios(fd, ioctl, &termios)
		return 0, err

	case unix.TIOCGPGRP:
		// Args: pid_t *argp
		// When successful, equivalent to *argp = tcgetpgrp(fd).
		// Get the process group ID of the foreground process group on
		// this terminal.

		// NOTE(review): this returns the calling task's thread ID, not a
		// tracked foreground process group — appears to be a stand-in.
		t := kernel.TaskFromContext(ctx)
		if t == nil {
			panic(fmt.Sprintf("cannot get thread group from context %v", ctx))
		}
		tid := t.ThreadID()
		_, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), &tid, usermem.IOOpts{
			AddressSpaceActive: true,
		})
		return 0, err

	case unix.TIOCSPGRP:
		// Args: const pid_t *argp
		// Equivalent to tcsetpgrp(fd, *argp).
		// Set the foreground process group ID of this terminal.

		// Not much we can do with this one at the moment, so we just
		// lie and pretend everything is great. Bash and Sh seem fine
		// with this.
		log.Warningf("Ignoring application ioctl(TIOCSPGRP) call")
		return 0, nil

	case unix.TIOCGWINSZ:
		// Args: struct winsize *argp
		// Get window size.
		winsize, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
		if err != nil {
			return 0, err
		}
		_, err = usermem.CopyObjectOut(ctx, io, args[2].Pointer(), winsize, usermem.IOOpts{
			AddressSpaceActive: true,
		})
		return 0, err

	case unix.TIOCSWINSZ:
		// Args: const struct winsize *argp
		// Set window size.
+ var winsize unix.Winsize + if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &winsize, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return 0, err + } + err := unix.IoctlSetWinsize(fd, unix.TIOCSWINSZ, &winsize) + return 0, err + + default: + return 0, syserror.ENOTTY + } +} diff --git a/pkg/sentry/fs/host/fs.go b/pkg/sentry/fs/host/fs.go new file mode 100644 index 000000000..ffd55a5ab --- /dev/null +++ b/pkg/sentry/fs/host/fs.go @@ -0,0 +1,327 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package host implements an fs.Filesystem for files backed by host +// file descriptors. +package host + +import ( + "fmt" + "path" + "path/filepath" + "strconv" + "strings" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// FilesystemName is the name under which Filesystem is registered. +const FilesystemName = "whitelistfs" + +const ( + // whitelistKey is the mount option containing a comma-separated list + // of host paths to whitelist. + whitelistKey = "whitelist" + + // rootPathKey is the mount option containing the root path of the + // mount. + rootPathKey = "root" + + // dontTranslateOwnershipKey is the key to superOperations.dontTranslateOwnership. + dontTranslateOwnershipKey = "dont_translate_ownership" +) + +// maxTraversals determines link traversals in building the whitelist. 
+const maxTraversals = 10 + +// Filesystem is a pseudo file system that is only available during the setup +// to lock down the configurations. This filesystem should only be mounted at root. +// +// Think twice before exposing this to applications. +type Filesystem struct { + // whitelist is a set of host paths to whitelist. + paths []string +} + +// Name is the identifier of this file system. +func (*Filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount prohibits users from using mount(2) with this file system. +func (*Filesystem) AllowUserMount() bool { + return false +} + +// Flags returns that there is nothing special about this file system. +func (*Filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns an fs.Inode exposing the host file system. It is intended to be locked +// down in PreExec below. +func (f *Filesystem) Mount(ctx context.Context, _ string, flags fs.MountSourceFlags, data string) (*fs.Inode, error) { + // Parse generic comma-separated key=value options. + options := fs.GenericMountSourceOptions(data) + + // Grab the whitelist if one was specified. + // TODO: require another option "testonly" in order to allow + // no whitelist. + if wl, ok := options[whitelistKey]; ok { + f.paths = strings.Split(wl, "|") + delete(options, whitelistKey) + } + + // If the rootPath was set, use it. Othewise default to the root of the + // host fs. + rootPath := "/" + if rp, ok := options[rootPathKey]; ok { + rootPath = rp + delete(options, rootPathKey) + + // We must relativize the whitelisted paths to the new root. 
+ for i, p := range f.paths { + rel, err := filepath.Rel(rootPath, p) + if err != nil { + return nil, fmt.Errorf("whitelist path %q must be a child of root path %q", p, rootPath) + } + f.paths[i] = path.Join("/", rel) + } + } + fd, err := open(nil, rootPath) + if err != nil { + return nil, fmt.Errorf("failed to find root: %v", err) + } + + var dontTranslateOwnership bool + if v, ok := options[dontTranslateOwnershipKey]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, fmt.Errorf("invalid value for %q: %v", dontTranslateOwnershipKey, err) + } + dontTranslateOwnership = b + delete(options, dontTranslateOwnershipKey) + } + + // Fail if the caller passed us more options than we know about. + if len(options) > 0 { + return nil, fmt.Errorf("unsupported mount options: %v", options) + } + + // The mounting EUID/EGID will be cached by this file system. This will + // be used to assign ownership to files that we own. + owner := fs.FileOwnerFromContext(ctx) + + // Construct the host file system mount and inode. + msrc := newMountSource(ctx, rootPath, owner, f, flags, dontTranslateOwnership) + return newInode(ctx, msrc, fd, false /* saveable */, false /* donated */) +} + +// InstallWhitelist locks down the MountNamespace to only the currently installed +// Dirents and the given paths. +func (f *Filesystem) InstallWhitelist(ctx context.Context, m *fs.MountNamespace) error { + return installWhitelist(ctx, m, f.paths) +} + +func installWhitelist(ctx context.Context, m *fs.MountNamespace, paths []string) error { + if len(paths) == 0 || (len(paths) == 1 && paths[0] == "") { + // Warning will be logged during filter installation if the empty + // whitelist matters (allows for host file access). + return nil + } + + // Done tracks entries already added. + done := make(map[string]bool) + root := m.Root() + defer root.DecRef() + + for i := 0; i < len(paths); i++ { + // Make sure the path is absolute. This is a sanity check. 
+ if !path.IsAbs(paths[i]) { + return fmt.Errorf("path %q is not absolute", paths[i]) + } + + // We need to add all the intermediate paths, in case one of + // them is a symlink that needs to be resolved. + for j := 1; j <= len(paths[i]); j++ { + if j < len(paths[i]) && paths[i][j] != '/' { + continue + } + current := paths[i][:j] + + // Lookup the given component in the tree. + d, err := m.FindLink(ctx, root, nil, current, maxTraversals) + if err != nil { + log.Warningf("populate failed for %q: %v", current, err) + continue + } + + // It's critical that this DecRef happens after the + // freeze below. This ensures that the dentry is in + // place to be frozen. Otherwise, we freeze without + // these entries. + defer d.DecRef() + + // Expand the last component if necessary. + if current == paths[i] { + // Is it a directory or symlink? + sattr := d.Inode.StableAttr + if fs.IsDir(sattr) { + for name := range childDentAttrs(ctx, d) { + paths = append(paths, path.Join(current, name)) + } + } + if fs.IsSymlink(sattr) { + // Only expand symlinks once. The + // folder structure may contain + // recursive symlinks and we don't want + // to end up infinitely expanding this + // symlink. This is safe because this + // is the last component. If a later + // path wants to symlink something + // beneath this symlink that will still + // be handled by the FindLink above. + if done[current] { + continue + } + + s, err := d.Inode.Readlink(ctx) + if err != nil { + log.Warningf("readlink failed for %q: %v", current, err) + continue + } + if path.IsAbs(s) { + paths = append(paths, s) + } else { + target := path.Join(path.Dir(current), s) + paths = append(paths, target) + } + } + } + + // Only report this one once even though we may look + // it up more than once. If we whitelist /a/b,/a then + // /a will be "done" when it is looked up for /a/b, + // however we still need to expand all of its contents + // when whitelisting /a. 
+ if !done[current] { + log.Debugf("whitelisted: %s", current) + } + done[current] = true + } + } + + // Freeze the mount tree in place. This prevents any new paths from + // being opened and any old ones from being removed. If we do provide + // tmpfs mounts, we'll want to freeze/thaw those separately. + m.Freeze() + return nil +} + +func childDentAttrs(ctx context.Context, d *fs.Dirent) map[string]fs.DentAttr { + dirname, _ := d.FullName(nil /* root */) + dir, err := d.Inode.GetFile(ctx, d, fs.FileFlags{Read: true}) + if err != nil { + log.Warningf("failed to open directory %q: %v", dirname, err) + return nil + } + dir.DecRef() + var stubSerializer fs.CollectEntriesSerializer + if err := dir.Readdir(ctx, &stubSerializer); err != nil { + log.Warningf("failed to iterate on host directory %q: %v", dirname, err) + return nil + } + delete(stubSerializer.Entries, ".") + delete(stubSerializer.Entries, "..") + return stubSerializer.Entries +} + +// newMountSource constructs a new host fs.MountSource +// relative to a root path. The root should match the mount point. +func newMountSource(ctx context.Context, root string, mounter fs.FileOwner, filesystem fs.Filesystem, flags fs.MountSourceFlags, dontTranslateOwnership bool) *fs.MountSource { + return fs.NewMountSource(&superOperations{ + root: root, + inodeMappings: make(map[uint64]string), + mounter: mounter, + dontTranslateOwnership: dontTranslateOwnership, + }, filesystem, flags) +} + +// superOperations implements fs.MountSourceOperations. +type superOperations struct { + fs.SimpleMountSourceOperations `state:"nosave"` + + // root is the path of the mount point. All inode mappings + // are relative to this root. + root string + + // inodeMappings contains mappings of fs.Inodes associated + // with this MountSource to paths under root. + inodeMappings map[uint64]string + + // mounter is the cached EUID/EGID that mounted this file system. 
+ mounter fs.FileOwner + + // dontTranslateOwnership indicates whether to not translate file + // ownership. + // + // By default, files/directories owned by the sandbox uses UID/GID + // of the mounter. For files/directories that are not owned by the + // sandbox, file UID/GID is translated to a UID/GID which cannot + // be mapped in the sandboxed application's user namespace. The + // UID/GID will look like the nobody UID/GID (65534) but is not + // strictly owned by the user "nobody". + // + // If whitelistfs is a lower filesystem in an overlay, set + // dont_translate_ownership=true in mount options. + dontTranslateOwnership bool +} + +var _ fs.MountSourceOperations = (*superOperations)(nil) + +// ResetInodeMappings implements fs.MountSourceOperations.ResetInodeMappings. +func (m *superOperations) ResetInodeMappings() { + m.inodeMappings = make(map[uint64]string) +} + +// SaveInodeMapping implements fs.MountSourceOperations.SaveInodeMapping. +func (m *superOperations) SaveInodeMapping(inode *fs.Inode, path string) { + // This is very unintuitive. We *CANNOT* trust the inode's StableAttrs, + // because overlay copyUp may have changed them out from under us. + // So much for "immutable". + sattr := inode.InodeOperations.(*inodeOperations).fileState.sattr + m.inodeMappings[sattr.InodeID] = path +} + +// Keep implements fs.MountSourceOperations.Keep. +// +// TODO: It is possible to change the permissions on a +// host file while it is in the dirent cache (say from RO to RW), but it is not +// possible to re-open the file with more relaxed permissions, since the host +// FD is already open and stored in the inode. +// +// Using the dirent LRU cache increases the odds that this bug is encountered. +// Since host file access is relatively fast anyways, we disable the LRU cache +// for host fs files. Once we can properly deal with permissions changes and +// re-opening host files, we should revisit whether or not to make use of the +// LRU cache. 
+func (*superOperations) Keep(*fs.Dirent) bool { + return false +} + +func init() { + fs.RegisterFilesystem(&Filesystem{}) +} diff --git a/pkg/sentry/fs/host/fs_test.go b/pkg/sentry/fs/host/fs_test.go new file mode 100644 index 000000000..c000afc49 --- /dev/null +++ b/pkg/sentry/fs/host/fs_test.go @@ -0,0 +1,383 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "reflect" + "sort" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// newTestMountNamespace creates a MountNamespace with a ramfs root. +// It returns the host folder created, which should be removed when done. 
+func newTestMountNamespace(t *testing.T) (*fs.MountNamespace, string, error) { + p, err := ioutil.TempDir("", "root") + if err != nil { + return nil, "", err + } + + fd, err := open(nil, p) + if err != nil { + os.RemoveAll(p) + return nil, "", err + } + ctx := contexttest.Context(t) + root, err := newInode(ctx, newMountSource(ctx, p, fs.RootOwner, &Filesystem{}, fs.MountSourceFlags{}, false), fd, false, false) + if err != nil { + os.RemoveAll(p) + return nil, "", err + } + mm, err := fs.NewMountNamespace(ctx, root) + if err != nil { + os.RemoveAll(p) + return nil, "", err + } + return mm, p, nil +} + +// createTestDirs populates the root with some test files and directories. +// /a/a1.txt +// /a/a2.txt +// /b/b1.txt +// /b/c/c1.txt +// /symlinks/normal.txt +// /symlinks/to_normal.txt -> /symlinks/normal.txt +// /symlinks/recursive -> /symlinks +func createTestDirs(ctx context.Context, t *testing.T, m *fs.MountNamespace) error { + r := m.Root() + defer r.DecRef() + + if err := r.CreateDirectory(ctx, r, "a", fs.FilePermsFromMode(0777)); err != nil { + return err + } + + a, err := r.Walk(ctx, r, "a") + if err != nil { + return err + } + defer a.DecRef() + + a1, err := a.Create(ctx, r, "a1.txt", fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0666)) + if err != nil { + return err + } + a1.DecRef() + + a2, err := a.Create(ctx, r, "a2.txt", fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0666)) + if err != nil { + return err + } + a2.DecRef() + + if err := r.CreateDirectory(ctx, r, "b", fs.FilePermsFromMode(0777)); err != nil { + return err + } + + b, err := r.Walk(ctx, r, "b") + if err != nil { + return err + } + defer b.DecRef() + + b1, err := b.Create(ctx, r, "b1.txt", fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0666)) + if err != nil { + return err + } + b1.DecRef() + + if err := b.CreateDirectory(ctx, r, "c", fs.FilePermsFromMode(0777)); err != nil { + return err + } + + c, err := b.Walk(ctx, r, "c") + if err != nil { + 
return err + } + defer c.DecRef() + + c1, err := c.Create(ctx, r, "c1.txt", fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0666)) + if err != nil { + return err + } + c1.DecRef() + + if err := r.CreateDirectory(ctx, r, "symlinks", fs.FilePermsFromMode(0777)); err != nil { + return err + } + + symlinks, err := r.Walk(ctx, r, "symlinks") + if err != nil { + return err + } + defer symlinks.DecRef() + + normal, err := symlinks.Create(ctx, r, "normal.txt", fs.FileFlags{Read: true, Write: true}, fs.FilePermsFromMode(0666)) + if err != nil { + return err + } + normal.DecRef() + + if err := symlinks.CreateLink(ctx, r, "/symlinks/normal.txt", "to_normal.txt"); err != nil { + return err + } + + if err := symlinks.CreateLink(ctx, r, "/symlinks", "recursive"); err != nil { + return err + } + + return nil +} + +// allPaths returns a slice of all paths of entries visible in the rootfs. +func allPaths(ctx context.Context, t *testing.T, m *fs.MountNamespace, base string) ([]string, error) { + var paths []string + root := m.Root() + defer root.DecRef() + + d, err := m.FindLink(ctx, root, nil, base, 1) + if err != nil { + t.Logf("FindLink failed for %q", base) + return paths, err + } + defer d.DecRef() + + if fs.IsDir(d.Inode.StableAttr) { + dir, err := d.Inode.GetFile(ctx, d, fs.FileFlags{Read: true}) + if err != nil { + return nil, fmt.Errorf("failed to open directory %q: %v", base, err) + } + iter, ok := dir.FileOperations.(fs.DirIterator) + if !ok { + return nil, fmt.Errorf("cannot directly iterate on host directory %q", base) + } + dirCtx := &fs.DirCtx{ + Serializer: noopDentrySerializer{}, + } + if _, err := fs.DirentReaddir(ctx, d, iter, root, dirCtx, 0); err != nil { + return nil, err + } + for name := range dirCtx.DentAttrs() { + if name == "." || name == ".." { + continue + } + + fullName := path.Join(base, name) + paths = append(paths, fullName) + + // Recurse. 
+ subpaths, err := allPaths(ctx, t, m, fullName) + if err != nil { + return paths, err + } + paths = append(paths, subpaths...) + } + } + + return paths, nil +} + +type noopDentrySerializer struct{} + +func (noopDentrySerializer) CopyOut(string, fs.DentAttr) error { + return nil +} +func (noopDentrySerializer) Written() int { + return 4096 +} + +// pathsEqual returns true if the two string slices contain the same entries. +func pathsEqual(got, want []string) bool { + sort.Strings(got) + sort.Strings(want) + + if len(got) != len(want) { + return false + } + + for i := range got { + if got[i] != want[i] { + return false + } + } + + return true +} + +func TestWhitelist(t *testing.T) { + for _, test := range []struct { + // description of the test. + desc string + // paths are the paths to whitelist + paths []string + // want are all of the directory entries that should be + // visible (nothing beyond this set should be visible). + want []string + }{ + { + desc: "root", + paths: []string{"/"}, + want: []string{"/a", "/a/a1.txt", "/a/a2.txt", "/b", "/b/b1.txt", "/b/c", "/b/c/c1.txt", "/symlinks", "/symlinks/normal.txt", "/symlinks/to_normal.txt", "/symlinks/recursive"}, + }, + { + desc: "top-level directories", + paths: []string{"/a", "/b"}, + want: []string{"/a", "/a/a1.txt", "/a/a2.txt", "/b", "/b/b1.txt", "/b/c", "/b/c/c1.txt"}, + }, + { + desc: "nested directories (1/2)", + paths: []string{"/b", "/b/c"}, + want: []string{"/b", "/b/b1.txt", "/b/c", "/b/c/c1.txt"}, + }, + { + desc: "nested directories (2/2)", + paths: []string{"/b/c", "/b"}, + want: []string{"/b", "/b/b1.txt", "/b/c", "/b/c/c1.txt"}, + }, + { + desc: "single file", + paths: []string{"/b/c/c1.txt"}, + want: []string{"/b", "/b/c", "/b/c/c1.txt"}, + }, + { + desc: "single file and directory", + paths: []string{"/a/a1.txt", "/b/c"}, + want: []string{"/a", "/a/a1.txt", "/b", "/b/c", "/b/c/c1.txt"}, + }, + { + desc: "symlink", + paths: []string{"/symlinks/to_normal.txt"}, + want: []string{"/symlinks", 
"/symlinks/normal.txt", "/symlinks/to_normal.txt"}, + }, + { + desc: "recursive symlink", + paths: []string{"/symlinks/recursive/normal.txt"}, + want: []string{"/symlinks", "/symlinks/normal.txt", "/symlinks/recursive"}, + }, + } { + t.Run(test.desc, func(t *testing.T) { + m, p, err := newTestMountNamespace(t) + if err != nil { + t.Errorf("Failed to create MountNamespace: %v", err) + } + defer os.RemoveAll(p) + + ctx := withRoot(contexttest.RootContext(t), m.Root()) + if err := createTestDirs(ctx, t, m); err != nil { + t.Errorf("Failed to create test dirs: %v", err) + } + + if err := installWhitelist(ctx, m, test.paths); err != nil { + t.Errorf("installWhitelist(%v) err got %v want nil", test.paths, err) + } + + got, err := allPaths(ctx, t, m, "/") + if err != nil { + t.Fatalf("Failed to lookup paths (whitelisted: %v): %v", test.paths, err) + } + + if !pathsEqual(got, test.want) { + t.Errorf("For paths %v got %v want %v", test.paths, got, test.want) + } + }) + } +} + +func TestRootPath(t *testing.T) { + // Create a temp dir, which will be the root of our mounted fs. + rootPath, err := ioutil.TempDir(os.TempDir(), "root") + if err != nil { + t.Fatalf("TempDir failed: %v", err) + } + defer os.RemoveAll(rootPath) + + // Create two files inside the new root, one which will be whitelisted + // and one not. + whitelisted, err := ioutil.TempFile(rootPath, "white") + if err != nil { + t.Fatalf("TempFile failed: %v", err) + } + if _, err := ioutil.TempFile(rootPath, "black"); err != nil { + t.Fatalf("TempFile failed: %v", err) + } + + // Create a mount with a root path and single whitelisted file. 
+ hostFS := &Filesystem{} + ctx := contexttest.Context(t) + data := fmt.Sprintf("%s=%s,%s=%s", rootPathKey, rootPath, whitelistKey, whitelisted.Name()) + inode, err := hostFS.Mount(ctx, "", fs.MountSourceFlags{}, data) + if err != nil { + t.Fatalf("Mount failed: %v", err) + } + mm, err := fs.NewMountNamespace(ctx, inode) + if err != nil { + t.Fatalf("NewMountNamespace failed: %v", err) + } + if err := hostFS.InstallWhitelist(ctx, mm); err != nil { + t.Fatalf("InstallWhitelist failed: %v", err) + } + + // Get the contents of the root directory. + rootDir := mm.Root() + rctx := withRoot(ctx, rootDir) + f, err := rootDir.Inode.GetFile(rctx, rootDir, fs.FileFlags{}) + if err != nil { + t.Fatalf("GetFile failed: %v", err) + } + c := &fs.CollectEntriesSerializer{} + if err := f.Readdir(rctx, c); err != nil { + t.Fatalf("Readdir failed: %v", err) + } + + // We should have only our whitelisted file, plus the dots. + want := []string{path.Base(whitelisted.Name()), ".", ".."} + got := c.Order + sort.Strings(want) + sort.Strings(got) + if !reflect.DeepEqual(got, want) { + t.Errorf("Readdir got %v, wanted %v", got, want) + } +} + +type rootContext struct { + context.Context + root *fs.Dirent +} + +// withRoot returns a copy of ctx with the given root. +func withRoot(ctx context.Context, root *fs.Dirent) context.Context { + return &rootContext{ + Context: ctx, + root: root, + } +} + +// Value implements Context.Value. +func (rc rootContext) Value(key interface{}) interface{} { + switch key { + case fs.CtxRoot: + rc.root.IncRef() + return rc.root + default: + return rc.Context.Value(key) + } +} diff --git a/pkg/sentry/fs/host/inode.go b/pkg/sentry/fs/host/inode.go new file mode 100644 index 000000000..226bc5164 --- /dev/null +++ b/pkg/sentry/fs/host/inode.go @@ -0,0 +1,506 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/secio" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// inodeOperations implements fs.InodeOperations for an fs.Inodes backed +// by a host file descriptor. +type inodeOperations struct { + fsutil.InodeNotVirtual `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.DeprecatedFileOperations `state:"nosave"` + + // fileState implements fs.CachedFileObject. It exists + // to break a circular load dependency between inodeOperations + // and cachingInodeOps (below). + fileState *inodeFileState `state:"wait"` + + // cachedInodeOps implements memmap.Mappable. + cachingInodeOps *fsutil.CachingInodeOperations + + // readdirMu protects the file offset on the host FD. This is needed + // for readdir because getdents must use the kernel offset, so + // concurrent readdirs must be exclusive. + // + // All read/write functions pass the offset directly to the kernel and + // thus don't need a lock. 
+ readdirMu sync.Mutex `state:"nosave"` +} + +// inodeFileState implements fs.CachedFileObject and otherwise fully +// encapsulates state that needs to be manually loaded on restore for +// this file object. +// +// This unfortunate structure exists because fs.CachingInodeOperations +// defines afterLoad and therefore cannot be lazily loaded (to break a +// circular load dependency between it and inodeOperations). Even with +// lazy loading, this approach defines the dependencies between objects +// and the expected load behavior more concretely. +type inodeFileState struct { + // Common file system state. + mops *superOperations `state:"wait"` + + // descriptor is the backing host fd. + descriptor *descriptor `state:"wait"` + + // Event queue for blocking operations. + queue waiter.Queue `state:"nosave"` + + // sattr is used to restore the inodeOperations. + sattr fs.StableAttr `state:"wait"` + + // savedUAttr is only allocated during S/R. It points to the save-time + // unstable attributes and is used to validate restore-time ones. + // + // Note that these unstable attributes are only used to detect cross-S/R + // external file system metadata changes. They may differ from the + // cached unstable attributes in cachingInodeOps, as that might differ + // from the external file system attributes if there had been WriteOut + // failures. S/R is transparent to Sentry and the latter will continue + // using its cached values after restore. + savedUAttr *fs.UnstableAttr +} + +// ReadToBlocksAt implements fsutil.CachedFileObject.ReadToBlocksAt. +func (i *inodeFileState) ReadToBlocksAt(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) { + // TODO: Using safemem.FromIOReader here is wasteful for two + // reasons: + // + // - Using preadv instead of iterated preads saves on host system calls. + // + // - Host system calls can handle destination memory that would fault in + // gr3 (i.e. 
they can accept safemem.Blocks with NeedSafecopy() == true), + // so the buffering performed by FromIOReader is unnecessary. + // + // This also applies to the write path below. + return safemem.FromIOReader{secio.NewOffsetReader(fd.NewReadWriter(i.FD()), int64(offset))}.ReadToBlocks(dsts) +} + +// WriteFromBlocksAt implements fsutil.CachedFileObject.WriteFromBlocksAt. +func (i *inodeFileState) WriteFromBlocksAt(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) { + return safemem.FromIOWriter{secio.NewOffsetWriter(fd.NewReadWriter(i.FD()), int64(offset))}.WriteFromBlocks(srcs) +} + +// SetMaskedAttributes implements fsutil.CachedFileObject.SetMaskedAttributes. +func (i *inodeFileState) SetMaskedAttributes(ctx context.Context, mask fs.AttrMask, attr fs.UnstableAttr) error { + if mask.Empty() { + return nil + } + if mask.UID || mask.GID { + return syserror.EPERM + } + if mask.Perms { + if err := syscall.Fchmod(i.FD(), uint32(attr.Perms.LinuxMode())); err != nil { + return err + } + } + if mask.Size { + if err := syscall.Ftruncate(i.FD(), attr.Size); err != nil { + return err + } + } + if mask.AccessTime || mask.ModificationTime { + ts := fs.TimeSpec{ + ATime: attr.AccessTime, + ATimeOmit: !mask.AccessTime, + MTime: attr.ModificationTime, + MTimeOmit: !mask.ModificationTime, + } + if err := setTimestamps(i.FD(), ts); err != nil { + return err + } + } + return nil +} + +// Sync implements fsutil.CachedFileObject.Sync. +func (i *inodeFileState) Sync(ctx context.Context) error { + return syscall.Fsync(i.FD()) +} + +// FD implements fsutil.CachedFileObject.FD. +func (i *inodeFileState) FD() int { + return i.descriptor.value +} + +func (i *inodeFileState) unstableAttr(ctx context.Context) (fs.UnstableAttr, error) { + var s syscall.Stat_t + if err := syscall.Fstat(i.FD(), &s); err != nil { + return fs.UnstableAttr{}, err + } + return unstableAttr(i.mops, &s), nil +} + +// inodeOperations implements fs.InodeOperations. 
+var _ fs.InodeOperations = (*inodeOperations)(nil) + +// newInode returns a new fs.Inode backed by the host fd. +func newInode(ctx context.Context, msrc *fs.MountSource, fd int, saveable bool, donated bool) (*fs.Inode, error) { + // Retrieve metadata. + var s syscall.Stat_t + err := syscall.Fstat(fd, &s) + if err != nil { + return nil, err + } + + fileState := &inodeFileState{ + mops: msrc.MountSourceOperations.(*superOperations), + sattr: stableAttr(&s), + } + + // Initialize the wrapped host file descriptor. + fileState.descriptor, err = newDescriptor( + fd, + donated, + saveable, + wouldBlock(&s), + &fileState.queue, + ) + if err != nil { + return nil, err + } + + // Build the fs.InodeOperations. + uattr := unstableAttr(msrc.MountSourceOperations.(*superOperations), &s) + iops := &inodeOperations{ + fileState: fileState, + cachingInodeOps: fsutil.NewCachingInodeOperations(ctx, fileState, uattr, msrc.Flags.ForcePageCache), + } + + // Return the fs.Inode. + return fs.NewInode(iops, msrc, fileState.sattr), nil +} + +// Mappable implements fs.InodeOperations.Mappable. +func (i *inodeOperations) Mappable(inode *fs.Inode) memmap.Mappable { + if !canMap(inode) { + return nil + } + return i.cachingInodeOps +} + +// ReturnsWouldBlock returns true if this host fd can return EWOULDBLOCK +// for operations that would block. +func (i *inodeOperations) ReturnsWouldBlock() bool { + return i.fileState.descriptor.wouldBlock +} + +// Release implements fs.InodeOperations.Release. +func (i *inodeOperations) Release(context.Context) { + i.fileState.descriptor.Release() + i.cachingInodeOps.Release() +} + +// Lookup implements fs.InodeOperations.Lookup. +func (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) { + // Get a new fd relative to i at name. 
+ fd, err := open(i, name) + if err != nil { + if err == syserror.ENOENT { + return nil, syserror.ENOENT + } + return nil, err + } + + inode, err := newInode(ctx, dir.MountSource, fd, false /* saveable */, false /* donated */) + if err != nil { + return nil, err + } + + // Return the fs.Dirent. + return fs.NewDirent(inode, name), nil +} + +// Create implements fs.InodeOperations.Create. +func (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) { + // Create a file relative to i at name. + // + // N.B. We always open this file O_RDWR regardless of flags because a + // future GetFile might want more access. Open allows this regardless + // of perm. + fd, err := openAt(i, name, syscall.O_RDWR|syscall.O_CREAT|syscall.O_EXCL, perm.LinuxMode()) + if err != nil { + return nil, err + } + + inode, err := newInode(ctx, dir.MountSource, fd, false /* saveable */, false /* donated */) + if err != nil { + return nil, err + } + + d := fs.NewDirent(inode, name) + defer d.DecRef() + return inode.GetFile(ctx, d, flags) +} + +// CreateDirectory implements fs.InodeOperations.CreateDirectory. +func (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error { + return syscall.Mkdirat(i.fileState.FD(), name, uint32(perm.LinuxMode())) +} + +// CreateLink implements fs.InodeOperations.CreateLink. +func (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname string, newname string) error { + return createLink(i.fileState.FD(), oldname, newname) +} + +// CreateHardLink implements fs.InodeOperations.CreateHardLink. +func (*inodeOperations) CreateHardLink(context.Context, *fs.Inode, *fs.Inode, string) error { + return syserror.EPERM +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. 
+func (*inodeOperations) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error { + return syserror.EOPNOTSUPP +} + +// Remove implements fs.InodeOperations.Remove. +func (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error { + return unlinkAt(i.fileState.FD(), name, false /* dir */) +} + +// RemoveDirectory implements fs.InodeOperations.RemoveDirectory. +func (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error { + return unlinkAt(i.fileState.FD(), name, true /* dir */) +} + +// Rename implements fs.InodeOperations.Rename. +func (i *inodeOperations) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error { + op, ok := oldParent.InodeOperations.(*inodeOperations) + if !ok { + return syscall.EXDEV + } + np, ok := newParent.InodeOperations.(*inodeOperations) + if !ok { + return syscall.EXDEV + } + return syscall.Renameat(op.fileState.FD(), oldName, np.fileState.FD(), newName) +} + +// Bind implements fs.InodeOperations.Bind. +func (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, data unix.BoundEndpoint, perm fs.FilePermissions) error { + return syserror.EOPNOTSUPP +} + +// BoundEndpoint implements fs.InodeOperations.BoundEndpoint. +func (i *inodeOperations) BoundEndpoint(inode *fs.Inode, path string) unix.BoundEndpoint { + return nil +} + +// GetFile implements fs.InodeOperations.GetFile. +func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return newFile(ctx, d, flags, i, false), nil +} + +// canMap returns true if this fs.Inode can be memory mapped. +func canMap(inode *fs.Inode) bool { + // FIXME: Some obscure character devices can be mapped. + return fs.IsFile(inode.StableAttr) +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. 
+func (i *inodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + // When the kernel supports mapping host FDs, we do so to take + // advantage of the host page cache. We forego updating fs.Inodes + // because the host manages consistency of its own inode structures. + // + // For fs.Inodes that can never be mapped we take advantage of + // synchronizing metadata updates through host caches. + // + // So can we use host kernel metadata caches? + if !inode.MountSource.Flags.ForcePageCache || !canMap(inode) { + // Then just obtain the attributes. + return i.fileState.unstableAttr(ctx) + } + // No, we're maintaining consistency of metadata ourselves. + return i.cachingInodeOps.UnstableAttr(ctx, inode) +} + +// Check implements fs.InodeOperations.Check. +func (i *inodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (i *inodeOperations) SetOwner(context.Context, *fs.Inode, fs.FileOwner) error { + return syserror.EPERM +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (i *inodeOperations) SetPermissions(ctx context.Context, inode *fs.Inode, f fs.FilePermissions) bool { + // Can we use host kernel metadata caches? + if !inode.MountSource.Flags.ForcePageCache || !canMap(inode) { + // Then just change the timestamps on the fd, the host + // will synchronize the metadata update with any host + // inode and page cache. + return syscall.Fchmod(i.fileState.FD(), uint32(f.LinuxMode())) == nil + } + // Otherwise update our cached metadata. + return i.cachingInodeOps.SetPermissions(ctx, inode, f) +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (i *inodeOperations) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error { + // Can we use host kernel metadata caches? 
+ if !inode.MountSource.Flags.ForcePageCache || !canMap(inode) { + // Then just change the timestamps on the fd, the host + // will synchronize the metadata update with any host + // inode and page cache. + return setTimestamps(i.fileState.FD(), ts) + } + // Otherwise update our cached metadata. + return i.cachingInodeOps.SetTimestamps(ctx, inode, ts) +} + +// Truncate implements fs.InodeOperations.Truncate. +func (i *inodeOperations) Truncate(ctx context.Context, inode *fs.Inode, size int64) error { + // Is the file not memory-mappable? + if !canMap(inode) { + // Then just change the file size on the fd, the host + // will synchronize the metadata update with any host + // inode and page cache. + return syscall.Ftruncate(i.fileState.FD(), size) + } + // Otherwise we need to go through cachingInodeOps, even if the host page + // cache is in use, to invalidate private copies of truncated pages. + return i.cachingInodeOps.Truncate(ctx, inode, size) +} + +// WriteOut implements fs.InodeOperations.WriteOut. +func (i *inodeOperations) WriteOut(ctx context.Context, inode *fs.Inode) error { + // Have we been using host kernel metadata caches? + if !inode.MountSource.Flags.ForcePageCache || !canMap(inode) { + // Then the metadata is already up to date on the host. + return nil + } + // Otherwise we need to write out cached pages and attributes + // that are dirty. + return i.cachingInodeOps.WriteOut(ctx, inode) +} + +// Readlink implements fs.InodeOperations.Readlink. +func (i *inodeOperations) Readlink(ctx context.Context, inode *fs.Inode) (string, error) { + return readLink(i.fileState.FD()) +} + +// Getlink implements fs.InodeOperations.Getlink. +func (i *inodeOperations) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) { + if !fs.IsSymlink(i.fileState.sattr) { + return nil, syserror.ENOLINK + } + return nil, fs.ErrResolveViaReadlink +} + +// StatFS implements fs.InodeOperations.StatFS. 
+func (i *inodeOperations) StatFS(context.Context) (fs.Info, error) { + return fs.Info{}, syserror.ENOSYS +} + +// AddLink implements fs.InodeOperations.AddLink. +// FIXME: Remove this from InodeOperations altogether. +func (i *inodeOperations) AddLink() {} + +// DropLink implements fs.InodeOperations.DropLink. +// FIXME: Remove this from InodeOperations altogether. +func (i *inodeOperations) DropLink() {} + +// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange. +// FIXME: Remove this from InodeOperations altogether. +func (i *inodeOperations) NotifyStatusChange(ctx context.Context) {} + +// readdirAll returns all of the directory entries in i. +func (i *inodeOperations) readdirAll(d *dirInfo) (map[string]fs.DentAttr, error) { + i.readdirMu.Lock() + defer i.readdirMu.Unlock() + + fd := i.fileState.FD() + + // syscall.ReadDirent will use getdents, which will seek the file past + // the last directory entry. To read the directory entries a second + // time, we need to seek back to the beginning. + if _, err := syscall.Seek(fd, 0, 0); err != nil { + if err == syscall.ESPIPE { + // All directories should be seekable. If this file + // isn't seekable, it is not a directory and we should + // return that more sane error. + err = syscall.ENOTDIR + } + return nil, err + } + + names := make([]string, 0, 100) + for { + // Refill the buffer if necessary + if d.bufp >= d.nbuf { + d.bufp = 0 + // ReadDirent will just do a sys_getdents64 to the kernel. + n, err := syscall.ReadDirent(fd, d.buf) + if err != nil { + return nil, err + } + if n == 0 { + break // EOF + } + d.nbuf = n + } + + var nb int + // Parse the dirent buffer we just get and return the directory names along + // with the number of bytes consumed in the buffer. + nb, _, names = syscall.ParseDirent(d.buf[d.bufp:d.nbuf], -1, names) + d.bufp += nb + } + + entries := make(map[string]fs.DentAttr) + for _, filename := range names { + // Lookup the type and host device and inode. 
+ stat, lerr := fstatat(fd, filename, linux.AT_SYMLINK_NOFOLLOW) + if lerr == syscall.ENOENT { + // File disappeared between readdir and lstat. + // Just treat it as if it didn't exist. + continue + } + + // There was a serious problem, we should probably report it. + if lerr != nil { + return nil, lerr + } + + entries[filename] = fs.DentAttr{ + Type: nodeType(&stat), + InodeID: hostFileDevice.Map(device.MultiDeviceKey{ + Device: stat.Dev, + Inode: stat.Ino, + }), + } + } + return entries, nil +} diff --git a/pkg/sentry/fs/host/inode_state.go b/pkg/sentry/fs/host/inode_state.go new file mode 100644 index 000000000..80066512a --- /dev/null +++ b/pkg/sentry/fs/host/inode_state.go @@ -0,0 +1,79 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "fmt" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// beforeSave is invoked by stateify. +func (i *inodeFileState) beforeSave() { + if !i.queue.IsEmpty() { + panic("event queue must be empty") + } + if !i.descriptor.donated && i.sattr.Type == fs.RegularFile { + uattr, err := i.unstableAttr(context.Background()) + if err != nil { + panic(fmt.Sprintf("failed to get unstable atttribute of %s: %v", i.mops.inodeMappings[i.sattr.InodeID], err)) + } + i.savedUAttr = &uattr + } +} + +// afterLoad is invoked by stateify. 
+func (i *inodeFileState) afterLoad() { + // Initialize the descriptor value. + if err := i.descriptor.initAfterLoad(i.mops, i.sattr.InodeID, &i.queue); err != nil { + panic(fmt.Sprintf("failed to load value of descriptor: %v", err)) + } + + // Remap the inode number. + var s syscall.Stat_t + if err := syscall.Fstat(i.FD(), &s); err != nil { + panic(fmt.Sprintf("failed to get metadata for fd %d: %v", i.FD(), err)) + } + key := device.MultiDeviceKey{ + Device: s.Dev, + Inode: s.Ino, + } + if !hostFileDevice.Load(key, i.sattr.InodeID) { + // This means there was a conflict at s.Dev and s.Ino with + // another inode mapping: two files that were unique on the + // saved filesystem are no longer unique on this filesystem. + // Since this violates the contract that filesystems cannot + // change across save and restore, error out. + panic(fmt.Sprintf("host %s conflict in host device mappings: %s", key, hostFileDevice)) + } + + if !i.descriptor.donated && i.sattr.Type == fs.RegularFile { + env, ok := fs.CurrentRestoreEnvironment() + if !ok { + panic("missing restore environment") + } + uattr := unstableAttr(i.mops, &s) + if env.ValidateFileSize && uattr.Size != i.savedUAttr.Size { + panic(fmt.Errorf("file size has changed for %s: previously %d, now %d", i.mops.inodeMappings[i.sattr.InodeID], i.savedUAttr.Size, uattr.Size)) + } + if env.ValidateFileTimestamp && uattr.ModificationTime != i.savedUAttr.ModificationTime { + panic(fmt.Errorf("file modification time has changed for %s: previously %v, now %v", i.mops.inodeMappings[i.sattr.InodeID], i.savedUAttr.ModificationTime, uattr.ModificationTime)) + } + i.savedUAttr = nil + } +} diff --git a/pkg/sentry/fs/host/inode_test.go b/pkg/sentry/fs/host/inode_test.go new file mode 100644 index 000000000..0ff87c418 --- /dev/null +++ b/pkg/sentry/fs/host/inode_test.go @@ -0,0 +1,112 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// TestMultipleReaddir verifies that multiple Readdir calls return the same +// thing if they use different dir contexts. +func TestMultipleReaddir(t *testing.T) { + p, err := ioutil.TempDir("", "readdir") + if err != nil { + t.Fatalf("Failed to create test dir: %v", err) + } + defer os.RemoveAll(p) + + f, err := os.Create(path.Join(p, "a.txt")) + if err != nil { + t.Fatalf("Failed to create a.txt: %v", err) + } + f.Close() + + f, err = os.Create(path.Join(p, "b.txt")) + if err != nil { + t.Fatalf("Failed to create b.txt: %v", err) + } + f.Close() + + fd, err := open(nil, p) + if err != nil { + t.Fatalf("Failed to open %q: %v", p, err) + } + ctx := contexttest.Context(t) + n, err := newInode(ctx, newMountSource(ctx, p, fs.RootOwner, &Filesystem{}, fs.MountSourceFlags{}, false), fd, false, false) + if err != nil { + t.Fatalf("Failed to create inode: %v", err) + } + + dirent := fs.NewDirent(n, "readdir") + openFile, err := n.GetFile(ctx, dirent, fs.FileFlags{Read: true}) + if err != nil { + t.Fatalf("Failed to get file: %v", err) + } + defer openFile.DecRef() + + c1 := &fs.DirCtx{DirCursor: new(string)} + if _, err := openFile.FileOperations.(*fileOperations).IterateDir(ctx, c1, 0); err != nil { + 
t.Fatalf("First Readdir failed: %v", err) + } + + c2 := &fs.DirCtx{DirCursor: new(string)} + if _, err := openFile.FileOperations.(*fileOperations).IterateDir(ctx, c2, 0); err != nil { + t.Errorf("Second Readdir failed: %v", err) + } + + if _, ok := c1.DentAttrs()["a.txt"]; !ok { + t.Errorf("want a.txt in first Readdir, got %v", c1.DentAttrs()) + } + if _, ok := c1.DentAttrs()["b.txt"]; !ok { + t.Errorf("want b.txt in first Readdir, got %v", c1.DentAttrs()) + } + + if _, ok := c2.DentAttrs()["a.txt"]; !ok { + t.Errorf("want a.txt in second Readdir, got %v", c2.DentAttrs()) + } + if _, ok := c2.DentAttrs()["b.txt"]; !ok { + t.Errorf("want b.txt in second Readdir, got %v", c2.DentAttrs()) + } +} + +// TestCloseFD verifies fds will be closed. +func TestCloseFD(t *testing.T) { + var p [2]int + if err := syscall.Pipe(p[0:]); err != nil { + t.Fatalf("Failed to create pipe %v", err) + } + defer syscall.Close(p[0]) + defer syscall.Close(p[1]) + + // Use the write-end because we will detect if it's closed on the read end. + ctx := contexttest.Context(t) + file, err := NewFile(ctx, p[1], fs.RootOwner) + if err != nil { + t.Fatalf("Failed to create File: %v", err) + } + file.DecRef() + + s := make([]byte, 10) + if c, err := syscall.Read(p[0], s); c != 0 || err != nil { + t.Errorf("want 0, nil (EOF) from read end, got %v, %v", c, err) + } +} diff --git a/pkg/sentry/fs/host/ioctl_unsafe.go b/pkg/sentry/fs/host/ioctl_unsafe.go new file mode 100644 index 000000000..3c07c3850 --- /dev/null +++ b/pkg/sentry/fs/host/ioctl_unsafe.go @@ -0,0 +1,39 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +func ioctlGetTermios(fd int) (*linux.Termios, error) { + var t linux.Termios + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TCGETS, uintptr(unsafe.Pointer(&t))) + if errno != 0 { + return nil, errno + } + return &t, nil +} + +func ioctlSetTermios(fd int, req uint64, t *linux.Termios) error { + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(unsafe.Pointer(t))) + if errno != 0 { + return errno + } + return nil +} diff --git a/pkg/sentry/fs/host/socket.go b/pkg/sentry/fs/host/socket.go new file mode 100644 index 000000000..8e36ed7ee --- /dev/null +++ b/pkg/sentry/fs/host/socket.go @@ -0,0 +1,471 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package host
+
+import (
+	"sync"
+	"syscall"
+
+	"gvisor.googlesource.com/gvisor/pkg/fd"
+	"gvisor.googlesource.com/gvisor/pkg/refs"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/socket/control"
+	unixsocket "gvisor.googlesource.com/gvisor/pkg/sentry/socket/unix"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/link/rawfile"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix"
+	"gvisor.googlesource.com/gvisor/pkg/unet"
+	"gvisor.googlesource.com/gvisor/pkg/waiter"
+	"gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier"
+)
+
+// endpoint encapsulates the state needed to represent a host Unix socket.
+type endpoint struct {
+	// queue holds waiters notified via fdnotifier when the host fd
+	// becomes readable/writable.
+	queue waiter.Queue `state:"nosave"`
+
+	// stype is the type of Unix socket. (Ex: unix.SockStream,
+	// unix.SockSeqpacket, unix.SockDgram)
+	stype unix.SockType `state:"nosave"`
+
+	// fd is the host fd backing this file.
+	fd int `state:"nosave"`
+
+	// If srfd >= 0, it is the host fd that fd was imported from.
+	srfd int `state:"wait"`
+}
+
+// init validates that fd is an AF_UNIX socket, records its type, makes it
+// non-blocking, and registers it with the fd notifier so that e.queue is
+// woken on host events. It must be called before the endpoint is used.
+func (e *endpoint) init() error {
+	// Reject anything that is not a Unix-domain socket.
+	family, err := syscall.GetsockoptInt(e.fd, syscall.SOL_SOCKET, syscall.SO_DOMAIN)
+	if err != nil {
+		return err
+	}
+
+	if family != syscall.AF_UNIX {
+		// We only allow Unix sockets.
+		return syserror.EINVAL
+	}
+
+	stype, err := syscall.GetsockoptInt(e.fd, syscall.SOL_SOCKET, syscall.SO_TYPE)
+	if err != nil {
+		return err
+	}
+
+	// The sentry multiplexes I/O, so the host fd must never block.
+	if err := syscall.SetNonblock(e.fd, true); err != nil {
+		return err
+	}
+
+	e.stype = unix.SockType(stype)
+	if err := fdnotifier.AddFD(int32(e.fd), &e.queue); err != nil {
+		return err
+	}
+	return nil
+}
+
+// newEndpoint creates a new host endpoint.
+func newEndpoint(fd int, srfd int) (*endpoint, error) { + ep := &endpoint{fd: fd, srfd: srfd} + if err := ep.init(); err != nil { + return nil, err + } + return ep, nil +} + +// newSocket allocates a new unix socket with host endpoint. +func newSocket(ctx context.Context, fd int, saveable bool) (*fs.File, error) { + ownedfd := fd + srfd := -1 + if saveable { + var err error + ownedfd, err = syscall.Dup(fd) + if err != nil { + return nil, err + } + srfd = fd + } + ep, err := newEndpoint(ownedfd, srfd) + if err != nil { + if saveable { + syscall.Close(ownedfd) + } + return nil, err + } + return unixsocket.New(ctx, ep), nil +} + +// NewSocketWithDirent allocates a new unix socket with host endpoint. +// +// This is currently only used by unsaveable Gofer nodes. +// +// NewSocketWithDirent takes ownership of f on success. +func NewSocketWithDirent(ctx context.Context, d *fs.Dirent, f *fd.FD, flags fs.FileFlags) (*fs.File, error) { + ep, err := newEndpoint(f.FD(), -1) + if err != nil { + return nil, err + } + + // Take ownship of the FD. + f.Release() + + return unixsocket.NewWithDirent(ctx, d, ep, flags), nil +} + +// Close implements unix.Endpoint.Close. +func (e *endpoint) Close() { + fdnotifier.RemoveFD(int32(e.fd)) + syscall.Close(e.fd) + e.fd = -1 +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (e *endpoint) EventRegister(we *waiter.Entry, mask waiter.EventMask) { + e.queue.EventRegister(we, mask) + fdnotifier.UpdateFD(int32(e.fd)) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (e *endpoint) EventUnregister(we *waiter.Entry) { + e.queue.EventUnregister(we) + fdnotifier.UpdateFD(int32(e.fd)) +} + +// Readiness implements unix.Endpoint.Readiness. +func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { + return fdnotifier.NonBlockingPoll(int32(e.fd), mask) +} + +// Type implements unix.Endpoint.Type. 
+func (e *endpoint) Type() unix.SockType { + return e.stype +} + +// Connect implements unix.Endpoint.Connect. +func (e *endpoint) Connect(server unix.BoundEndpoint) *tcpip.Error { + return tcpip.ErrInvalidEndpointState +} + +// Bind implements unix.Endpoint.Bind. +func (e *endpoint) Bind(address tcpip.FullAddress, commit func() *tcpip.Error) *tcpip.Error { + return tcpip.ErrInvalidEndpointState +} + +// Listen implements unix.Endpoint.Listen. +func (e *endpoint) Listen(backlog int) *tcpip.Error { + return tcpip.ErrInvalidEndpointState +} + +// Accept implements unix.Endpoint.Accept. +func (e *endpoint) Accept() (unix.Endpoint, *tcpip.Error) { + return nil, tcpip.ErrInvalidEndpointState +} + +// Shutdown implements unix.Endpoint.Shutdown. +func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error { + return tcpip.ErrInvalidEndpointState +} + +// GetSockOpt implements unix.Endpoint.GetSockOpt. +func (e *endpoint) GetSockOpt(opt interface{}) *tcpip.Error { + switch o := opt.(type) { + case tcpip.ErrorOption: + _, err := syscall.GetsockoptInt(e.fd, syscall.SOL_SOCKET, syscall.SO_ERROR) + return translateError(err) + case *tcpip.PasscredOption: + // We don't support passcred on host sockets. + *o = 0 + return nil + case *tcpip.SendBufferSizeOption: + v, err := syscall.GetsockoptInt(e.fd, syscall.SOL_SOCKET, syscall.SO_SNDBUF) + *o = tcpip.SendBufferSizeOption(v) + return translateError(err) + case *tcpip.ReceiveBufferSizeOption: + v, err := syscall.GetsockoptInt(e.fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF) + *o = tcpip.ReceiveBufferSizeOption(v) + return translateError(err) + case *tcpip.ReuseAddressOption: + v, err := syscall.GetsockoptInt(e.fd, syscall.SOL_SOCKET, syscall.SO_REUSEADDR) + *o = tcpip.ReuseAddressOption(v) + return translateError(err) + case *tcpip.ReceiveQueueSizeOption: + return tcpip.ErrQueueSizeNotSupported + } + return tcpip.ErrInvalidEndpointState +} + +// SetSockOpt implements unix.Endpoint.SetSockOpt. 
+func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error {
+	// Options are accepted and silently ignored; the host kernel owns
+	// the real socket state.
+	return nil
+}
+
+// GetLocalAddress implements unix.Endpoint.GetLocalAddress.
+// Host endpoints have no sentry-visible address, so the zero address is
+// returned.
+func (e *endpoint) GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) {
+	return tcpip.FullAddress{}, nil
+}
+
+// GetRemoteAddress implements unix.Endpoint.GetRemoteAddress.
+// Host endpoints have no sentry-visible address, so the zero address is
+// returned.
+func (e *endpoint) GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) {
+	return tcpip.FullAddress{}, nil
+}
+
+// Passcred returns whether or not the SO_PASSCRED socket option is
+// enabled on this end.
+func (e *endpoint) Passcred() bool {
+	// We don't support credential passing for host sockets.
+	return false
+}
+
+// ConnectedPasscred returns whether or not the SO_PASSCRED socket option
+// is enabled on the connected end.
+func (e *endpoint) ConnectedPasscred() bool {
+	// We don't support credential passing for host sockets.
+	return false
+}
+
+// SendMsg implements unix.Endpoint.SendMsg.
+// A destination address is rejected because host endpoints are already
+// connected.
+func (e *endpoint) SendMsg(data [][]byte, controlMessages unix.ControlMessages, to unix.BoundEndpoint) (uintptr, *tcpip.Error) {
+	if to != nil {
+		return 0, tcpip.ErrInvalidEndpointState
+	}
+	return sendMsg(e.fd, data, controlMessages)
+}
+
+// sendMsg writes data to the host fd. Control messages are rejected
+// because sending them to a host socket is not supported here.
+func sendMsg(fd int, data [][]byte, controlMessages unix.ControlMessages) (uintptr, *tcpip.Error) {
+	if !controlMessages.Empty() {
+		return 0, tcpip.ErrInvalidEndpointState
+	}
+	n, err := fdWriteVec(fd, data)
+	return n, translateError(err)
+}
+
+// RecvMsg implements unix.Endpoint.RecvMsg.
+func (e *endpoint) RecvMsg(data [][]byte, creds bool, numRights uintptr, peek bool, addr *tcpip.FullAddress) (uintptr, uintptr, unix.ControlMessages, *tcpip.Error) { + return recvMsg(e.fd, data, numRights, peek, addr) +} + +func recvMsg(fd int, data [][]byte, numRights uintptr, peek bool, addr *tcpip.FullAddress) (uintptr, uintptr, unix.ControlMessages, *tcpip.Error) { + var cm unet.ControlMessage + if numRights > 0 { + cm.EnableFDs(int(numRights)) + } + rl, ml, cl, err := fdReadVec(fd, data, []byte(cm), peek) + if err == syscall.EAGAIN { + return 0, 0, unix.ControlMessages{}, tcpip.ErrWouldBlock + } + if err != nil { + return 0, 0, unix.ControlMessages{}, translateError(err) + } + + // Trim the control data if we received less than the full amount. + if cl < uint64(len(cm)) { + cm = cm[:cl] + } + + // Avoid extra allocations in the case where there isn't any control data. + if len(cm) == 0 { + return rl, ml, unix.ControlMessages{}, nil + } + + fds, err := cm.ExtractFDs() + if err != nil { + return 0, 0, unix.ControlMessages{}, translateError(err) + } + + if len(fds) == 0 { + return rl, ml, unix.ControlMessages{}, nil + } + return rl, ml, control.New(nil, nil, newSCMRights(fds)), nil +} + +// NewConnectedEndpoint creates a new unix.Receiver and unix.ConnectedEndpoint +// backed by a host FD that will pretend to be bound at a given sentry path. +func NewConnectedEndpoint(file *fd.FD, queue *waiter.Queue, path string) (unix.Receiver, unix.ConnectedEndpoint, *tcpip.Error) { + if err := fdnotifier.AddFD(int32(file.FD()), queue); err != nil { + return nil, nil, translateError(err) + } + + e := &connectedEndpoint{path: path, queue: queue, file: file} + + // AtomicRefCounters start off with a single reference. We need two. + e.ref.IncRef() + + return e, e, nil +} + +// connectedEndpoint is a host FD backed implementation of +// unix.ConnectedEndpoint and unix.Receiver. +// +// connectedEndpoint does not support save/restore for now. 
+type connectedEndpoint struct { + queue *waiter.Queue + path string + + // ref keeps track of references to a connectedEndpoint. + ref refs.AtomicRefCount + + // mu protects fd, readClosed and writeClosed. + mu sync.RWMutex + + // file is an *fd.FD containing the FD backing this endpoint. It must be + // set to nil if it has been closed. + file *fd.FD + + // readClosed is true if the FD has read shutdown or if it has been closed. + readClosed bool + + // writeClosed is true if the FD has write shutdown or if it has been + // closed. + writeClosed bool +} + +// Send implements unix.ConnectedEndpoint.Send. +func (c *connectedEndpoint) Send(data [][]byte, controlMessages unix.ControlMessages, from tcpip.FullAddress) (uintptr, bool, *tcpip.Error) { + c.mu.RLock() + defer c.mu.RUnlock() + if c.writeClosed { + return 0, false, tcpip.ErrClosedForSend + } + n, err := sendMsg(c.file.FD(), data, controlMessages) + // There is no need for the callee to call SendNotify because sendMsg uses + // the host's sendmsg(2) and the host kernel's queue. + return n, false, err +} + +// SendNotify implements unix.ConnectedEndpoint.SendNotify. +func (c *connectedEndpoint) SendNotify() {} + +// CloseSend implements unix.ConnectedEndpoint.CloseSend. +func (c *connectedEndpoint) CloseSend() { + c.mu.Lock() + c.writeClosed = true + c.mu.Unlock() +} + +// CloseNotify implements unix.ConnectedEndpoint.CloseNotify. +func (c *connectedEndpoint) CloseNotify() {} + +// Writable implements unix.ConnectedEndpoint.Writable. +func (c *connectedEndpoint) Writable() bool { + c.mu.RLock() + defer c.mu.RUnlock() + if c.writeClosed { + return true + } + return fdnotifier.NonBlockingPoll(int32(c.file.FD()), waiter.EventOut)&waiter.EventOut != 0 +} + +// Passcred implements unix.ConnectedEndpoint.Passcred. +func (c *connectedEndpoint) Passcred() bool { + // We don't support credential passing for host sockets. + return false +} + +// GetLocalAddress implements unix.ConnectedEndpoint.GetLocalAddress. 
+func (c *connectedEndpoint) GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) { + return tcpip.FullAddress{Addr: tcpip.Address(c.path)}, nil +} + +// EventUpdate implements unix.ConnectedEndpoint.EventUpdate. +func (c *connectedEndpoint) EventUpdate() { + c.mu.RLock() + defer c.mu.RUnlock() + if c.file.FD() != -1 { + fdnotifier.UpdateFD(int32(c.file.FD())) + } +} + +// Recv implements unix.Receiver.Recv. +func (c *connectedEndpoint) Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (uintptr, uintptr, unix.ControlMessages, tcpip.FullAddress, bool, *tcpip.Error) { + c.mu.RLock() + defer c.mu.RUnlock() + if c.readClosed { + return 0, 0, unix.ControlMessages{}, tcpip.FullAddress{}, false, tcpip.ErrClosedForReceive + } + rl, ml, cm, err := recvMsg(c.file.FD(), data, numRights, peek, nil) + // There is no need for the callee to call RecvNotify because recvMsg uses + // the host's recvmsg(2) and the host kernel's queue. + return rl, ml, cm, tcpip.FullAddress{Addr: tcpip.Address(c.path)}, false, err +} + +// close releases all resources related to the endpoint. +func (c *connectedEndpoint) close() { + fdnotifier.RemoveFD(int32(c.file.FD())) + c.file.Close() + c.file = nil +} + +// RecvNotify implements unix.Receiver.RecvNotify. +func (c *connectedEndpoint) RecvNotify() {} + +// CloseRecv implements unix.Receiver.CloseRecv. +func (c *connectedEndpoint) CloseRecv() { + c.mu.Lock() + c.readClosed = true + c.mu.Unlock() +} + +// Readable implements unix.Receiver.Readable. +func (c *connectedEndpoint) Readable() bool { + c.mu.RLock() + defer c.mu.RUnlock() + if c.readClosed { + return true + } + return fdnotifier.NonBlockingPoll(int32(c.file.FD()), waiter.EventIn)&waiter.EventIn != 0 +} + +// SendQueuedSize implements unix.Receiver.SendQueuedSize. +func (c *connectedEndpoint) SendQueuedSize() int64 { + // SendQueuedSize isn't supported for host sockets because we don't allow the + // sentry to call ioctl(2). 
+ return -1 +} + +// RecvQueuedSize implements unix.Receiver.RecvQueuedSize. +func (c *connectedEndpoint) RecvQueuedSize() int64 { + // RecvQueuedSize isn't supported for host sockets because we don't allow the + // sentry to call ioctl(2). + return -1 +} + +// SendMaxQueueSize implements unix.Receiver.SendMaxQueueSize. +func (c *connectedEndpoint) SendMaxQueueSize() int64 { + v, err := syscall.GetsockoptInt(c.file.FD(), syscall.SOL_SOCKET, syscall.SO_SNDBUF) + if err != nil { + return -1 + } + return int64(v) +} + +// RecvMaxQueueSize implements unix.Receiver.RecvMaxQueueSize. +func (c *connectedEndpoint) RecvMaxQueueSize() int64 { + v, err := syscall.GetsockoptInt(c.file.FD(), syscall.SOL_SOCKET, syscall.SO_RCVBUF) + if err != nil { + return -1 + } + return int64(v) +} + +// Release implements unix.ConnectedEndpoint.Release and unix.Receiver.Release. +func (c *connectedEndpoint) Release() { + c.ref.DecRefWithDestructor(c.close) +} + +func translateError(err error) *tcpip.Error { + if err == nil { + return nil + } + return rawfile.TranslateErrno(err.(syscall.Errno)) +} diff --git a/pkg/sentry/fs/host/socket_state.go b/pkg/sentry/fs/host/socket_state.go new file mode 100644 index 000000000..6acabd55a --- /dev/null +++ b/pkg/sentry/fs/host/socket_state.go @@ -0,0 +1,39 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "fmt" + "syscall" +) + +// beforeSave is invoked by stateify. 
+func (ep *endpoint) beforeSave() { + if ep.srfd < 0 { + panic("only host file descriptors provided at sentry startup can be saved") + } +} + +// afterLoad is invoked by stateify. +func (ep *endpoint) afterLoad() { + fd, err := syscall.Dup(ep.srfd) + if err != nil { + panic(fmt.Sprintf("failed to dup restored fd %d: %v", ep.srfd, err)) + } + ep.fd = fd + if err := ep.init(); err != nil { + panic(fmt.Sprintf("Could not restore host socket fd %d: %v", ep.srfd, err)) + } +} diff --git a/pkg/sentry/fs/host/socket_test.go b/pkg/sentry/fs/host/socket_test.go new file mode 100644 index 000000000..80c46dcfa --- /dev/null +++ b/pkg/sentry/fs/host/socket_test.go @@ -0,0 +1,401 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "reflect" + "syscall" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/fd" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" + "gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier" +) + +var ( + // Make sure that connectedEndpoint implements unix.ConnectedEndpoint. 
+ _ = unix.ConnectedEndpoint(new(connectedEndpoint)) + + // Make sure that connectedEndpoint implements unix.Receiver. + _ = unix.Receiver(new(connectedEndpoint)) +) + +func getFl(fd int) (uint32, error) { + fl, _, err := syscall.RawSyscall(syscall.SYS_FCNTL, uintptr(fd), syscall.F_GETFL, 0) + if err == 0 { + return uint32(fl), nil + } + return 0, err +} + +func TestSocketIsBlocking(t *testing.T) { + // Using socketpair here because it's already connected. + pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("host socket creation failed: %v", err) + } + + fl, err := getFl(pair[0]) + if err != nil { + t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[0], err) + } + if fl&syscall.O_NONBLOCK == syscall.O_NONBLOCK { + t.Fatalf("Expected socket %v to be blocking", pair[0]) + } + if fl, err = getFl(pair[1]); err != nil { + t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[1], err) + } + if fl&syscall.O_NONBLOCK == syscall.O_NONBLOCK { + t.Fatalf("Expected socket %v to be blocking", pair[1]) + } + sock, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) failed => %v", pair[0], err) + } + defer sock.DecRef() + // Test that the socket now is non blocking. + if fl, err = getFl(pair[0]); err != nil { + t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[0], err) + } + if fl&syscall.O_NONBLOCK != syscall.O_NONBLOCK { + t.Errorf("Expected socket %v to have becoming non blocking", pair[0]) + } + if fl, err = getFl(pair[1]); err != nil { + t.Fatalf("getFl: fcntl(%v, GETFL) => %v", pair[1], err) + } + if fl&syscall.O_NONBLOCK == syscall.O_NONBLOCK { + t.Errorf("Did not expect socket %v to become non blocking", pair[1]) + } +} + +func TestSocketWritev(t *testing.T) { + // Using socketpair here because it's already connected. 
+ pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("host socket creation failed: %v", err) + } + socket, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[0], err) + } + defer socket.DecRef() + buf := []byte("hello world\n") + n, err := socket.Writev(contexttest.Context(t), usermem.BytesIOSequence(buf)) + if err != nil { + t.Fatalf("socket writev failed: %v", err) + } + + if n != int64(len(buf)) { + t.Fatalf("socket writev wrote incorrect bytes: %d", n) + } +} + +func TestSocketWritevLen0(t *testing.T) { + // Using socketpair here because it's already connected. + pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("host socket creation failed: %v", err) + } + socket, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[0], err) + } + defer socket.DecRef() + n, err := socket.Writev(contexttest.Context(t), usermem.BytesIOSequence(nil)) + if err != nil { + t.Fatalf("socket writev failed: %v", err) + } + + if n != 0 { + t.Fatalf("socket writev wrote incorrect bytes: %d", n) + } +} + +func TestSocketSendMsgLen0(t *testing.T) { + // Using socketpair here because it's already connected. 
+ pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("host socket creation failed: %v", err) + } + sfile, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[0], err) + } + defer sfile.DecRef() + + s := sfile.FileOperations.(socket.Socket) + n, terr := s.SendMsg(nil, usermem.BytesIOSequence(nil), []byte{}, 0, unix.ControlMessages{}) + if n != 0 { + t.Fatalf("socket sendmsg() failed: %v wrote: %d", terr, n) + } + + if terr != nil { + t.Fatalf("socket sendmsg() failed: %v", terr) + } +} + +func TestListen(t *testing.T) { + pair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) => %v", err) + } + sfile1, err := newSocket(contexttest.Context(t), pair[0], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[0], err) + } + defer sfile1.DecRef() + socket1 := sfile1.FileOperations.(socket.Socket) + + sfile2, err := newSocket(contexttest.Context(t), pair[1], false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", pair[1], err) + } + defer sfile2.DecRef() + socket2 := sfile2.FileOperations.(socket.Socket) + + // Socketpairs can not be listened to. + if err := socket1.Listen(nil, 64); err != syserr.ErrInvalidEndpointState { + t.Fatalf("socket1.Listen(nil, 64) => %v, want syserr.ErrInvalidEndpointState", err) + } + if err := socket2.Listen(nil, 64); err != syserr.ErrInvalidEndpointState { + t.Fatalf("socket2.Listen(nil, 64) => %v, want syserr.ErrInvalidEndpointState", err) + } + + // Create a Unix socket, do not bind it. 
+ sock, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + t.Fatalf("syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) => %v", err) + } + sfile3, err := newSocket(contexttest.Context(t), sock, false) + if err != nil { + t.Fatalf("newSocket(%v) => %v", sock, err) + } + defer sfile3.DecRef() + socket3 := sfile3.FileOperations.(socket.Socket) + + // This socket is not bound so we can't listen on it. + if err := socket3.Listen(nil, 64); err != syserr.ErrInvalidEndpointState { + t.Fatalf("socket3.Listen(nil, 64) => %v, want syserr.ErrInvalidEndpointState", err) + } +} + +func TestSend(t *testing.T) { + e := connectedEndpoint{writeClosed: true} + if _, _, err := e.Send(nil, unix.ControlMessages{}, tcpip.FullAddress{}); err != tcpip.ErrClosedForSend { + t.Errorf("Got %#v.Send() = %v, want = %v", e, err, tcpip.ErrClosedForSend) + } +} + +func TestRecv(t *testing.T) { + e := connectedEndpoint{readClosed: true} + if _, _, _, _, _, err := e.Recv(nil, false, 0, false); err != tcpip.ErrClosedForReceive { + t.Errorf("Got %#v.Recv() = %v, want = %v", e, err, tcpip.ErrClosedForReceive) + } +} + +func TestPasscred(t *testing.T) { + e := connectedEndpoint{} + if got, want := e.Passcred(), false; got != want { + t.Errorf("Got %#v.Passcred() = %t, want = %t", e, got, want) + } +} + +func TestGetLocalAddress(t *testing.T) { + e := connectedEndpoint{path: "foo"} + want := tcpip.FullAddress{Addr: tcpip.Address("foo")} + if got, err := e.GetLocalAddress(); err != nil || got != want { + t.Errorf("Got %#v.GetLocalAddress() = %#v, %v, want = %#v, %v", e, got, err, want, nil) + } +} + +func TestQueuedSize(t *testing.T) { + e := connectedEndpoint{} + tests := []struct { + name string + f func() int64 + }{ + {"SendQueuedSize", e.SendQueuedSize}, + {"RecvQueuedSize", e.RecvQueuedSize}, + } + + for _, test := range tests { + if got, want := test.f(), int64(-1); got != want { + t.Errorf("Got %#v.%s() = %d, want = %d", e, test.name, got, want) + } + } +} + 
+func TestReadable(t *testing.T) { + e := connectedEndpoint{readClosed: true} + if got, want := e.Readable(), true; got != want { + t.Errorf("Got %#v.Readable() = %t, want = %t", e, got, want) + } +} + +func TestWritable(t *testing.T) { + e := connectedEndpoint{writeClosed: true} + if got, want := e.Writable(), true; got != want { + t.Errorf("Got %#v.Writable() = %t, want = %t", e, got, want) + } +} + +func TestRelease(t *testing.T) { + f, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c := &connectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f)} + want := &connectedEndpoint{queue: c.queue} + want.ref.DecRef() + fdnotifier.AddFD(int32(c.file.FD()), nil) + c.Release() + if !reflect.DeepEqual(c, want) { + t.Errorf("got = %#v, want = %#v", c, want) + } +} + +func TestClose(t *testing.T) { + type testCase struct { + name string + cep *connectedEndpoint + addFD bool + f func() + want *connectedEndpoint + } + + var tests []testCase + + // nil is the value used by connectedEndpoint to indicate a closed file. + // Non-nil files are used to check if the file gets closed. 
+ + f, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c := &connectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f)} + tests = append(tests, testCase{ + name: "First CloseRecv", + cep: c, + addFD: false, + f: c.CloseRecv, + want: &connectedEndpoint{queue: c.queue, file: c.file, readClosed: true}, + }) + + f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c = &connectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), readClosed: true} + tests = append(tests, testCase{ + name: "Second CloseRecv", + cep: c, + addFD: false, + f: c.CloseRecv, + want: &connectedEndpoint{queue: c.queue, file: c.file, readClosed: true}, + }) + + f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c = &connectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f)} + tests = append(tests, testCase{ + name: "First CloseSend", + cep: c, + addFD: false, + f: c.CloseSend, + want: &connectedEndpoint{queue: c.queue, file: c.file, writeClosed: true}, + }) + + f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c = &connectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), writeClosed: true} + tests = append(tests, testCase{ + name: "Second CloseSend", + cep: c, + addFD: false, + f: c.CloseSend, + want: &connectedEndpoint{queue: c.queue, file: c.file, writeClosed: true}, + }) + + f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c = &connectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), writeClosed: true} + tests = 
append(tests, testCase{ + name: "CloseSend then CloseRecv", + cep: c, + addFD: true, + f: c.CloseRecv, + want: &connectedEndpoint{queue: c.queue, file: c.file, readClosed: true, writeClosed: true}, + }) + + f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c = &connectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), readClosed: true} + tests = append(tests, testCase{ + name: "CloseRecv then CloseSend", + cep: c, + addFD: true, + f: c.CloseSend, + want: &connectedEndpoint{queue: c.queue, file: c.file, readClosed: true, writeClosed: true}, + }) + + f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c = &connectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), readClosed: true, writeClosed: true} + tests = append(tests, testCase{ + name: "Full close then CloseRecv", + cep: c, + addFD: false, + f: c.CloseRecv, + want: &connectedEndpoint{queue: c.queue, file: c.file, readClosed: true, writeClosed: true}, + }) + + f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) + if err != nil { + t.Fatal("Creating socket:", err) + } + c = &connectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), readClosed: true, writeClosed: true} + tests = append(tests, testCase{ + name: "Full close then CloseSend", + cep: c, + addFD: false, + f: c.CloseSend, + want: &connectedEndpoint{queue: c.queue, file: c.file, readClosed: true, writeClosed: true}, + }) + + for _, test := range tests { + if test.addFD { + fdnotifier.AddFD(int32(test.cep.file.FD()), nil) + } + if test.f(); !reflect.DeepEqual(test.cep, test.want) { + t.Errorf("%s: got = %#v, want = %#v", test.name, test.cep, test.want) + } + } +} diff --git a/pkg/sentry/fs/host/socket_unsafe.go b/pkg/sentry/fs/host/socket_unsafe.go new file mode 100644 
index 000000000..bf8da6867 --- /dev/null +++ b/pkg/sentry/fs/host/socket_unsafe.go @@ -0,0 +1,82 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "syscall" + "unsafe" +) + +// buildIovec builds an iovec slice from the given []byte slice. +func buildIovec(bufs [][]byte) (uintptr, []syscall.Iovec) { + var length uintptr + iovecs := make([]syscall.Iovec, 0, 10) + for i := range bufs { + if l := len(bufs[i]); l > 0 { + length += uintptr(l) + iovecs = append(iovecs, syscall.Iovec{ + Base: &bufs[i][0], + Len: uint64(l), + }) + } + } + return length, iovecs +} + +func fdReadVec(fd int, bufs [][]byte, control []byte, peek bool) (readLen uintptr, msgLen uintptr, controlLen uint64, err error) { + flags := uintptr(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC) + if peek { + flags |= syscall.MSG_PEEK + } + + length, iovecs := buildIovec(bufs) + + var msg syscall.Msghdr + if len(control) != 0 { + msg.Control = &control[0] + msg.Controllen = uint64(len(control)) + } + + if len(iovecs) != 0 { + msg.Iov = &iovecs[0] + msg.Iovlen = uint64(len(iovecs)) + } + n, _, e := syscall.RawSyscall(syscall.SYS_RECVMSG, uintptr(fd), uintptr(unsafe.Pointer(&msg)), flags) + if e != 0 { + return 0, 0, 0, e + } + + if n > length { + return length, n, msg.Controllen, nil + } + + return n, n, msg.Controllen, nil +} + +func fdWriteVec(fd int, bufs [][]byte) (uintptr, error) { + _, iovecs := buildIovec(bufs) + + var msg syscall.Msghdr + 
if len(iovecs) > 0 { + msg.Iov = &iovecs[0] + msg.Iovlen = uint64(len(iovecs)) + } + n, _, e := syscall.RawSyscall(syscall.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(&msg)), syscall.MSG_DONTWAIT|syscall.MSG_NOSIGNAL) + if e != 0 { + return 0, e + } + + return n, nil +} diff --git a/pkg/sentry/fs/host/util.go b/pkg/sentry/fs/host/util.go new file mode 100644 index 000000000..74c703eb7 --- /dev/null +++ b/pkg/sentry/fs/host/util.go @@ -0,0 +1,197 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package host + +import ( + "os" + "path" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +func open(parent *inodeOperations, name string) (int, error) { + if parent == nil && !path.IsAbs(name) { + return -1, syserror.EINVAL + } + name = path.Clean(name) + + // Don't follow through symlinks. + flags := syscall.O_NOFOLLOW + + if fd, err := openAt(parent, name, flags|syscall.O_RDWR, 0); err == nil { + return fd, nil + } + // Retry as read-only. + if fd, err := openAt(parent, name, flags|syscall.O_RDONLY, 0); err == nil { + return fd, nil + } + + // Retry as write-only. 
+ if fd, err := openAt(parent, name, flags|syscall.O_WRONLY, 0); err == nil { + return fd, nil + } + + // Retry as a symlink, by including O_PATH as an option. + fd, err := openAt(parent, name, linux.O_PATH|flags, 0) + if err == nil { + return fd, nil + } + + // Everything failed. + return -1, err +} + +func openAt(parent *inodeOperations, name string, flags int, perm linux.FileMode) (int, error) { + if parent == nil { + return syscall.Open(name, flags, uint32(perm)) + } + return syscall.Openat(parent.fileState.FD(), name, flags, uint32(perm)) +} + +func nodeType(s *syscall.Stat_t) fs.InodeType { + switch x := (s.Mode & syscall.S_IFMT); x { + case syscall.S_IFLNK: + return fs.Symlink + case syscall.S_IFIFO: + return fs.Pipe + case syscall.S_IFCHR: + return fs.CharacterDevice + case syscall.S_IFBLK: + return fs.BlockDevice + case syscall.S_IFSOCK: + return fs.Socket + case syscall.S_IFDIR: + return fs.Directory + case syscall.S_IFREG: + return fs.RegularFile + default: + // This shouldn't happen, but just in case... + log.Warningf("unknown host file type %d: assuming regular", x) + return fs.RegularFile + } +} + +func wouldBlock(s *syscall.Stat_t) bool { + typ := nodeType(s) + return typ == fs.Pipe || typ == fs.Socket || typ == fs.CharacterDevice +} + +func stableAttr(s *syscall.Stat_t) fs.StableAttr { + return fs.StableAttr{ + Type: nodeType(s), + DeviceID: hostFileDevice.DeviceID(), + InodeID: hostFileDevice.Map(device.MultiDeviceKey{ + Device: s.Dev, + Inode: s.Ino, + }), + BlockSize: int64(s.Blksize), + } +} + +func owner(mo *superOperations, s *syscall.Stat_t) fs.FileOwner { + // User requested no translation, just return actual owner. + if mo.dontTranslateOwnership { + return fs.FileOwner{auth.KUID(s.Uid), auth.KGID(s.Gid)} + } + + // Show only IDs relevant to the sandboxed task. I.e. if we not own the + // file, no sandboxed task can own the file. 
In that case, we + // use OverflowID for UID, implying that the IDs are not mapped in the + // "root" user namespace. + // + // E.g. + // sandbox's host EUID/EGID is 1/1. + // some_dir's host UID/GID is 2/1. + // Task that mounted this fs has virtualized EUID/EGID 5/5. + // + // If you executed `ls -n` in the sandboxed task, it would show: + // drwxwrxwrx [...] 65534 5 [...] some_dir + + // Files are owned by OverflowID by default. + owner := fs.FileOwner{auth.KUID(auth.OverflowUID), auth.KGID(auth.OverflowGID)} + + // If we own file on host, let mounting task's initial EUID own + // the file. + if s.Uid == hostUID { + owner.UID = mo.mounter.UID + } + + // If our group matches file's group, make file's group match + // the mounting task's initial EGID. + for _, gid := range hostGIDs { + if s.Gid == gid { + owner.GID = mo.mounter.GID + break + } + } + return owner +} + +func unstableAttr(mo *superOperations, s *syscall.Stat_t) fs.UnstableAttr { + return fs.UnstableAttr{ + Size: s.Size, + Usage: s.Blocks * 512, + Perms: fs.FilePermsFromMode(linux.FileMode(s.Mode)), + Owner: owner(mo, s), + AccessTime: ktime.FromUnix(s.Atim.Sec, s.Atim.Nsec), + ModificationTime: ktime.FromUnix(s.Mtim.Sec, s.Mtim.Nsec), + StatusChangeTime: ktime.FromUnix(s.Ctim.Sec, s.Ctim.Nsec), + Links: s.Nlink, + } +} + +type dirInfo struct { + buf []byte // buffer for directory I/O. + nbuf int // length of buf; return value from ReadDirent. + bufp int // location of next record in buf. +} + +// isBlockError unwraps os errors and checks if they are caused by EAGAIN or +// EWOULDBLOCK. This is so they can be transformed into syserror.ErrWouldBlock. 
+func isBlockError(err error) bool { + if err == syserror.EAGAIN || err == syserror.EWOULDBLOCK { + return true + } + if pe, ok := err.(*os.PathError); ok { + return isBlockError(pe.Err) + } + return false +} + +func hostEffectiveKIDs() (uint32, []uint32, error) { + gids, err := os.Getgroups() + if err != nil { + return 0, nil, err + } + egids := make([]uint32, len(gids)) + for i, gid := range gids { + egids[i] = uint32(gid) + } + return uint32(os.Geteuid()), append(egids, uint32(os.Getegid())), nil +} + +var hostUID uint32 +var hostGIDs []uint32 + +func init() { + hostUID, hostGIDs, _ = hostEffectiveKIDs() +} diff --git a/pkg/sentry/fs/host/util_unsafe.go b/pkg/sentry/fs/host/util_unsafe.go new file mode 100644 index 000000000..c38d2392d --- /dev/null +++ b/pkg/sentry/fs/host/util_unsafe.go @@ -0,0 +1,137 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package host + +import ( + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" +) + +func createLink(fd int, name string, linkName string) error { + namePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + linkNamePtr, err := syscall.BytePtrFromString(linkName) + if err != nil { + return err + } + _, _, errno := syscall.Syscall( + syscall.SYS_SYMLINKAT, + uintptr(unsafe.Pointer(namePtr)), + uintptr(fd), + uintptr(unsafe.Pointer(linkNamePtr))) + if errno != 0 { + return errno + } + return nil +} + +func readLink(fd int) (string, error) { + // Buffer sizing copied from os.Readlink. + for l := 128; ; l *= 2 { + b := make([]byte, l) + n, _, errno := syscall.Syscall6( + syscall.SYS_READLINKAT, + uintptr(fd), + uintptr(unsafe.Pointer(syscall.StringBytePtr(""))), + uintptr(unsafe.Pointer(&b[0])), + uintptr(l), + 0, 0) + if n < 0 { + n = 0 + } + if errno != 0 { + return "", errno + } + if n < uintptr(l) { + return string(b[:n]), nil + } + } +} + +func unlinkAt(fd int, name string, dir bool) error { + namePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + var flags uintptr + if dir { + flags = linux.AT_REMOVEDIR + } + _, _, errno := syscall.Syscall( + syscall.SYS_UNLINKAT, + uintptr(fd), + uintptr(unsafe.Pointer(namePtr)), + flags, + ) + if errno != 0 { + return errno + } + return nil +} + +func timespecFromTimestamp(t ktime.Time, omit, setSysTime bool) syscall.Timespec { + if omit { + return syscall.Timespec{0, linux.UTIME_OMIT} + } + if setSysTime { + return syscall.Timespec{0, linux.UTIME_NOW} + } + return syscall.NsecToTimespec(t.Nanoseconds()) +} + +func setTimestamps(fd int, ts fs.TimeSpec) error { + if ts.ATimeOmit && ts.MTimeOmit { + return nil + } + var sts [2]syscall.Timespec + sts[0] = timespecFromTimestamp(ts.ATime, ts.ATimeOmit, ts.ATimeSetSystemTime) + sts[1] = 
timespecFromTimestamp(ts.MTime, ts.MTimeOmit, ts.MTimeSetSystemTime) + _, _, errno := syscall.Syscall6( + syscall.SYS_UTIMENSAT, + uintptr(fd), + 0, /* path */ + uintptr(unsafe.Pointer(&sts)), + 0, /* flags */ + 0, 0) + if errno != 0 { + return errno + } + return nil +} + +func fstatat(fd int, name string, flags int) (syscall.Stat_t, error) { + var stat syscall.Stat_t + namePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return stat, err + } + _, _, errno := syscall.Syscall6( + syscall.SYS_NEWFSTATAT, + uintptr(fd), + uintptr(unsafe.Pointer(namePtr)), + uintptr(unsafe.Pointer(&stat)), + uintptr(flags), + 0, 0) + if errno != 0 { + return stat, errno + } + return stat, nil +} diff --git a/pkg/sentry/fs/host/wait_test.go b/pkg/sentry/fs/host/wait_test.go new file mode 100644 index 000000000..c5f5c9c0d --- /dev/null +++ b/pkg/sentry/fs/host/wait_test.go @@ -0,0 +1,70 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package host + +import ( + "syscall" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +func TestWait(t *testing.T) { + var fds [2]int + err := syscall.Pipe(fds[:]) + if err != nil { + t.Fatalf("Unable to create pipe: %v", err) + } + + defer syscall.Close(fds[1]) + + ctx := contexttest.Context(t) + file, err := NewFile(ctx, fds[0], fs.RootOwner) + if err != nil { + syscall.Close(fds[0]) + t.Fatalf("NewFile failed: %v", err) + } + + defer file.DecRef() + + r := file.Readiness(waiter.EventIn) + if r != 0 { + t.Fatalf("File is ready for read when it shouldn't be.") + } + + e, ch := waiter.NewChannelEntry(nil) + file.EventRegister(&e, waiter.EventIn) + defer file.EventUnregister(&e) + + // Check that there are no notifications yet. + if len(ch) != 0 { + t.Fatalf("Channel is non-empty") + } + + // Write to the pipe, so it should be writable now. + syscall.Write(fds[1], []byte{1}) + + // Check that we get a notification. We need to yield the current thread + // so that the fdnotifier can deliver notifications, so we use a + // 1-second timeout instead of just checking the length of the channel. + select { + case <-ch: + case <-time.After(1 * time.Second): + t.Fatalf("Channel not notified") + } +} diff --git a/pkg/sentry/fs/inode.go b/pkg/sentry/fs/inode.go new file mode 100644 index 000000000..b624f4182 --- /dev/null +++ b/pkg/sentry/fs/inode.go @@ -0,0 +1,455 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/lock" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +// Inode is a file system object that can be simulatenously referenced by different +// components of the VFS (Dirent, fs.File, etc). +type Inode struct { + // AtomicRefCount is our reference count. + refs.AtomicRefCount + + // InodeOperations is the file system specific behavior of the Inode. + InodeOperations InodeOperations + + // StableAttr are stable cached attributes of the Inode. + StableAttr StableAttr + + // LockCtx is the file lock context. It manages its own sychronization and tracks + // regions of the Inode that have locks held. + LockCtx LockCtx + + // Watches is the set of inotify watches for this inode. + Watches *Watches + + // MountSource is the mount source this Inode is a part of. + MountSource *MountSource + + // overlay is the overlay entry for this Inode. + overlay *overlayEntry +} + +// LockCtx is an Inode's lock context and contains different personalities of locks; both +// Posix and BSD style locks are supported. 
+// +// Note that in Linux fcntl(2) and flock(2) locks are _not_ cooperative, because race and +// deadlock conditions make merging them prohibitive. We do the same and keep them oblivious +// to each other but provide a "context" as a convenient container. +type LockCtx struct { + // Posix is a set of POSIX-style regional advisory locks, see fcntl(2). + Posix lock.Locks + + // BSD is a set of BSD-style advisory file wide locks, see flock(2). + BSD lock.Locks +} + +// NewInode constructs an Inode from InodeOperations, a MountSource, and stable attributes. +// +// NewInode takes a reference on msrc. +func NewInode(iops InodeOperations, msrc *MountSource, sattr StableAttr) *Inode { + msrc.IncRef() + return &Inode{ + InodeOperations: iops, + StableAttr: sattr, + Watches: newWatches(), + MountSource: msrc, + } +} + +// DecRef drops a reference on the Inode. +func (i *Inode) DecRef() { + i.DecRefWithDestructor(i.destroy) +} + +// destroy releases the Inode and releases the msrc reference taken. +func (i *Inode) destroy() { + // FIXME: Context is not plumbed here. + ctx := context.Background() + if err := i.WriteOut(ctx); err != nil { + // FIXME: Mark as warning again once noatime is + // properly supported. + log.Debugf("Inode %+v, failed to sync all metadata: %v", i.StableAttr, err) + } + + // If this inode is being destroyed because it was unlinked, queue a + // deletion event. This may not be the case for inodes being revalidated. + if i.Watches.unlinked { + i.Watches.Notify("", linux.IN_DELETE_SELF, 0) + } + + // Remove references from the watch owners to the watches on this inode, + // since the watches are about to be GCed. Note that we don't need to worry + // about the watch pins since if there were any active pins, this inode + // wouldn't be in the destructor. 
+ i.Watches.targetDestroyed() + + // Overlay resources should be released synchronously, since they may + // trigger more Inode.destroy calls which must themselves be handled + // synchronously, like the WriteOut call above. + if i.overlay != nil { + i.overlay.release() + i.MountSource.DecRef() + return + } + + // Regular (non-overlay) resources may be released asynchronously. + Async(func() { + i.InodeOperations.Release(ctx) + i.MountSource.DecRef() + }) +} + +// Mappable calls i.InodeOperations.Mappable. +func (i *Inode) Mappable() memmap.Mappable { + if i.overlay != nil { + // In an overlay, Mappable is always implemented by + // the overlayEntry metadata to synchronize memory + // access of files with copy up. But first check if + // the Inodes involved would be mappable in the first + // place. + i.overlay.copyMu.RLock() + ok := i.overlay.isMappableLocked() + i.overlay.copyMu.RUnlock() + if !ok { + return nil + } + return i.overlay + } + return i.InodeOperations.Mappable(i) +} + +// WriteOut calls i.InodeOperations.WriteOut with i as the Inode. +func (i *Inode) WriteOut(ctx context.Context) error { + if i.overlay != nil { + return overlayWriteOut(ctx, i.overlay) + } + return i.InodeOperations.WriteOut(ctx, i) +} + +// Lookup calls i.InodeOperations.Lookup with i as the directory. +func (i *Inode) Lookup(ctx context.Context, name string) (*Dirent, error) { + if i.overlay != nil { + return overlayLookup(ctx, i.overlay, i, name) + } + return i.InodeOperations.Lookup(ctx, i, name) +} + +// Create calls i.InodeOperations.Create with i as the directory. +func (i *Inode) Create(ctx context.Context, d *Dirent, name string, flags FileFlags, perm FilePermissions) (*File, error) { + if i.overlay != nil { + return overlayCreate(ctx, i.overlay, d, name, flags, perm) + } + return i.InodeOperations.Create(ctx, i, name, flags, perm) +} + +// CreateDirectory calls i.InodeOperations.CreateDirectory with i as the directory. 
+func (i *Inode) CreateDirectory(ctx context.Context, d *Dirent, name string, perm FilePermissions) error { + if i.overlay != nil { + return overlayCreateDirectory(ctx, i.overlay, d, name, perm) + } + return i.InodeOperations.CreateDirectory(ctx, i, name, perm) +} + +// CreateLink calls i.InodeOperations.CreateLink with i as the directory. +func (i *Inode) CreateLink(ctx context.Context, d *Dirent, oldname string, newname string) error { + if i.overlay != nil { + return overlayCreateLink(ctx, i.overlay, d, oldname, newname) + } + return i.InodeOperations.CreateLink(ctx, i, oldname, newname) +} + +// CreateHardLink calls i.InodeOperations.CreateHardLink with i as the directory. +func (i *Inode) CreateHardLink(ctx context.Context, d *Dirent, target *Dirent, name string) error { + if i.overlay != nil { + return overlayCreateHardLink(ctx, i.overlay, d, target, name) + } + return i.InodeOperations.CreateHardLink(ctx, i, target.Inode, name) +} + +// CreateFifo calls i.InodeOperations.CreateFifo with i as the directory. +func (i *Inode) CreateFifo(ctx context.Context, d *Dirent, name string, perm FilePermissions) error { + if i.overlay != nil { + return overlayCreateFifo(ctx, i.overlay, d, name, perm) + } + return i.InodeOperations.CreateFifo(ctx, i, name, perm) +} + +// Remove calls i.InodeOperations.Remove/RemoveDirectory with i as the directory. +func (i *Inode) Remove(ctx context.Context, d *Dirent, remove *Dirent) error { + if i.overlay != nil { + return overlayRemove(ctx, i.overlay, d, remove) + } + switch remove.Inode.StableAttr.Type { + case Directory, SpecialDirectory: + return i.InodeOperations.RemoveDirectory(ctx, i, remove.name) + default: + return i.InodeOperations.Remove(ctx, i, remove.name) + } +} + +// Rename calls i.InodeOperations.Rename with the given arguments. 
+func (i *Inode) Rename(ctx context.Context, oldParent *Dirent, renamed *Dirent, newParent *Dirent, newName string) error { + if i.overlay != nil { + return overlayRename(ctx, i.overlay, oldParent, renamed, newParent, newName) + } + return i.InodeOperations.Rename(ctx, oldParent.Inode, renamed.name, newParent.Inode, newName) +} + +// Bind calls i.InodeOperations.Bind with i as the directory. +func (i *Inode) Bind(ctx context.Context, name string, data unix.BoundEndpoint, perm FilePermissions) error { + if i.overlay != nil { + return overlayBind(ctx, i.overlay, name, data, perm) + } + return i.InodeOperations.Bind(ctx, i, name, data, perm) +} + +// BoundEndpoint calls i.InodeOperations.BoundEndpoint with i as the Inode. +func (i *Inode) BoundEndpoint(path string) unix.BoundEndpoint { + if i.overlay != nil { + return overlayBoundEndpoint(i.overlay, path) + } + return i.InodeOperations.BoundEndpoint(i, path) +} + +// GetFile calls i.InodeOperations.GetFile with the given arguments. +func (i *Inode) GetFile(ctx context.Context, d *Dirent, flags FileFlags) (*File, error) { + if i.overlay != nil { + return overlayGetFile(ctx, i.overlay, d, flags) + } + return i.InodeOperations.GetFile(ctx, d, flags) +} + +// UnstableAttr calls i.InodeOperations.UnstableAttr with i as the Inode. +func (i *Inode) UnstableAttr(ctx context.Context) (UnstableAttr, error) { + if i.overlay != nil { + return overlayUnstableAttr(ctx, i.overlay) + } + return i.InodeOperations.UnstableAttr(ctx, i) +} + +// Getxattr calls i.InodeOperations.Getxattr with i as the Inode. +func (i *Inode) Getxattr(name string) ([]byte, error) { + if i.overlay != nil { + return overlayGetxattr(i.overlay, name) + } + return i.InodeOperations.Getxattr(i, name) +} + +// Listxattr calls i.InodeOperations.Listxattr with i as the Inode. 
+func (i *Inode) Listxattr() (map[string]struct{}, error) { + if i.overlay != nil { + return overlayListxattr(i.overlay) + } + return i.InodeOperations.Listxattr(i) +} + +// CheckPermission will check if the caller may access this file in the +// requested way for reading, writing, or executing. +// +// CheckPermission is like Linux's fs/namei.c:inode_permission. It +// - checks file system mount flags, +// - and utilizes InodeOperations.Check to check capabilities and modes. +func (i *Inode) CheckPermission(ctx context.Context, p PermMask) error { + // First check the outer-most mounted filesystem. + if p.Write && i.MountSource.Flags.ReadOnly { + return syserror.EROFS + } + + if i.overlay != nil { + // CheckPermission requires some special handling for + // an overlay. + // + // Writes will always be redirected to an upper filesystem, + // so ignore all lower layers being read-only. + // + // But still honor the upper-most filesystem's mount flags; + // we should not attempt to modify the writable layer if it + // is mounted read-only. + if p.Write && overlayUpperMountSource(i.MountSource).Flags.ReadOnly { + return syserror.EROFS + } + } + + return i.check(ctx, p) +} + +func (i *Inode) check(ctx context.Context, p PermMask) error { + if i.overlay != nil { + return overlayCheck(ctx, i.overlay, p) + } + if !i.InodeOperations.Check(ctx, i, p) { + return syserror.EACCES + } + return nil +} + +// SetPermissions calls i.InodeOperations.SetPermissions with i as the Inode. +func (i *Inode) SetPermissions(ctx context.Context, d *Dirent, f FilePermissions) bool { + if i.overlay != nil { + return overlaySetPermissions(ctx, i.overlay, d, f) + } + return i.InodeOperations.SetPermissions(ctx, i, f) +} + +// SetOwner calls i.InodeOperations.SetOwner with i as the Inode. 
+func (i *Inode) SetOwner(ctx context.Context, d *Dirent, o FileOwner) error {
+	if i.overlay != nil {
+		return overlaySetOwner(ctx, i.overlay, d, o)
+	}
+	return i.InodeOperations.SetOwner(ctx, i, o)
+}
+
+// SetTimestamps calls i.InodeOperations.SetTimestamps with i as the Inode.
+func (i *Inode) SetTimestamps(ctx context.Context, d *Dirent, ts TimeSpec) error {
+	if i.overlay != nil {
+		return overlaySetTimestamps(ctx, i.overlay, d, ts)
+	}
+	return i.InodeOperations.SetTimestamps(ctx, i, ts)
+}
+
+// Truncate calls i.InodeOperations.Truncate with i as the Inode.
+func (i *Inode) Truncate(ctx context.Context, d *Dirent, size int64) error {
+	if i.overlay != nil {
+		return overlayTruncate(ctx, i.overlay, d, size)
+	}
+	return i.InodeOperations.Truncate(ctx, i, size)
+}
+
+// Readlink calls i.InodeOperations.Readlink with i as the Inode.
+func (i *Inode) Readlink(ctx context.Context) (string, error) {
+	if i.overlay != nil {
+		return overlayReadlink(ctx, i.overlay)
+	}
+	return i.InodeOperations.Readlink(ctx, i)
+}
+
+// Getlink calls i.InodeOperations.Getlink.
+func (i *Inode) Getlink(ctx context.Context) (*Dirent, error) {
+	if i.overlay != nil {
+		return overlayGetlink(ctx, i.overlay)
+	}
+	return i.InodeOperations.Getlink(ctx, i)
+}
+
+// AddLink calls i.InodeOperations.AddLink.
+func (i *Inode) AddLink() {
+	if i.overlay != nil {
+		// FIXME: Remove this from InodeOperations altogether.
+		//
+		// This interface (including DropLink and NotifyStatusChange)
+		// is only used by ramfs to update metadata of children. These
+		// filesystems should _never_ have overlay Inodes cached as
+		// children. So explicitly disallow this scenario and avoid plumbing
+		// Dirents through to do copy up.
+		panic("overlay Inodes cached in ramfs directories are not supported")
+	}
+	i.InodeOperations.AddLink()
+}
+
+// DropLink calls i.InodeOperations.DropLink.
+func (i *Inode) DropLink() {
+	if i.overlay != nil {
+		// Same as AddLink.
+ panic("overlay Inodes cached in ramfs directories are not supported") + } + i.InodeOperations.DropLink() +} + +// NotifyStatusChange calls i.InodeOperations.NotifyStatusChange. +func (i *Inode) NotifyStatusChange(ctx context.Context) { + if i.overlay != nil { + // Same as AddLink. + panic("overlay Inodes cached in ramfs directories are not supported") + } + i.InodeOperations.NotifyStatusChange(ctx) +} + +// IsVirtual calls i.InodeOperations.IsVirtual. +func (i *Inode) IsVirtual() bool { + if i.overlay != nil { + // An overlay configuration does not support virtual files. + return false + } + return i.InodeOperations.IsVirtual() +} + +// StatFS calls i.InodeOperations.StatFS. +func (i *Inode) StatFS(ctx context.Context) (Info, error) { + if i.overlay != nil { + return overlayStatFS(ctx, i.overlay) + } + return i.InodeOperations.StatFS(ctx) +} + +// HandleOps extracts HandleOperations from i. +func (i *Inode) HandleOps() HandleOperations { + if i.overlay != nil { + return overlayHandleOps(i.overlay) + } + if h, ok := i.InodeOperations.(HandleOperations); ok { + return h + } + return nil +} + +// CheckOwnership checks whether `ctx` owns this Inode or may act as its owner. +// Compare Linux's fs/inode.c:inode_owner_or_capable(). +func (i *Inode) CheckOwnership(ctx context.Context) bool { + uattr, err := i.UnstableAttr(ctx) + if err != nil { + return false + } + creds := auth.CredentialsFromContext(ctx) + if uattr.Owner.UID == creds.EffectiveKUID { + return true + } + if creds.HasCapability(linux.CAP_FOWNER) && creds.UserNamespace.MapFromKUID(uattr.Owner.UID).Ok() { + return true + } + return false +} + +// CheckCapability checks whether `ctx` has capability `cp` with respect to +// operations on this Inode. +// +// Compare Linux's kernel/capability.c:capable_wrt_inode_uidgid(). 
Note that +// this function didn't exist in Linux 3.11.10, but was added by upstream +// 23adbe12ef7d "fs,userns: Change inode_capable to capable_wrt_inode_uidgid" +// to fix local privilege escalation CVE-2014-4014. +func (i *Inode) CheckCapability(ctx context.Context, cp linux.Capability) bool { + uattr, err := i.UnstableAttr(ctx) + if err != nil { + return false + } + creds := auth.CredentialsFromContext(ctx) + if !creds.UserNamespace.MapFromKUID(uattr.Owner.UID).Ok() { + return false + } + if !creds.UserNamespace.MapFromKGID(uattr.Owner.GID).Ok() { + return false + } + return creds.HasCapability(cp) +} diff --git a/pkg/sentry/fs/inode_inotify.go b/pkg/sentry/fs/inode_inotify.go new file mode 100644 index 000000000..358bbecdf --- /dev/null +++ b/pkg/sentry/fs/inode_inotify.go @@ -0,0 +1,166 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "sync" +) + +// Watches is the collection of inotify watches on an inode. +type Watches struct { + // mu protects the fields below. + mu sync.RWMutex `state:"nosave"` + + // ws is the map of active watches in this collection, keyed by the inotify + // instance id of the owner. + ws map[uint64]*Watch + + // unlinked indicates whether the target inode was ever unlinked. 
This is a + // hack to figure out if we should queue a IN_DELETE_SELF event when this + // watches collection is being destroyed, since otherwise we have no way of + // knowing if the target inode is going down due to a deletion or + // revalidation. + unlinked bool +} + +func newWatches() *Watches { + return &Watches{ + ws: make(map[uint64]*Watch), + } +} + +// MarkUnlinked indicates the target for this set of watches to be unlinked. +// This has implications for the IN_EXCL_UNLINK flag. +func (w *Watches) MarkUnlinked() { + w.mu.Lock() + defer w.mu.Unlock() + w.unlinked = true +} + +// Lookup returns a matching watch with the given id. Returns nil if no such +// watch exists. Note that the result returned by this method only remains valid +// if the inotify instance owning the watch is locked, preventing modification +// of the returned watch and preventing the replacement of the watch by another +// one from the same instance (since there may be at most one watch per +// instance, per target). +func (w *Watches) Lookup(id uint64) *Watch { + w.mu.Lock() + defer w.mu.Unlock() + return w.ws[id] +} + +// Add adds watch into this set of watches. The watch being added must be unique +// - its ID() should not collide with any existing watches. +func (w *Watches) Add(watch *Watch) { + w.mu.Lock() + defer w.mu.Unlock() + + // Sanity check, the new watch shouldn't collide with an existing + // watch. Silently replacing an existing watch would result in a ref leak on + // this inode. We could handle this collision by calling Unpin() on the + // existing watch, but then we end up leaking watch descriptor ids at the + // inotify level. + if _, exists := w.ws[watch.ID()]; exists { + panic(fmt.Sprintf("Watch collision with ID %+v", watch.ID())) + } + w.ws[watch.ID()] = watch +} + +// Remove removes a watch with the given id from this set of watches. The caller +// is responsible for generating any watch removal event, as appropriate. 
The +// provided id must match an existing watch in this collection. +func (w *Watches) Remove(id uint64) { + w.mu.Lock() + defer w.mu.Unlock() + + if w.ws == nil { + // This watch set is being destroyed. The thread executing the + // destructor is already in the process of deleting all our watches. We + // got here with no refs on the inode because we raced with the + // destructor notifying all the watch owners of the inode's destruction. + // See the comment in Watches.TargetDestroyed for why this race exists. + return + } + + watch, ok := w.ws[id] + if !ok { + // While there's technically no problem with silently ignoring a missing + // watch, this is almost certainly a bug. + panic(fmt.Sprintf("Attempt to remove a watch, but no watch found with provided id %+v.", id)) + } + delete(w.ws, watch.ID()) +} + +// Notify queues a new event with all watches in this set. +func (w *Watches) Notify(name string, events, cookie uint32) { + // N.B. We don't defer the unlocks because Notify is in the hot path of + // all IO operations, and the defer costs too much for small IO + // operations. + w.mu.RLock() + for _, watch := range w.ws { + if name != "" && w.unlinked && !watch.NotifyParentAfterUnlink() { + // IN_EXCL_UNLINK - By default, when watching events on the children + // of a directory, events are generated for children even after they + // have been unlinked from the directory. This can result in large + // numbers of uninteresting events for some applications (e.g., if + // watching /tmp, in which many applications create temporary files + // whose names are immediately unlinked). Specifying IN_EXCL_UNLINK + // changes the default behavior, so that events are not generated + // for children after they have been unlinked from the watched + // directory. -- inotify(7) + // + // We know we're dealing with events for a parent when the name + // isn't empty. 
+ continue + } + watch.Notify(name, events, cookie) + } + w.mu.RUnlock() +} + +// Unpin unpins dirent from all watches in this set. +func (w *Watches) Unpin(d *Dirent) { + w.mu.RLock() + defer w.mu.RUnlock() + for _, watch := range w.ws { + watch.Unpin(d) + } +} + +// targetDestroyed is called by the inode destructor to notify the watch owners +// of the impending destruction of the watch target. +func (w *Watches) targetDestroyed() { + var ws map[uint64]*Watch + + // We can't hold w.mu while calling watch.TargetDestroyed to preserve lock + // ordering w.r.t to the owner inotify instances. Instead, atomically move + // the watches map into a local variable so we can iterate over it safely. + // + // Because of this however, it is possible for the watches' owners to reach + // this inode while the inode has no refs. This is still safe because the + // owners can only reach the inode until this function finishes calling + // watch.TargetDestroyed() below and the inode is guaranteed to exist in the + // meanwhile. But we still have to be very careful not to rely on inode + // state that may have been already destroyed. + w.mu.Lock() + ws = w.ws + w.ws = nil + w.mu.Unlock() + + for _, watch := range ws { + watch.TargetDestroyed() + } +} diff --git a/pkg/sentry/fs/inode_operations.go b/pkg/sentry/fs/inode_operations.go new file mode 100644 index 000000000..b33980178 --- /dev/null +++ b/pkg/sentry/fs/inode_operations.go @@ -0,0 +1,385 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "errors" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +var ( + // ErrResolveViaReadlink is a special error value returned by + // InodeOperations.Getlink() to indicate that a link should be + // resolved automatically by walking to the path returned by + // InodeOperations.Readlink(). + ErrResolveViaReadlink = errors.New("link should be resolved via Readlink()") +) + +// TimeSpec contains access and modification timestamps. If either ATimeOmit or +// MTimeOmit is true, then the corresponding timestamp should not be updated. +// If either ATimeSetSystemTime or MTimeSetSystemTime are set then the +// corresponding timestamp should be ignored and the time will be set to the +// current system time. +type TimeSpec struct { + ATime ktime.Time + ATimeOmit bool + ATimeSetSystemTime bool + MTime ktime.Time + MTimeOmit bool + MTimeSetSystemTime bool +} + +// InodeOperations are operations on an Inode that diverge per file system. +// +// Objects that implement InodeOperations may cache file system "private" +// data that is useful for implementing these methods. In contrast, Inode +// contains state that is common to all Inodes; this state may be optionally +// used by InodeOperations. An object that implements InodeOperations may +// not take a reference on an Inode. +type InodeOperations interface { + // Release releases all private file system data held by this object. + // Once Release is called, this object is dead (no other methods will + // ever be called). + Release(context.Context) + + // Lookup loads an Inode at name under dir into a Dirent. 
The name + // is a valid component path: it contains no "/"s nor is the empty + // string. + // + // Lookup may return one of: + // + // * A nil Dirent and a non-nil error. If the reason that Lookup failed + // was because the name does not exist under Inode, then must return + // syserror.ENOENT. + // + // * If name does not exist under dir and the file system wishes this + // fact to be cached, a non-nil Dirent containing a nil Inode and a + // nil error. This is a negative Dirent and must have exactly one + // reference (at-construction reference). + // + // * If name does exist under this dir, a non-nil Dirent containing a + // non-nil Inode, and a nil error. File systems that take extra + // references on this Dirent should implement DirentOperations. + Lookup(ctx context.Context, dir *Inode, name string) (*Dirent, error) + + // Create creates an Inode at name under dir and returns a new File + // whose Dirent backs the new Inode. Implementations must ensure that + // name does not already exist. Create may return one of: + // + // * A nil File and a non-nil error. + // + // * A non-nil File and a nil error. File.Dirent will be a new Dirent, + // with a single reference held by File. File systems that take extra + // references on this Dirent should implement DirentOperations. + // + // The caller must ensure that this operation is permitted. + Create(ctx context.Context, dir *Inode, name string, flags FileFlags, perm FilePermissions) (*File, error) + + // CreateDirectory creates a new directory under this dir. + // CreateDirectory should otherwise do the same as Create. + // + // The caller must ensure that this operation is permitted. + CreateDirectory(ctx context.Context, dir *Inode, name string, perm FilePermissions) error + + // CreateLink creates a symbolic link under dir between newname + // and oldname. CreateLink should otherwise do the same as Create. + // + // The caller must ensure that this operation is permitted. 
+ CreateLink(ctx context.Context, dir *Inode, oldname string, newname string) error + + // CreateHardLink creates a hard link under dir between the target + // Inode and name. Implementations must ensure that name does not + // already exist. + // + // The caller must ensure this operation is permitted. + CreateHardLink(ctx context.Context, dir *Inode, target *Inode, name string) error + + // CreateFifo creates a new named pipe under dir at name. + // Implementations must ensure that an Inode at name does not + // already exist. + // + // The caller must ensure that this operation is permitted. + CreateFifo(ctx context.Context, dir *Inode, name string, perm FilePermissions) error + + // Remove removes the given named non-directory under dir. + // + // The caller must ensure that this operation is permitted. + // + // TODO: merge Remove and RemoveDirectory, Remove + // just needs a type flag. + Remove(ctx context.Context, dir *Inode, name string) error + + // RemoveDirectory removes the given named directory under dir. + // + // The caller must ensure that this operation is permitted. + // + // RemoveDirectory should check that the directory to be + // removed is empty. + RemoveDirectory(ctx context.Context, dir *Inode, name string) error + + // Rename atomically renames oldName under oldParent to newName + // under newParent where oldParent and newParent are directories. + // + // Implementations are responsible for rejecting renames that + // replace non-empty directories. + Rename(ctx context.Context, oldParent *Inode, oldName string, newParent *Inode, newName string) error + + // Bind binds a new socket under dir at the given name. + // Implementations must ensure that name does not already exist. + // + // The caller must ensure that this operation is permitted. 
+ Bind(ctx context.Context, dir *Inode, name string, data unix.BoundEndpoint, perm FilePermissions) error + + // BoundEndpoint returns the socket endpoint at path stored in + // or generated by an Inode. + // + // The path is only relevant for generated endpoint because stored + // endpoints already know their path. It is ok for the endpoint to + // hold onto their path because the only way to change a bind + // address is to rebind the socket. + // + // This is valid iff the type of the Inode is a Socket, which + // generally implies that this Inode was created via CreateSocket. + // + // If there is no socket endpoint available, nil will be returned. + BoundEndpoint(inode *Inode, path string) unix.BoundEndpoint + + // GetFile returns a new open File backed by a Dirent and FileFlags. + // It may block as long as it is done with ctx. + // + // The returned File will uniquely back an application fd. + GetFile(ctx context.Context, d *Dirent, flags FileFlags) (*File, error) + + // UnstableAttr returns the most up-to-date "unstable" attributes of + // an Inode, where "unstable" means that they change in response to + // file system events. + UnstableAttr(ctx context.Context, inode *Inode) (UnstableAttr, error) + + // Getxattr retrieves the value of extended attribute name. Inodes that + // do not support extended attributes return EOPNOTSUPP. Inodes that + // support extended attributes but don't have a value at name return + // ENODATA. + Getxattr(inode *Inode, name string) ([]byte, error) + + // Setxattr sets the value of extended attribute name. Inodes that + // do not support extended attributes return EOPNOTSUPP. + Setxattr(inode *Inode, name string, value []byte) error + + // Listxattr returns the set of all extended attributes names that + // have values. Inodes that do not support extended attributes return + // EOPNOTSUPP. 
+	Listxattr(inode *Inode) (map[string]struct{}, error)
+
+	// Check determines whether an Inode can be accessed with the
+	// requested permission mask using the context (which gives access
+	// to Credentials and UserNamespace).
+	Check(ctx context.Context, inode *Inode, p PermMask) bool
+
+	// SetPermissions sets new permissions for an Inode. Returns false
+	// if it was not possible to set the new permissions.
+	//
+	// The caller must ensure that this operation is permitted.
+	SetPermissions(ctx context.Context, inode *Inode, f FilePermissions) bool
+
+	// SetOwner sets the ownership for this file.
+	//
+	// If either UID or GID are set to auth.NoID, its value will not be
+	// changed.
+	//
+	// The caller must ensure that this operation is permitted.
+	SetOwner(ctx context.Context, inode *Inode, owner FileOwner) error
+
+	// SetTimestamps sets the access and modification timestamps of an
+	// Inode according to the access and modification times in the TimeSpec.
+	//
+	// If either ATimeOmit or MTimeOmit is set, then the corresponding
+	// timestamp is not updated.
+	//
+	// If either ATimeSetSystemTime or MTimeSetSystemTime is true, that
+	// timestamp is set to the current time instead.
+	//
+	// The caller must ensure that this operation is permitted.
+	SetTimestamps(ctx context.Context, inode *Inode, ts TimeSpec) error
+
+	// Truncate changes the size of an Inode. Truncate should not check
+	// permissions internally, as it is used for both sys_truncate and
+	// sys_ftruncate.
+	//
+	// Implementations need not check that length >= 0.
+	Truncate(ctx context.Context, inode *Inode, size int64) error
+
+	// WriteOut writes cached Inode state to a backing filesystem in a
+	// synchronous manner.
+	//
+	// File systems that do not cache metadata or data via an Inode
+	// implement WriteOut as a no-op. File systems that are entirely in
+	// memory also implement WriteOut as a no-op. Otherwise file systems
+	// call Inode.Sync to write back page cached data and cached metadata
+	// followed by syncing writeback handles.
+	//
+	// It derives from include/linux/fs.h:super_operations->write_inode.
+	WriteOut(ctx context.Context, inode *Inode) error
+
+	// Readlink reads the symlink path of an Inode.
+	//
+	// Readlink is permitted to return a different path depending on ctx,
+	// the request originator.
+	//
+	// The caller must ensure that this operation is permitted.
+	//
+	// Readlink should check that Inode is a symlink and its content is
+	// at least readable.
+	Readlink(ctx context.Context, inode *Inode) (string, error)
+
+	// Getlink resolves a symlink to a target *Dirent.
+	//
+	// Filesystems that can resolve the link by walking to the path returned
+	// by Readlink should return (nil, ErrResolveViaReadlink), which
+	// triggers link resolution via Readlink and Lookup.
+	//
+	// Some links cannot be followed by Lookup. In this case, Getlink can
+	// return the Dirent of the link target. The caller holds a reference
+	// to the Dirent. Filesystems that return a non-nil *Dirent from Getlink
+	// cannot participate in an overlay because it is impossible for the
+	// overlay to ascertain whether or not the *Dirent should contain an
+	// overlayEntry.
+	//
+	// Any error returned from Getlink other than ErrResolveViaReadlink
+	// indicates the caller's inability to traverse this Inode as a link
+	// (e.g. syserror.ENOLINK indicates that the Inode is not a link,
+	// syscall.EPERM indicates that traversing the link is not allowed, etc).
+	Getlink(context.Context, *Inode) (*Dirent, error)
+
+	// Mappable returns a memmap.Mappable that provides memory mappings of the
+	// Inode's data. Mappable may return nil if this is not supported. The
+	// returned Mappable must remain valid until InodeOperations.Release is
+	// called.
+	Mappable(*Inode) memmap.Mappable
+
+	// The below methods require cleanup.
+ + // AddLink increments the hard link count of an Inode. + // + // Remove in favor of Inode.IncLink. + AddLink() + + // DropLink decrements the hard link count of an Inode. + // + // Remove in favor of Inode.DecLink. + DropLink() + + // NotifyStatusChange sets the status change time to the current time. + // + // Remove in favor of updating the Inode's cached status change time. + NotifyStatusChange(ctx context.Context) + + // IsVirtual indicates whether or not this corresponds to a virtual + // resource. + // + // If IsVirtual returns true, then caching will be disabled for this + // node, and fs.Dirent.Freeze() will not stop operations on the node. + // + // Remove in favor of freezing specific mounts. + IsVirtual() bool + + // StatFS returns a filesystem Info implementation or an error. If + // the filesystem does not support this operation (maybe in the future + // it will), then ENOSYS should be returned. + // + // Move to MountSourceOperations. + StatFS(context.Context) (Info, error) + + HandleOperations +} + +// HandleOperations are extended InodeOperations that are only implemented +// for file systems that use fs/handle.go:Handle to generate open Files. +// +// Handle is deprecated; these methods are deprecated as well. +// +// Filesystems are encouraged to implement the File interface directly +// instead of using Handle. To indicate that the below methods should never +// be called, embed DeprecatedFileOperations to satisfy this interface. +type HandleOperations interface { + waiter.Waitable + + // DeprecatedPreadv is deprecated in favor of filesystems + // implementing File.Preadv directly. + // + // DeprecatedPreadv reads up to dst.NumBytes() bytes into dst, starting at + // the given offset, and returns the number of bytes read. + // + // Preadv may return a partial read result before EOF is reached. + // + // If a symlink, Preadv reads the target value of the symlink. + // + // Preadv should not check for readable permissions. 
+ DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) + + // DeprecatedPwritev is deprecated in favor of filesystems + // implementing File.Pwritev directly. + // + // DeprecatedPwritev writes up to src.NumBytes() bytes from src to the + // Inode, starting at the given offset and returns the number of bytes + // written. + // + // Pwritev should not check that the Inode has writable permissions. + DeprecatedPwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) + + // DeprecatedReaddir is deprecated in favor of filesystems + // implementing File.Readdir directly. + // + // DeprecatedReaddir emits directory entries by calling dirCtx.EmitDir, + // beginning with the entry at offset. + // + // Entries for "." and ".." must *not* be included. + // + // If the offset returned is the same as the argument offset, then + // nothing has been serialized. This is equivalent to reaching EOF. + // In this case serializer.Written() should return 0. + // + // The order of entries to emit must be consistent between Readdir + // calls, and must start with the given offset. + // + // The caller must ensure that this operation is permitted. + DeprecatedReaddir(ctx context.Context, dirCtx *DirCtx, offset int) (int, error) + + // DeprecatedFsync is deprecated in favor of filesystems implementing + // File.Fsync directly. + // + // DeprecatedFsync syncs a file. + DeprecatedFsync() error + + // DeprecatedMappable is deprecated in favor of filesystems implementing + // File.Mappable directly. + // + // DeprecatedMappable returns a Mappable if the Inode can be mapped. + DeprecatedMappable(ctx context.Context, inode *Inode) (memmap.Mappable, bool) + + // DeprecatedFlush is deprecated in favor of filesystems implementing + // File.Flush directly. + // + // DeprecatedFlush flushes a file. + // + // Implementations may choose to free up memory or complete pending I/O + // but also may implement Flush as a no-op. 
+ DeprecatedFlush() error +} diff --git a/pkg/sentry/fs/inode_overlay.go b/pkg/sentry/fs/inode_overlay.go new file mode 100644 index 000000000..343150bb8 --- /dev/null +++ b/pkg/sentry/fs/inode_overlay.go @@ -0,0 +1,555 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "strings" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +func overlayHasWhiteout(parent *Inode, name string) bool { + buf, err := parent.Getxattr(XattrOverlayWhiteout(name)) + return err == nil && string(buf) == "y" +} + +func overlayCreateWhiteout(parent *Inode, name string) error { + return parent.InodeOperations.Setxattr(parent, XattrOverlayWhiteout(name), []byte("y")) +} + +func overlayWriteOut(ctx context.Context, o *overlayEntry) error { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + if o.upper == nil { + return nil + } + return o.upper.InodeOperations.WriteOut(ctx, o.upper) +} + +func overlayLookup(ctx context.Context, parent *overlayEntry, inode *Inode, name string) (*Dirent, error) { + parent.copyMu.RLock() + defer parent.copyMu.RUnlock() + + // Assert that there is at least one upper or lower entry. 
+ if parent.upper == nil && parent.lower == nil { + panic("invalid overlayEntry, needs at least one Inode") + } + + var upperInode *Inode + var lowerInode *Inode + + // Does the parent directory exist in the upper file system? + if parent.upper != nil { + // First check if a file object exists in the upper file system. + // A file could have been created over a whiteout, so we need to + // check if something exists in the upper file system first. + child, err := parent.upper.Lookup(ctx, name) + if err != nil && err != syserror.ENOENT { + // We encountered an error that an overlay cannot handle, + // we must propagate it to the caller. + return nil, err + } + if child != nil { + defer child.DecRef() + + // Is the child non-negative? + if !child.IsNegative() { + upperInode = child.Inode + upperInode.IncRef() + } + } + + // Are we done? + if overlayHasWhiteout(parent.upper, name) { + if upperInode == nil { + return NewNegativeDirent(name), nil + } + entry, err := newOverlayEntry(ctx, upperInode, nil, false) + if err != nil { + // Don't leak resources. + upperInode.DecRef() + return nil, err + } + return NewDirent(newOverlayInode(ctx, entry, inode.MountSource), name), nil + } + } + + // Check the lower file system. We do this unconditionally (even for + // non-directories) because we may need to use stable attributes from + // the lower filesystem (e.g. device number, inode number) that were + // visible before a copy up. + if parent.lower != nil { + // Check the lower file system. + child, err := parent.lower.Lookup(ctx, name) + // Same song and dance as above. + if err != nil && err != syserror.ENOENT { + // Don't leak resources. + if upperInode != nil { + upperInode.DecRef() + } + return nil, err + } + if child != nil { + defer child.DecRef() + + // Is the child negative? + if !child.IsNegative() { + // Did we find something in the upper filesystem? We can + // only use it if the types match. 
+ if upperInode == nil || upperInode.StableAttr.Type == child.Inode.StableAttr.Type { + lowerInode = child.Inode + lowerInode.IncRef() + } + } + } + } + + // Was all of this for naught? + if upperInode == nil && lowerInode == nil { + // Return a negative Dirent indicating that nothing was found. + return NewNegativeDirent(name), nil + } + + // Did we find a lower Inode? Remember this because we may decide we don't + // actually need the lower Inode (see below). + lowerExists := lowerInode != nil + + // If we found something in the upper filesystem and the lower filesystem, + // use the stable attributes from the lower filesystem. If we don't do this, + // then it may appear that the file was magically recreated across copy up. + if upperInode != nil && lowerInode != nil { + // Steal attributes. + upperInode.StableAttr = lowerInode.StableAttr + + // For non-directories, the lower filesystem resource is strictly + // unnecessary because we don't need to copy-up and we will always + // operate (e.g. read/write) on the upper Inode. + if !IsDir(upperInode.StableAttr) { + lowerInode.DecRef() + lowerInode = nil + } + } + + // Phew, finally done. + entry, err := newOverlayEntry(ctx, upperInode, lowerInode, lowerExists) + if err != nil { + // Well, not quite, we failed at the last moment, how depressing. + // Be sure not to leak resources. + if upperInode != nil { + upperInode.DecRef() + } + if lowerInode != nil { + lowerInode.DecRef() + } + return nil, err + } + return NewDirent(newOverlayInode(ctx, entry, inode.MountSource), name), nil +} + +func overlayCreate(ctx context.Context, o *overlayEntry, parent *Dirent, name string, flags FileFlags, perm FilePermissions) (*File, error) { + // Dirent.Create takes renameMu if the Inode is an overlay Inode. 
+ if err := copyUpLockedForRename(ctx, parent); err != nil { + return nil, err + } + + upperFile, err := o.upper.InodeOperations.Create(ctx, o.upper, name, flags, perm) + if err != nil { + return nil, err + } + + // Take another reference on the upper file's inode, which will be + // owned by the overlay entry. + upperFile.Dirent.Inode.IncRef() + entry, err := newOverlayEntry(ctx, upperFile.Dirent.Inode, nil, false) + if err != nil { + cleanupUpper(ctx, o.upper, name) + return nil, err + } + + // NOTE: Replace the Dirent with a transient Dirent, since + // we are about to create the real Dirent: an overlay Dirent. + // + // This ensures the *fs.File returned from overlayCreate is in the same + // state as the *fs.File returned by overlayGetFile, where the upper + // file has a transient Dirent. + // + // This is necessary for Save/Restore, as otherwise the upper Dirent + // (which has no path as it is unparented and never reachable by the + // user) will clobber the real path for the underlying Inode. + upperFile.Dirent.Inode.IncRef() + upperDirent := NewTransientDirent(upperFile.Dirent.Inode) + upperFile.Dirent.DecRef() + upperFile.Dirent = upperDirent + + // Create the overlay inode and dirent. We need this to construct the + // overlay file. + overlayInode := newOverlayInode(ctx, entry, parent.Inode.MountSource) + // d will own the inode reference. + overlayDirent := NewDirent(overlayInode, name) + // The overlay file created below with NewFile will take a reference on + // the overlayDirent, and it should be the only thing holding a + // reference at the time of creation, so we must drop this reference. + defer overlayDirent.DecRef() + + // Create a new overlay file that wraps the upper file. 
+ flags.Pread = upperFile.Flags().Pread + flags.Pwrite = upperFile.Flags().Pwrite + overlayFile := NewFile(ctx, overlayDirent, flags, &overlayFileOperations{upper: upperFile}) + + return overlayFile, nil +} + +func overlayCreateDirectory(ctx context.Context, o *overlayEntry, parent *Dirent, name string, perm FilePermissions) error { + // Dirent.CreateDirectory takes renameMu if the Inode is an overlay + // Inode. + if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + return o.upper.InodeOperations.CreateDirectory(ctx, o.upper, name, perm) +} + +func overlayCreateLink(ctx context.Context, o *overlayEntry, parent *Dirent, oldname string, newname string) error { + // Dirent.CreateLink takes renameMu if the Inode is an overlay Inode. + if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + return o.upper.InodeOperations.CreateLink(ctx, o.upper, oldname, newname) +} + +func overlayCreateHardLink(ctx context.Context, o *overlayEntry, parent *Dirent, target *Dirent, name string) error { + // Dirent.CreateHardLink takes renameMu if the Inode is an overlay + // Inode. + if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + if err := copyUpLockedForRename(ctx, target); err != nil { + return err + } + return o.upper.InodeOperations.CreateHardLink(ctx, o.upper, target.Inode.overlay.upper, name) +} + +func overlayCreateFifo(ctx context.Context, o *overlayEntry, parent *Dirent, name string, perm FilePermissions) error { + // Dirent.CreateFifo takes renameMu if the Inode is an overlay Inode. + if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + return o.upper.InodeOperations.CreateFifo(ctx, o.upper, name, perm) +} + +func overlayRemove(ctx context.Context, o *overlayEntry, parent *Dirent, child *Dirent) error { + // Dirent.Remove and Dirent.RemoveDirectory take renameMu if the Inode + // is an overlay Inode. 
+ if err := copyUpLockedForRename(ctx, parent); err != nil { + return err + } + child.Inode.overlay.copyMu.RLock() + defer child.Inode.overlay.copyMu.RUnlock() + if child.Inode.overlay.upper != nil { + if child.Inode.StableAttr.Type == Directory { + if err := o.upper.InodeOperations.RemoveDirectory(ctx, o.upper, child.name); err != nil { + return err + } + } else { + if err := o.upper.InodeOperations.Remove(ctx, o.upper, child.name); err != nil { + return err + } + } + } + if child.Inode.overlay.lowerExists { + return overlayCreateWhiteout(o.upper, child.name) + } + return nil +} + +func overlayRename(ctx context.Context, o *overlayEntry, oldParent *Dirent, renamed *Dirent, newParent *Dirent, newName string) error { + // To be able to copy these up below, they have to be part of an + // overlay file system. + // + // Maybe some day we can allow the more complicated case of + // non-overlay X overlay renames, but that's not necessary right now. + if renamed.Inode.overlay == nil || newParent.Inode.overlay == nil || oldParent.Inode.overlay == nil { + return syserror.EXDEV + } + + // Check here if the file to be replaced exists and is a non-empty + // directory. If we copy up first, we may end up copying the directory + // but none of its children, so the directory will appear empty in the + // upper fs, which will then allow the rename to proceed when it should + // return ENOTEMPTY. + replaced, err := newParent.Inode.Lookup(ctx, newName) + if err != nil && err != syserror.ENOENT { + return err + } + if err == nil && !replaced.IsNegative() && IsDir(replaced.Inode.StableAttr) { + children, err := readdirOne(ctx, replaced) + if err != nil { + return err + } + + // readdirOne ensures that "." and ".." are not + // included among the returned children, so we don't + // need to bother checking for them. 
+ if len(children) > 0 { + return syserror.ENOTEMPTY + } + } + if err := copyUpLockedForRename(ctx, renamed); err != nil { + return err + } + if err := copyUpLockedForRename(ctx, newParent); err != nil { + return err + } + oldName := renamed.name + if err := o.upper.InodeOperations.Rename(ctx, oldParent.Inode.overlay.upper, oldName, newParent.Inode.overlay.upper, newName); err != nil { + return err + } + if renamed.Inode.overlay.lowerExists { + return overlayCreateWhiteout(oldParent.Inode.overlay.upper, oldName) + } + return nil +} + +func overlayBind(ctx context.Context, o *overlayEntry, name string, data unix.BoundEndpoint, perm FilePermissions) error { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + // We do not support doing anything exciting with sockets unless there + // is already a directory in the upper filesystem. + if o.upper == nil { + return syserror.EOPNOTSUPP + } + return o.upper.InodeOperations.Bind(ctx, o.upper, name, data, perm) +} + +func overlayBoundEndpoint(o *overlayEntry, path string) unix.BoundEndpoint { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + if o.upper != nil { + return o.upper.InodeOperations.BoundEndpoint(o.upper, path) + } + // If a socket is already in the lower file system, allow connections + // to it. 
+ return o.lower.InodeOperations.BoundEndpoint(o.lower, path) +} + +func overlayGetFile(ctx context.Context, o *overlayEntry, d *Dirent, flags FileFlags) (*File, error) { + if flags.Write { + if err := copyUp(ctx, d); err != nil { + return nil, err + } + } + + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + if o.upper != nil { + upper, err := overlayFile(ctx, o.upper, flags) + if err != nil { + return nil, err + } + flags.Pread = upper.Flags().Pread + flags.Pwrite = upper.Flags().Pwrite + return NewFile(ctx, d, flags, &overlayFileOperations{upper: upper}), nil + } + + lower, err := overlayFile(ctx, o.lower, flags) + if err != nil { + return nil, err + } + flags.Pread = lower.Flags().Pread + flags.Pwrite = lower.Flags().Pwrite + return NewFile(ctx, d, flags, &overlayFileOperations{lower: lower}), nil +} + +func overlayUnstableAttr(ctx context.Context, o *overlayEntry) (UnstableAttr, error) { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + if o.upper != nil { + return o.upper.UnstableAttr(ctx) + } + return o.lower.UnstableAttr(ctx) +} + +func overlayGetxattr(o *overlayEntry, name string) ([]byte, error) { + // Don't forward the value of the extended attribute if it would + // unexpectedly change the behavior of a wrapping overlay layer. + if strings.HasPrefix(XattrOverlayPrefix, name) { + return nil, syserror.ENODATA + } + o.copyMu.RLock() + defer o.copyMu.RUnlock() + if o.upper != nil { + return o.upper.Getxattr(name) + } + return o.lower.Getxattr(name) +} + +func overlayListxattr(o *overlayEntry) (map[string]struct{}, error) { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + var names map[string]struct{} + var err error + if o.upper != nil { + names, err = o.upper.Listxattr() + } else { + names, err = o.lower.Listxattr() + } + for name := range names { + // Same as overlayGetxattr, we shouldn't forward along + // overlay attributes. 
+ if strings.HasPrefix(XattrOverlayPrefix, name) { + delete(names, name) + } + } + return names, err +} + +func overlayCheck(ctx context.Context, o *overlayEntry, p PermMask) error { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + if o.upper != nil { + return o.upper.check(ctx, p) + } + if p.Write { + // Since writes will be redirected to the upper filesystem, the lower + // filesystem need not be writable, but must be readable for copy-up. + p.Write = false + p.Read = true + } + return o.lower.check(ctx, p) +} + +func overlaySetPermissions(ctx context.Context, o *overlayEntry, d *Dirent, f FilePermissions) bool { + if err := copyUp(ctx, d); err != nil { + return false + } + return o.upper.InodeOperations.SetPermissions(ctx, o.upper, f) +} + +func overlaySetOwner(ctx context.Context, o *overlayEntry, d *Dirent, owner FileOwner) error { + if err := copyUp(ctx, d); err != nil { + return err + } + return o.upper.InodeOperations.SetOwner(ctx, o.upper, owner) +} + +func overlaySetTimestamps(ctx context.Context, o *overlayEntry, d *Dirent, ts TimeSpec) error { + if err := copyUp(ctx, d); err != nil { + return err + } + return o.upper.InodeOperations.SetTimestamps(ctx, o.upper, ts) +} + +func overlayTruncate(ctx context.Context, o *overlayEntry, d *Dirent, size int64) error { + if err := copyUp(ctx, d); err != nil { + return err + } + return o.upper.InodeOperations.Truncate(ctx, o.upper, size) +} + +func overlayReadlink(ctx context.Context, o *overlayEntry) (string, error) { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + if o.upper != nil { + return o.upper.Readlink(ctx) + } + return o.lower.Readlink(ctx) +} + +func overlayGetlink(ctx context.Context, o *overlayEntry) (*Dirent, error) { + var dirent *Dirent + var err error + + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + if o.upper != nil { + dirent, err = o.upper.Getlink(ctx) + } else { + dirent, err = o.lower.Getlink(ctx) + } + if dirent != nil { + // This dirent is likely bogus (its Inode likely doesn't contain + 
// the right overlayEntry). So we're forced to drop it on the + // ground and claim that jumping around the filesystem like this + // is not supported. + name, _ := dirent.FullName(nil) + dirent.DecRef() + + // Claim that the path is not accessible. + err = syserror.EACCES + log.Warningf("Getlink not supported in overlay for %q", name) + } + return nil, err +} + +func overlayStatFS(ctx context.Context, o *overlayEntry) (Info, error) { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + + var i Info + var err error + if o.upper != nil { + i, err = o.upper.StatFS(ctx) + } else { + i, err = o.lower.StatFS(ctx) + } + if err != nil { + return Info{}, err + } + + i.Type = linux.OVERLAYFS_SUPER_MAGIC + + return i, nil +} + +func overlayHandleOps(o *overlayEntry) HandleOperations { + o.copyMu.RLock() + defer o.copyMu.RUnlock() + if o.upper != nil { + return o.upper.HandleOps() + } + return o.lower.HandleOps() +} + +// NewTestOverlayDir returns an overlay Inode for tests. +func NewTestOverlayDir(ctx context.Context, upper *Inode, lower *Inode) *Inode { + fs := &overlayFilesystem{} + msrc := NewMountSource(&overlayMountSourceOperations{ + upper: NewNonCachingMountSource(fs, MountSourceFlags{}), + lower: NewNonCachingMountSource(fs, MountSourceFlags{}), + }, fs, MountSourceFlags{}) + overlay := &overlayEntry{ + upper: upper, + lower: lower, + } + return newOverlayInode(ctx, overlay, msrc) +} + +// TestHasUpperFS returns true if i is an overlay Inode and it has a pointer +// to an Inode on an upper filesystem. +func (i *Inode) TestHasUpperFS() bool { + return i.overlay != nil && i.overlay.upper != nil +} + +// TestHasLowerFS returns true if i is an overlay Inode and it has a pointer +// to an Inode on a lower filesystem. 
+func (i *Inode) TestHasLowerFS() bool { + return i.overlay != nil && i.overlay.lower != nil +} diff --git a/pkg/sentry/fs/inode_overlay_test.go b/pkg/sentry/fs/inode_overlay_test.go new file mode 100644 index 000000000..684d54bd2 --- /dev/null +++ b/pkg/sentry/fs/inode_overlay_test.go @@ -0,0 +1,251 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs_test + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + ramfstest "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs/test" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +func TestLookup(t *testing.T) { + ctx := contexttest.Context(t) + for _, test := range []struct { + // Test description. + desc string + + // Lookup parameters. + dir *fs.Inode + name string + + // Want from lookup. 
+ err error + found bool + hasUpper bool + hasLower bool + }{ + { + desc: "no upper, lower has name", + dir: fs.NewTestOverlayDir(ctx, + nil, /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + ), + name: "a", + found: true, + hasUpper: false, + hasLower: true, + }, + { + desc: "no lower, upper has name", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* upper */ + nil, /* lower */ + ), + name: "a", + found: true, + hasUpper: true, + hasLower: false, + }, + { + desc: "upper and lower, only lower has name", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "b", + dir: false, + }, + }, nil), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + ), + name: "a", + found: true, + hasUpper: false, + hasLower: true, + }, + { + desc: "upper and lower, only upper has name", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "b", + dir: false, + }, + }, nil), /* lower */ + ), + name: "a", + found: true, + hasUpper: true, + hasLower: false, + }, + { + desc: "upper and lower, both have file", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + ), + name: "a", + found: true, + hasUpper: true, + hasLower: false, + }, + { + desc: "upper and lower, both have directory", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: true, + }, + }, nil), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: true, + }, + }, nil), /* lower */ + ), + name: "a", + found: true, + hasUpper: true, + hasLower: true, + }, + { + desc: 
"upper and lower, upper negative masks lower file", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, nil, []string{"a"}), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + ), + name: "a", + found: false, + hasUpper: false, + hasLower: false, + }, + { + desc: "upper and lower, upper negative does not mask lower file", + dir: fs.NewTestOverlayDir(ctx, + newTestRamfsDir(ctx, nil, []string{"b"}), /* upper */ + newTestRamfsDir(ctx, []dirContent{ + { + name: "a", + dir: false, + }, + }, nil), /* lower */ + ), + name: "a", + found: true, + hasUpper: false, + hasLower: true, + }, + } { + t.Run(test.desc, func(t *testing.T) { + dirent, err := test.dir.Lookup(ctx, test.name) + if err != test.err { + t.Fatalf("lookup got error %v, want %v", err, test.err) + } + if test.found && dirent.IsNegative() { + t.Fatalf("lookup expected to find %q, got negative dirent", test.name) + } + if !test.found { + return + } + if hasUpper := dirent.Inode.TestHasUpperFS(); hasUpper != test.hasUpper { + t.Fatalf("lookup got upper filesystem %v, want %v", hasUpper, test.hasUpper) + } + if hasLower := dirent.Inode.TestHasLowerFS(); hasLower != test.hasLower { + t.Errorf("lookup got lower filesystem %v, want %v", hasLower, test.hasLower) + } + }) + } +} + +type dir struct { + fs.InodeOperations + + // list of negative child names. 
+ negative []string +} + +func (d *dir) Getxattr(inode *fs.Inode, name string) ([]byte, error) { + for _, n := range d.negative { + if name == fs.XattrOverlayWhiteout(n) { + return []byte("y"), nil + } + } + return nil, syserror.ENOATTR +} + +type dirContent struct { + name string + dir bool +} + +func newTestRamfsDir(ctx context.Context, contains []dirContent, negative []string) *fs.Inode { + msrc := fs.NewCachingMountSource(nil, fs.MountSourceFlags{}) + contents := make(map[string]*fs.Inode) + for _, c := range contains { + if c.dir { + contents[c.name] = newTestRamfsDir(ctx, nil, nil) + } else { + contents[c.name] = fs.NewInode(ramfstest.NewFile(ctx, fs.FilePermissions{}), msrc, fs.StableAttr{Type: fs.RegularFile}) + } + } + dops := ramfstest.NewDir(ctx, contents, fs.FilePermissions{ + User: fs.PermMask{Read: true, Execute: true}, + }) + return fs.NewInode(&dir{ + InodeOperations: dops, + negative: negative, + }, msrc, fs.StableAttr{Type: fs.Directory}) +} diff --git a/pkg/sentry/fs/inotify.go b/pkg/sentry/fs/inotify.go new file mode 100644 index 000000000..9f50cb800 --- /dev/null +++ b/pkg/sentry/fs/inotify.go @@ -0,0 +1,329 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package fs
+
+import (
+	"sync"
+
+	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
+	"gvisor.googlesource.com/gvisor/pkg/ilist"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/uniqueid"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+	"gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+// Inotify represents an inotify instance created by inotify_init(2) or
+// inotify_init1(2). Inotify implements the FileOperations interface.
+//
+// Lock ordering:
+// Inotify.mu -> Inode.Watches.mu -> Watch.mu -> Inotify.evMu
+type Inotify struct {
+	// Unique identifier for this inotify instance. We don't just reuse the
+	// inotify fd because fds can be duped. These should not be exposed to the
+	// user, since we may aggressively reuse an id on S/R.
+	id uint64
+
+	// evMu *only* protects the event queue. We need a separate lock because
+	// while queuing events, a watch needs to lock the event queue, and using mu
+	// for that would violate lock ordering since at that point the calling
+	// goroutine already holds Watch.target.Watches.mu.
+	evMu sync.Mutex `state:"nosave"`
+
+	waiter.Queue `state:"nosave"`
+
+	// A list of pending events for this inotify instance. Protected by evMu.
+	events ilist.List
+
+	// A scratch buffer, used to serialize inotify events. We allocate it
+	// ahead of time and reuse it to avoid an allocation per read. Protected
+	// by evMu.
+	scratch []byte
+
+	// mu protects the fields below.
+	mu sync.Mutex `state:"nosave"`
+
+	// The next watch descriptor number to use for this inotify instance. Note
+	// that Linux starts numbering watch descriptors from 1.
+	nextWatch int32
+
+	// Map from watch descriptors to watch objects.
+	watches map[int32]*Watch
+}
+
+// NewInotify constructs a new Inotify instance.
+func NewInotify(ctx context.Context) *Inotify { + return &Inotify{ + id: uniqueid.GlobalFromContext(ctx), + scratch: make([]byte, inotifyEventBaseSize), + nextWatch: 1, // Linux starts numbering watch descriptors from 1. + watches: make(map[int32]*Watch), + } +} + +// Release implements FileOperations.Release. Release removes all watches and +// frees all resources for an inotify instance. +func (i *Inotify) Release() { + // We need to hold i.mu to avoid a race with concurrent calls to + // Inotify.targetDestroyed from Watches. There's no risk of Watches + // accessing this Inotify after the destructor ends, because we remove all + // references to it below. + i.mu.Lock() + defer i.mu.Unlock() + for _, w := range i.watches { + // Remove references to the watch from the watch target. We don't need + // to worry about the references from the owner instance, since we're in + // the owner's destructor. + w.target.Watches.Remove(w.ID()) + // Don't leak any references to the target, held by pins in the watch. + w.destroy() + } +} + +// Readiness implements waiter.Waitable.Readiness. +// +// Readiness indicates whether there are pending events for an inotify instance. +func (i *Inotify) Readiness(mask waiter.EventMask) waiter.EventMask { + ready := waiter.EventMask(0) + + i.evMu.Lock() + defer i.evMu.Unlock() + + if !i.events.Empty() { + ready |= waiter.EventIn + } + + return mask & ready +} + +// Seek implements FileOperations.Seek. +func (*Inotify) Seek(context.Context, *File, SeekWhence, int64) (int64, error) { + return 0, syserror.ESPIPE +} + +// Readdir implements FileOperatons.Readdir. +func (*Inotify) Readdir(context.Context, *File, DentrySerializer) (int64, error) { + return 0, syserror.ENOTDIR +} + +// Write implements FileOperations.Write. +func (*Inotify) Write(context.Context, *File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EBADF +} + +// Read implements FileOperations.Read. 
+func (i *Inotify) Read(ctx context.Context, _ *File, dst usermem.IOSequence, _ int64) (int64, error) { + if dst.NumBytes() < inotifyEventBaseSize { + return 0, syserror.EINVAL + } + + i.evMu.Lock() + defer i.evMu.Unlock() + + if i.events.Empty() { + // Nothing to read yet, tell caller to block. + return 0, syserror.ErrWouldBlock + } + + var writeLen int64 + for e := i.events.Front(); e != nil; e = e.Next() { + event := e.(*Event) + + // Does the buffer have enough remaining space to hold the event we're + // about to write out? + if dst.NumBytes() < int64(event.sizeOf()) { + if writeLen > 0 { + // Buffer wasn't big enough for all pending events, but we did + // write some events out. + return writeLen, nil + } + return 0, syserror.EINVAL + } + + // Linux always dequeues an available event as long as there's enough + // buffer space to copy it out, even if the copy below fails. Emulate + // this behaviour. + i.events.Remove(e) + + // Buffer has enough space, copy event to the read buffer. + n, err := event.CopyTo(ctx, i.scratch, dst) + if err != nil { + return 0, err + } + + writeLen += n + dst = dst.DropFirst64(n) + } + return writeLen, nil +} + +// Fsync implements FileOperations.Fsync. +func (*Inotify) Fsync(context.Context, *File, int64, int64, SyncType) error { + return syserror.EINVAL +} + +// Flush implements FileOperations.Flush. +func (*Inotify) Flush(context.Context, *File) error { + return nil +} + +// ConfigureMMap implements FileOperations.ConfigureMMap. +func (*Inotify) ConfigureMMap(context.Context, *File, *memmap.MMapOpts) error { + return syserror.ENODEV +} + +// Ioctl implements fs.FileOperations.Ioctl. 
+func (i *Inotify) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + switch args[1].Int() { + case linux.FIONREAD: + i.evMu.Lock() + defer i.evMu.Unlock() + var n uint32 + for e := i.events.Front(); e != nil; e = e.Next() { + event := e.(*Event) + n += uint32(event.sizeOf()) + } + var buf [4]byte + usermem.ByteOrder.PutUint32(buf[:], n) + _, err := io.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{}) + return 0, err + + default: + return 0, syserror.ENOTTY + } +} + +func (i *Inotify) queueEvent(ev *Event) { + i.evMu.Lock() + defer i.evMu.Unlock() + + // Check if we should coalesce the event we're about to queue with the last + // one currently in the queue. Events are coalesced if they are identical. + if last := i.events.Back(); last != nil { + if ev.equals(last.(*Event)) { + // "Coalesce" the two events by simply not queuing the new one. We + // don't need to raise a waiter.EventIn notification because no new + // data is available for reading. + return + } + } + + i.events.PushBack(ev) + i.Queue.Notify(waiter.EventIn) +} + +// newWatchLocked creates and adds a new watch to target. +func (i *Inotify) newWatchLocked(target *Dirent, mask uint32) *Watch { + wd := i.nextWatch + i.nextWatch++ + + watch := &Watch{ + owner: i, + wd: wd, + mask: mask, + target: target.Inode, + pins: make(map[*Dirent]bool), + } + + i.watches[wd] = watch + + // Grab an extra reference to target to prevent it from being evicted from + // memory. This ref is dropped during either watch removal, target + // destruction, or inotify instance destruction. See callers of Watch.Unpin. + watch.Pin(target) + target.Inode.Watches.Add(watch) + + return watch +} + +// targetDestroyed is called by w to notify i that w's target is gone. This +// automatically generates a watch removal event. 
+func (i *Inotify) targetDestroyed(w *Watch) {
+	i.mu.Lock()
+	_, found := i.watches[w.wd]
+	delete(i.watches, w.wd)
+	i.mu.Unlock()
+
+	// Queue IN_IGNORED outside the instance lock; queueEvent takes evMu.
+	if found {
+		i.queueEvent(newEvent(w.wd, "", linux.IN_IGNORED, 0))
+	}
+}
+
+// AddWatch constructs a new inotify watch and adds it to the target dirent. It
+// returns the watch descriptor returned by inotify_add_watch(2).
+func (i *Inotify) AddWatch(target *Dirent, mask uint32) int32 {
+	// Note: Locking this inotify instance protects the result returned by
+	// Lookup() below. With the lock held, we know for sure the lookup result
+	// won't become stale because it's impossible for *this* instance to
+	// add/remove watches on target.
+	i.mu.Lock()
+	defer i.mu.Unlock()
+
+	// Does the target already have a watch from this inotify instance?
+	if existing := target.Inode.Watches.Lookup(i.id); existing != nil {
+		// This may be a watch on a different dirent pointing to the
+		// same inode. Obtain an extra reference if necessary.
+		existing.Pin(target)
+
+		if mergeMask := mask&linux.IN_MASK_ADD != 0; mergeMask {
+			// "Add (OR) events to watch mask for this pathname if it already
+			// exists (instead of replacing mask)." -- inotify(7)
+			existing.mask |= mask
+		} else {
+			existing.mask = mask
+		}
+		return existing.wd
+	}
+
+	// No existing watch, create a new watch.
+	watch := i.newWatchLocked(target, mask)
+	return watch.wd
+}
+
+// RmWatch implements watcher.Watchable.RmWatch.
+//
+// RmWatch looks up an inotify watch for the given 'wd' and configures the
+// target dirent to stop sending events to this inotify instance.
+func (i *Inotify) RmWatch(wd int32) error {
+	i.mu.Lock()
+
+	// Find the watch we were asked to remove.
+	watch, ok := i.watches[wd]
+	if !ok {
+		i.mu.Unlock()
+		return syserror.EINVAL
+	}
+
+	// Remove the watch from this instance.
+	delete(i.watches, wd)
+
+	// Remove the watch from the watch target.
+	watch.target.Watches.Remove(watch.ID())
+
+	// The watch is now isolated and we can safely drop the instance lock. We
+	// need to do so because watch.destroy() acquires Watch.mu, which cannot be
+	// acquired with Inotify.mu held.
+	i.mu.Unlock()
+
+	// Generate the event for the removal.
+	i.queueEvent(newEvent(watch.wd, "", linux.IN_IGNORED, 0))
+
+	// Remove all pins.
+	watch.destroy()
+
+	return nil
+}
diff --git a/pkg/sentry/fs/inotify_event.go b/pkg/sentry/fs/inotify_event.go
new file mode 100644
index 000000000..217915ba4
--- /dev/null
+++ b/pkg/sentry/fs/inotify_event.go
@@ -0,0 +1,138 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"bytes"
+	"fmt"
+
+	"gvisor.googlesource.com/gvisor/pkg/ilist"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+// inotifyEventBaseSize is the base size of linux's struct inotify_event. This
+// must be a power of 2 for rounding below.
+const inotifyEventBaseSize = 16
+
+// Event represents a struct inotify_event from linux.
+type Event struct {
+	ilist.Entry
+
+	wd int32
+	mask uint32
+	cookie uint32
+
+	// len is computed from the name field and is set automatically by
+	// Event.setName. It should be 0 when no name is set; otherwise it is the
+	// length of the name slice.
+	len uint32
+
+	// The name field has special padding requirements and should only be set by
+	// calling Event.setName.
+ name []byte +} + +func newEvent(wd int32, name string, events, cookie uint32) *Event { + e := &Event{ + wd: wd, + mask: events, + cookie: cookie, + } + if name != "" { + e.setName(name) + } + return e +} + +// paddedBytes converts a go string to a null-terminated c-string, padded with +// null bytes to a total size of 'l'. 'l' must be large enough for all the bytes +// in the 's' plus at least one null byte. +func paddedBytes(s string, l uint32) []byte { + if l < uint32(len(s)+1) { + panic("Converting string to byte array results in truncation, this can lead to buffer-overflow due to the missing null-byte!") + } + b := make([]byte, l) + copy(b, s) + + // b was zero-value initialized during make(), so the rest of the slice is + // already filled with null bytes. + + return b +} + +// setName sets the optional name for this event. +func (e *Event) setName(name string) { + // We need to pad the name such that the entire event length ends up a + // multiple of inotifyEventBaseSize. + unpaddedLen := len(name) + 1 + // Round up to nearest multiple of inotifyEventBaseSize. + e.len = uint32((unpaddedLen + inotifyEventBaseSize - 1) & ^(inotifyEventBaseSize - 1)) + // Make sure we haven't overflowed and wrapped around when rounding. + if unpaddedLen > int(e.len) { + panic("Overflow when rounding inotify event size, the 'name' field was too big.") + } + e.name = paddedBytes(name, e.len) +} + +func (e *Event) sizeOf() int { + s := inotifyEventBaseSize + int(e.len) + if s < inotifyEventBaseSize { + panic("overflow") + } + return s +} + +// CopyTo serializes this event to dst. buf is used as a scratch buffer to +// construct the output. We use a buffer allocated ahead of time for +// performance. buf must be at least inotifyEventBaseSize bytes. 
+func (e *Event) CopyTo(ctx context.Context, buf []byte, dst usermem.IOSequence) (int64, error) { + usermem.ByteOrder.PutUint32(buf[0:], uint32(e.wd)) + usermem.ByteOrder.PutUint32(buf[4:], e.mask) + usermem.ByteOrder.PutUint32(buf[8:], e.cookie) + usermem.ByteOrder.PutUint32(buf[12:], e.len) + + writeLen := 0 + + n, err := dst.CopyOut(ctx, buf) + if err != nil { + return 0, err + } + writeLen += n + dst = dst.DropFirst(n) + + if e.len > 0 { + n, err = dst.CopyOut(ctx, e.name) + if err != nil { + return 0, err + } + writeLen += n + } + + // Santiy check. + if writeLen != e.sizeOf() { + panic(fmt.Sprintf("Serialized unexpected amount of data for an event, expected %v, wrote %v.", e.sizeOf(), writeLen)) + } + + return int64(writeLen), nil +} + +func (e *Event) equals(other *Event) bool { + return e.wd == other.wd && + e.mask == other.mask && + e.cookie == other.cookie && + e.len == other.len && + bytes.Equal(e.name, other.name) +} diff --git a/pkg/sentry/fs/inotify_watch.go b/pkg/sentry/fs/inotify_watch.go new file mode 100644 index 000000000..ff6ec6e3e --- /dev/null +++ b/pkg/sentry/fs/inotify_watch.go @@ -0,0 +1,129 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +// Watch represent a particular inotify watch created by inotify_add_watch. 
+// +// While a watch is active, it ensures the target inode is pinned in memory by +// holding an extra ref on each dirent known (by inotify) to point to the +// inode. These are known as pins. For a full discussion, see +// fs/g3doc/inotify.md. +type Watch struct { + // Inotify instance which owns this watch. + owner *Inotify + + // Descriptor for this watch. This is unique across an inotify instance. + wd int32 + + // Events being monitored via this watch. + mask uint32 + + // The inode being watched. Note that we don't directly hold a reference on + // this inode. Instead we hold a reference on the dirent(s) containing the + // inode, which we record in pins. + target *Inode + + // unpinned indicates whether we have a hard reference on target. This field + // may only be modified through atomic ops. + unpinned uint32 + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // pins is the set of dirents this watch is currently pinning in memory by + // holding a reference to them. See Pin()/Unpin(). + pins map[*Dirent]bool +} + +// ID returns the id of the inotify instance that owns this watch. +func (w *Watch) ID() uint64 { + return w.owner.id +} + +// NotifyParentAfterUnlink indicates whether the parent of the watched object +// should continue to be be notified of events after the target has been +// unlinked. +func (w *Watch) NotifyParentAfterUnlink() bool { + return w.mask&linux.IN_EXCL_UNLINK == 0 +} + +// isRenameEvent returns true if eventMask describes a rename event. +func isRenameEvent(eventMask uint32) bool { + return eventMask&(linux.IN_MOVED_FROM|linux.IN_MOVED_TO|linux.IN_MOVE_SELF) != 0 +} + +// Notify queues a new event on this watch. +func (w *Watch) Notify(name string, events uint32, cookie uint32) { + unmaskableBits := ^uint32(0) &^ linux.IN_ALL_EVENTS + effectiveMask := unmaskableBits | w.mask + matchedEvents := effectiveMask & events + + if matchedEvents == 0 { + // We weren't watching for this event. 
+ return + } + + w.owner.queueEvent(newEvent(w.wd, name, matchedEvents, cookie)) +} + +// Pin acquires a new ref on dirent, which pins the dirent in memory while +// the watch is active. Calling Pin for a second time on the same dirent for +// the same watch is a no-op. +func (w *Watch) Pin(d *Dirent) { + w.mu.Lock() + defer w.mu.Unlock() + if !w.pins[d] { + w.pins[d] = true + d.IncRef() + } +} + +// Unpin drops any extra refs held on dirent due to a previous Pin +// call. Calling Unpin multiple times for the same dirent, or on a dirent +// without a corresponding Pin call is a no-op. +func (w *Watch) Unpin(d *Dirent) { + w.mu.Lock() + defer w.mu.Unlock() + if w.pins[d] { + delete(w.pins, d) + d.DecRef() + } +} + +// TargetDestroyed notifies the owner of the watch that the watch target is +// gone. The owner should release its own references to the watcher upon +// receiving this notification. +func (w *Watch) TargetDestroyed() { + w.owner.targetDestroyed(w) +} + +// destroy prepares the watch for destruction. It unpins all dirents pinned by +// this watch. Destroy does not cause any new events to be generated. The caller +// is responsible for ensuring there are no outstanding references to this +// watch. 
+func (w *Watch) destroy() { + w.mu.Lock() + defer w.mu.Unlock() + for d := range w.pins { + d.DecRef() + } + w.pins = nil +} diff --git a/pkg/sentry/fs/lock/BUILD b/pkg/sentry/fs/lock/BUILD new file mode 100644 index 000000000..c15dde800 --- /dev/null +++ b/pkg/sentry/fs/lock/BUILD @@ -0,0 +1,72 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "lock_state", + srcs = [ + "lock.go", + "lock_range.go", + "lock_set.go", + ], + out = "lock_state.go", + package = "lock", +) + +go_template_instance( + name = "lock_range", + out = "lock_range.go", + package = "lock", + prefix = "Lock", + template = "//pkg/segment:generic_range", + types = { + "T": "uint64", + }, +) + +go_template_instance( + name = "lock_set", + out = "lock_set.go", + consts = { + "minDegree": "3", + }, + package = "lock", + prefix = "Lock", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "LockRange", + "Value": "Lock", + "Functions": "lockSetFunctions", + }, +) + +go_library( + name = "lock", + srcs = [ + "lock.go", + "lock_range.go", + "lock_set.go", + "lock_set_functions.go", + "lock_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/lock", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/log", + "//pkg/state", + "//pkg/waiter", + ], +) + +go_test( + name = "lock_test", + size = "small", + srcs = [ + "lock_range_test.go", + "lock_test.go", + ], + embed = [":lock"], +) diff --git a/pkg/sentry/fs/lock/lock.go b/pkg/sentry/fs/lock/lock.go new file mode 100644 index 000000000..24d54c989 --- /dev/null +++ b/pkg/sentry/fs/lock/lock.go @@ -0,0 +1,457 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package lock is the API for POSIX-style advisory regional file locks and +// BSD-style full file locks. +// +// Callers needing to enforce these types of locks, like sys_fcntl, can call +// LockRegion and UnlockRegion on a thread-safe set of Locks. Locks are +// specific to a unique file (unique device/inode pair) and for this reason +// should not be shared between files. +// +// A Lock has a set of holders identified by UniqueID. Normally this is the +// pid of the thread attempting to acquire the lock. +// +// Since these are advisory locks, they do not need to be integrated into +// Reads/Writes and for this reason there is no way to *check* if a lock is +// held. One can only attempt to take a lock or unlock an existing lock. +// +// A Lock in a set of Locks is typed: it is either a read lock with any number +// of readers and no writer, or a write lock with no readers. +// +// As expected from POSIX, any attempt to acquire a write lock on a file region +// when there already exits a write lock held by a different uid will fail. Any +// attempt to acquire a write lock on a file region when there is more than one +// reader will fail. Any attempt to acquire a read lock on a file region when +// there is already a writer will fail. +// +// In special cases, a read lock may be upgraded to a write lock and a write lock +// can be downgraded to a read lock. This can only happen if: +// +// * read lock upgrade to write lock: There can be only one reader and the reader +// must be the same as the requested write lock holder. 
+// +// * write lock downgrade to read lock: The writer must be the same as the requested +// read lock holder. +// +// UnlockRegion always succeeds. If LockRegion fails the caller should normally +// interpret this as "try again later". +package lock + +import ( + "fmt" + "math" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// LockType is a type of regional file lock. +type LockType int + +// UniqueID is a unique identifier of the holder of a regional file lock. +type UniqueID uint64 + +const ( + // ReadLock describes a POSIX regional file lock to be taken + // read only. There may be multiple of these locks on a single + // file region as long as there is no writer lock on the same + // region. + ReadLock LockType = iota + + // WriteLock describes a POSIX regional file lock to be taken + // write only. There may be only a single holder of this lock + // and no read locks. + WriteLock +) + +// LockEOF is the maximal possible end of a regional file lock. +const LockEOF = math.MaxUint64 + +// Lock is a regional file lock. It consists of either a single writer +// or a set of readers. +// +// A Lock may be upgraded from a read lock to a write lock only if there +// is a single reader and that reader has the same uid as the write lock. +// +// A Lock may be downgraded from a write lock to a read lock only if +// the write lock's uid is the same as the read lock. +type Lock struct { + // Readers are the set of read lock holders identified by UniqueID. + // If len(Readers) > 0 then HasWriter must be false. + Readers map[UniqueID]bool + + // HasWriter indicates that this is a write lock held by a single + // UniqueID. + HasWriter bool + + // Writer is only valid if HasWriter is true. It identifies a + // single write lock holder. + Writer UniqueID +} + +// Locks is a thread-safe wrapper around a LockSet. +type Locks struct { + // mu protects locks below. 
+ mu sync.Mutex `state:"nosave"` + + // locks is the set of region locks currently held on an Inode. + locks LockSet + + // blockedQueue is the queue of waiters that are waiting on a lock. + blockedQueue waiter.Queue +} + +// Blocker is the interface used for blocking locks. Passing a nil Blocker +// will be treated as non-blocking. +type Blocker interface { + Block(C chan struct{}) error +} + +const ( + // EventMaskAll is the mask we will always use for locks, by using the + // same mask all the time we can wake up everyone anytime the lock + // changes state. + EventMaskAll waiter.EventMask = 0xFFFF +) + +// LockRegion attempts to acquire a typed lock for the uid on a region +// of a file. Returns true if successful in locking the region. If false +// is returned, the caller should normally interpret this as "try again later" if +// accquiring the lock in a non-blocking mode or "interrupted" if in a blocking mode. +// Blocker is the interface used to provide blocking behavior, passing a nil Blocker +// will result in non-blocking behavior. +func (l *Locks) LockRegion(uid UniqueID, t LockType, r LockRange, block Blocker) bool { + for { + l.mu.Lock() + + // Blocking locks must run in a loop because we'll be woken up whenever an unlock event + // happens for this lock. We will then attempt to take the lock again and if it fails + // continue blocking. + res := l.locks.lock(uid, t, r) + if !res && block != nil { + e, ch := waiter.NewChannelEntry(nil) + l.blockedQueue.EventRegister(&e, EventMaskAll) + l.mu.Unlock() + if err := block.Block(ch); err != nil { + // We were interrupted, the caller can translate this to EINTR if applicable. + l.blockedQueue.EventUnregister(&e) + return false + } + l.blockedQueue.EventUnregister(&e) + continue // Try again now that someone has unlocked. + } + + l.mu.Unlock() + return res + } +} + +// UnlockRegion attempts to release a lock for the uid on a region of a file. 
+// This operation is always successful, even if there did not exist a lock on +// the requested region held by uid in the first place. +func (l *Locks) UnlockRegion(uid UniqueID, r LockRange) { + l.mu.Lock() + defer l.mu.Unlock() + l.locks.unlock(uid, r) + + // Now that we've released the lock, we need to wake up any waiters. + l.blockedQueue.Notify(EventMaskAll) +} + +// makeLock returns a new typed Lock that has either uid as its only reader +// or uid as its only writer. +func makeLock(uid UniqueID, t LockType) Lock { + value := Lock{Readers: make(map[UniqueID]bool)} + switch t { + case ReadLock: + value.Readers[uid] = true + case WriteLock: + value.HasWriter = true + value.Writer = uid + default: + panic(fmt.Sprintf("makeLock: invalid lock type %d", t)) + } + return value +} + +// isHeld returns true if uid is a holder of Lock. +func (l Lock) isHeld(uid UniqueID) bool { + if l.HasWriter && l.Writer == uid { + return true + } + return l.Readers[uid] +} + +// lock sets uid as a holder of a typed lock on Lock. +// +// Preconditions: canLock is true for the range containing this Lock. +func (l *Lock) lock(uid UniqueID, t LockType) { + switch t { + case ReadLock: + // If we are already a reader, then this is a no-op. + if l.Readers[uid] { + return + } + // We cannot downgrade a write lock to a read lock unless the + // uid is the same. + if l.HasWriter { + if l.Writer != uid { + panic(fmt.Sprintf("lock: cannot downgrade write lock to read lock for uid %d, writer is %d", uid, l.Writer)) + } + // Ensure that there is only one reader if upgrading. + l.Readers = make(map[UniqueID]bool) + // Ensure that there is no longer a writer. + l.HasWriter = false + } + l.Readers[uid] = true + return + case WriteLock: + // If we are already the writer, then this is a no-op. + if l.HasWriter && l.Writer == uid { + return + } + // We can only upgrade a read lock to a write lock if there + // is only one reader and that reader has the same uid as + // the write lock. 
+ if readers := len(l.Readers); readers > 0 { + if readers != 1 { + panic(fmt.Sprintf("lock: cannot upgrade read lock to write lock for uid %d, too many readers %v", uid, l.Readers)) + } + if !l.Readers[uid] { + panic(fmt.Sprintf("lock: cannot upgrade read lock to write lock for uid %d, conflicting reader %v", uid, l.Readers)) + } + } + // Ensure that there is only a writer. + l.Readers = make(map[UniqueID]bool) + l.HasWriter = true + l.Writer = uid + default: + panic(fmt.Sprintf("lock: invalid lock type %d", t)) + } +} + +// lockable returns true if check returns true for every Lock in LockRange. +// Further, check should return true if Lock meets the callers requirements +// for locking Lock. +func (l LockSet) lockable(r LockRange, check func(value Lock) bool) bool { + // Get our starting point. + seg := l.LowerBoundSegment(r.Start) + for seg.Ok() && seg.Start() < r.End { + // Note that we don't care about overruning the end of the + // last segment because if everything checks out we'll just + // split the last segment. + if !check(seg.Value()) { + return false + } + // Jump to the next segment, ignoring gaps, for the same + // reason we ignored the first gap. + seg = seg.NextSegment() + } + // No conflict, we can get a lock for uid over the entire range. + return true +} + +// canLock returns true if uid will be able to take a Lock of type t on the +// entire range specified by LockRange. +func (l LockSet) canLock(uid UniqueID, t LockType, r LockRange) bool { + switch t { + case ReadLock: + return l.lockable(r, func(value Lock) bool { + // If there is no writer, there's no problem adding + // another reader. + if !value.HasWriter { + return true + } + // If there is a writer, then it must be the same uid + // in order to downgrade the lock to a read lock. + return value.Writer == uid + }) + case WriteLock: + return l.lockable(r, func(value Lock) bool { + // If there are only readers. 
+ if !value.HasWriter { + // Then this uid can only take a write lock if + // this is a private upgrade, meaning that the + // only reader is uid. + return len(value.Readers) == 1 && value.Readers[uid] + } + // If the uid is already a writer on this region, then + // adding a write lock would be a no-op. + return value.Writer == uid + }) + default: + panic(fmt.Sprintf("canLock: invalid lock type %d", t)) + } +} + +// lock returns true if uid took a lock of type t on the entire range of LockRange. +// +// Preconditions: r.Start <= r.End (will panic otherwise). +func (l *LockSet) lock(uid UniqueID, t LockType, r LockRange) bool { + if r.Start > r.End { + panic(fmt.Sprintf("lock: r.Start %d > r.End %d", r.Start, r.End)) + } + + // Don't attempt to insert anything with a range of 0 and treat this + // as a successful no-op. + if r.Length() == 0 { + return true + } + + // Do a first-pass check. We *could* hold onto the segments we + // checked if canLock would return true, but traversing the segment + // set should be fast and this keeps things simple. + if !l.canLock(uid, t, r) { + return false + } + // Get our starting point. + seg, gap := l.Find(r.Start) + if gap.Ok() { + // Fill in the gap and get the next segment to modify. + seg = l.Insert(gap, gap.Range().Intersect(r), makeLock(uid, t)).NextSegment() + } else if seg.Start() < r.Start { + // Get our first segment to modify. + _, seg = l.Split(seg, r.Start) + } + for seg.Ok() && seg.Start() < r.End { + // Split the last one if necessary. + if seg.End() > r.End { + seg, _ = l.SplitUnchecked(seg, r.End) + } + + // Set the lock on the segment. This is guaranteed to + // always be safe, given canLock above. + value := seg.ValuePtr() + value.lock(uid, t) + + // Fill subsequent gaps. + gap = seg.NextGap() + if gr := gap.Range().Intersect(r); gr.Length() > 0 { + seg = l.Insert(gap, gr, makeLock(uid, t)).NextSegment() + } else { + seg = gap.NextSegment() + } + } + return true +} + +// unlock is always successful. 
If uid has no locks held for the range LockRange, +// unlock is a no-op. +// +// Preconditions: same as lock. +func (l *LockSet) unlock(uid UniqueID, r LockRange) { + if r.Start > r.End { + panic(fmt.Sprintf("unlock: r.Start %d > r.End %d", r.Start, r.End)) + } + + // Same as setlock. + if r.Length() == 0 { + return + } + + // Get our starting point. + seg := l.LowerBoundSegment(r.Start) + for seg.Ok() && seg.Start() < r.End { + // If this segment doesn't have a lock from uid then + // there is no need to fragment the set with Isolate (below). + // In this case just move on to the next segment. + if !seg.Value().isHeld(uid) { + seg = seg.NextSegment() + continue + } + + // Ensure that if we need to unlock a sub-segment that + // we don't unlock/remove that entire segment. + seg = l.Isolate(seg, r) + + value := seg.Value() + var remove bool + if value.HasWriter && value.Writer == uid { + // If we are unlocking a writer, then since there can + // only ever be one writer and no readers, then this + // lock should always be removed from the set. + remove = true + } else if value.Readers[uid] { + // If uid is the last reader, then just remove the entire + // segment. + if len(value.Readers) == 1 { + remove = true + } else { + // Otherwise we need to remove this reader without + // affecting any other segment's readers. To do + // this, we need to make a copy of the Readers map + // and not add this uid. + newValue := Lock{Readers: make(map[UniqueID]bool)} + for k, v := range value.Readers { + if k != uid { + newValue.Readers[k] = v + } + } + seg.SetValue(newValue) + } + } + if remove { + seg = l.Remove(seg).NextSegment() + } else { + seg = seg.NextSegment() + } + } +} + +// ComputeRange takes a positive file offset and computes the start of a LockRange +// using start (relative to offset) and the end of the LockRange using length. 
The +// values of start and length may be negative but the resulting LockRange must +// preserve that LockRange.Start < LockRange.End and LockRange.Start > 0. +func ComputeRange(start, length, offset int64) (LockRange, error) { + offset += start + // fcntl(2): "l_start can be a negative number provided the offset + // does not lie before the start of the file" + if offset < 0 { + return LockRange{}, syscall.EINVAL + } + + // fcntl(2): Specifying 0 for l_len has the special meaning: lock all + // bytes starting at the location specified by l_whence and l_start + // through to the end of file, no matter how large the file grows. + end := uint64(LockEOF) + if length > 0 { + // fcntl(2): If l_len is positive, then the range to be locked + // covers bytes l_start up to and including l_start+l_len-1. + // + // Since LockRange.End is exclusive we need not -1 from length.. + end = uint64(offset + length) + } else if length < 0 { + // fcntl(2): If l_len is negative, the interval described by + // lock covers bytes l_start+l_len up to and including l_start-1. + // + // Since LockRange.End is exclusive we need not -1 from offset. + signedEnd := offset + // Add to offset using a negative length (subtract). + offset += length + if offset < 0 { + return LockRange{}, syscall.EINVAL + } + if signedEnd < offset { + return LockRange{}, syscall.EOVERFLOW + } + // At this point signedEnd cannot be negative, + // since we asserted that offset is not negative + // and it is not less than offset. + end = uint64(signedEnd) + } + // Offset is guaranteed to be positive at this point. + return LockRange{Start: uint64(offset), End: end}, nil +} diff --git a/pkg/sentry/fs/lock/lock_range_test.go b/pkg/sentry/fs/lock/lock_range_test.go new file mode 100644 index 000000000..06a37c701 --- /dev/null +++ b/pkg/sentry/fs/lock/lock_range_test.go @@ -0,0 +1,136 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lock + +import ( + "syscall" + "testing" +) + +func TestComputeRange(t *testing.T) { + tests := []struct { + // Description of test. + name string + + // Requested start of the lock range. + start int64 + + // Requested length of the lock range, + // can be negative :( + length int64 + + // Pre-computed file offset based on whence. + // Will be added to start. + offset int64 + + // Expected error. + err error + + // If error is nil, the expected LockRange. 
+ LockRange + }{ + { + name: "offset, start, and length all zero", + LockRange: LockRange{Start: 0, End: LockEOF}, + }, + { + name: "zero offset, zero start, positive length", + start: 0, + length: 4096, + offset: 0, + LockRange: LockRange{Start: 0, End: 4096}, + }, + { + name: "zero offset, negative start", + start: -4096, + offset: 0, + err: syscall.EINVAL, + }, + { + name: "large offset, negative start, positive length", + start: -2048, + length: 2048, + offset: 4096, + LockRange: LockRange{Start: 2048, End: 4096}, + }, + { + name: "large offset, negative start, zero length", + start: -2048, + length: 0, + offset: 4096, + LockRange: LockRange{Start: 2048, End: LockEOF}, + }, + { + name: "zero offset, zero start, negative length", + start: 0, + length: -4096, + offset: 0, + err: syscall.EINVAL, + }, + { + name: "large offset, zero start, negative length", + start: 0, + length: -4096, + offset: 4096, + LockRange: LockRange{Start: 0, End: 4096}, + }, + { + name: "offset, start, and length equal, length is negative", + start: 1024, + length: -1024, + offset: 1024, + LockRange: LockRange{Start: 1024, End: 2048}, + }, + { + name: "offset, start, and length equal, start is negative", + start: -1024, + length: 1024, + offset: 1024, + LockRange: LockRange{Start: 0, End: 1024}, + }, + { + name: "offset, start, and length equal, offset is negative", + start: 1024, + length: 1024, + offset: -1024, + LockRange: LockRange{Start: 0, End: 1024}, + }, + { + name: "offset, start, and length equal, all negative", + start: -1024, + length: -1024, + offset: -1024, + err: syscall.EINVAL, + }, + { + name: "offset, start, and length equal, all positive", + start: 1024, + length: 1024, + offset: 1024, + LockRange: LockRange{Start: 2048, End: 3072}, + }, + } + + for _, test := range tests { + rng, err := ComputeRange(test.start, test.length, test.offset) + if err != test.err { + t.Errorf("%s: lockRange(%d, %d, %d) got error %v, want %v", test.name, test.start, test.length, test.offset, 
err, test.err) + continue + } + if err == nil && rng != test.LockRange { + t.Errorf("%s: lockRange(%d, %d, %d) got LockRange %v, want %v", test.name, test.start, test.length, test.offset, rng, test.LockRange) + } + } +} diff --git a/pkg/sentry/fs/lock/lock_set_functions.go b/pkg/sentry/fs/lock/lock_set_functions.go new file mode 100644 index 000000000..e16f485be --- /dev/null +++ b/pkg/sentry/fs/lock/lock_set_functions.go @@ -0,0 +1,69 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lock + +import ( + "math" +) + +// LockSet maps a set of Locks into a file. The key is the file offset. + +type lockSetFunctions struct{} + +func (lockSetFunctions) MinKey() uint64 { + return 0 +} + +func (lockSetFunctions) MaxKey() uint64 { + return math.MaxUint64 +} + +func (lockSetFunctions) ClearValue(l *Lock) { + *l = Lock{} +} + +func (lockSetFunctions) Merge(r1 LockRange, val1 Lock, r2 LockRange, val2 Lock) (Lock, bool) { + // Merge only if the Readers/Writers are identical. 
+ if len(val1.Readers) != len(val2.Readers) { + return Lock{}, false + } + for k := range val1.Readers { + if !val2.Readers[k] { + return Lock{}, false + } + } + if val1.HasWriter != val2.HasWriter { + return Lock{}, false + } + if val1.HasWriter { + if val1.Writer != val2.Writer { + return Lock{}, false + } + } + return val1, true +} + +func (lockSetFunctions) Split(r LockRange, val Lock, split uint64) (Lock, Lock) { + // Copy the segment so that split segments don't contain map references + // to other segments. + val0 := Lock{Readers: make(map[UniqueID]bool)} + for k, v := range val.Readers { + val0.Readers[k] = v + } + val0.HasWriter = val.HasWriter + val0.Writer = val.Writer + + return val, val0 +} diff --git a/pkg/sentry/fs/lock/lock_test.go b/pkg/sentry/fs/lock/lock_test.go new file mode 100644 index 000000000..c60f5f7a2 --- /dev/null +++ b/pkg/sentry/fs/lock/lock_test.go @@ -0,0 +1,1059 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package lock + +import ( + "reflect" + "testing" +) + +type entry struct { + Lock + LockRange +} + +func equals(e0, e1 []entry) bool { + if len(e0) != len(e1) { + return false + } + for i := range e0 { + for k := range e0[i].Lock.Readers { + if !e1[i].Lock.Readers[k] { + return false + } + } + for k := range e1[i].Lock.Readers { + if !e0[i].Lock.Readers[k] { + return false + } + } + if !reflect.DeepEqual(e0[i].LockRange, e1[i].LockRange) { + return false + } + if e0[i].Lock.HasWriter != e1[i].Lock.HasWriter { + return false + } + if e0[i].Lock.Writer != e1[i].Lock.Writer { + return false + } + } + return true +} + +// fill a LockSet with consecutive region locks. Will panic if +// LockRanges are not consecutive. +func fill(entries []entry) LockSet { + l := LockSet{} + for _, e := range entries { + gap := l.FindGap(e.LockRange.Start) + if !gap.Ok() { + panic("cannot insert into existing segment") + } + l.Insert(gap, e.LockRange, e.Lock) + } + return l +} + +func TestCanLockEmpty(t *testing.T) { + l := LockSet{} + + // Expect to be able to take any locks given that the set is empty. 
+ eof := l.FirstGap().End() + r := LockRange{0, eof} + if !l.canLock(1, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 1) + } + if !l.canLock(2, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 2) + } + if !l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1) + } + if !l.canLock(2, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 2) + } +} + +func TestCanLock(t *testing.T) { + // + -------------- + ---------- + -------------- + --------- + + // | Readers 1 & 2 | Readers 1 | Readers 1 & 3 | Writer 1 | + // + ------------- + ---------- + -------------- + --------- + + // 0 1024 2048 3072 4096 + l := fill([]entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true}}, + LockRange: LockRange{1024, 2048}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 3: true}}, + LockRange: LockRange{2048, 3072}, + }, + { + Lock: Lock{HasWriter: true, Writer: 1}, + LockRange: LockRange{3072, 4096}, + }, + }) + + // Now that we have a mildly interesting layout, try some checks on different + // ranges, uids, and lock types. + // + // Expect to be able to extend the read lock, despite the writer lock, because + // the writer has the same uid as the requested read lock. + r := LockRange{0, 8192} + if !l.canLock(1, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 1) + } + // Expect to *not* be able to extend the read lock since there is an overlapping + // writer region locked by someone other than the uid. 
+ if l.canLock(2, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got true, want false", ReadLock, r, 2) + } + // Expect to be able to extend the read lock if there are only other readers in + // the way. + r = LockRange{64, 3072} + if !l.canLock(2, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 2) + } + // Expect to be able to set a read lock beyond the range of any existing locks. + r = LockRange{4096, 10240} + if !l.canLock(2, ReadLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", ReadLock, r, 2) + } + + // Expect to not be able to take a write lock with other readers in the way. + r = LockRange{0, 8192} + if l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got true, want false", WriteLock, r, 1) + } + // Expect to be able to extend the write lock for the same uid. + r = LockRange{3072, 8192} + if !l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1) + } + // Expect to not be able to overlap a write lock for two different uids. + if l.canLock(2, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got true, want false", WriteLock, r, 2) + } + // Expect to be able to set a write lock that is beyond the range of any + // existing locks. + r = LockRange{8192, 10240} + if !l.canLock(2, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 2) + } + // Expect to be able to upgrade a read lock (any portion of it). 
+ r = LockRange{1024, 2048} + if !l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1) + } + r = LockRange{1080, 2000} + if !l.canLock(1, WriteLock, r) { + t.Fatalf("canLock type %d for range %v and uid %d got false, want true", WriteLock, r, 1) + } +} + +func TestSetLock(t *testing.T) { + tests := []struct { + // description of test. + name string + + // LockSet entries to pre-fill. + before []entry + + // Description of region to lock: + // + // start is the file offset of the lock. + start uint64 + // end is the end file offset of the lock. + end uint64 + // uid of lock attempter. + uid UniqueID + // lock type requested. + lockType LockType + + // success is true if taking the above + // lock should succeed. + success bool + + // Expected layout of the set after locking + // if success is true. + after []entry + }{ + { + name: "set zero length ReadLock on empty set", + start: 0, + end: 0, + uid: 0, + lockType: ReadLock, + success: true, + }, + { + name: "set zero length WriteLock on empty set", + start: 0, + end: 0, + uid: 0, + lockType: WriteLock, + success: true, + }, + { + name: "set ReadLock on empty set", + start: 0, + end: LockEOF, + uid: 0, + lockType: ReadLock, + success: true, + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + }, + { + name: "set WriteLock on empty set", + start: 0, + end: LockEOF, + uid: 0, + lockType: WriteLock, + success: true, + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + }, + { + name: "set ReadLock on WriteLock same uid", + // + 
----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 0, + lockType: ReadLock, + success: true, + // + ----------- + --------------------------- + + // | Readers 0 | Writer 0 | + // + ----------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 4096}, + }, + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "set WriteLock on ReadLock same uid", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 0, + lockType: WriteLock, + success: true, + // + ----------- + --------------------------- + + // | Writer 0 | Readers 0 | + // + ----------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "set ReadLock on WriteLock different uid", + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 1, + lockType: ReadLock, + success: false, + }, + { + name: "set WriteLock on ReadLock different uid", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 
max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 1, + lockType: WriteLock, + success: false, + }, + { + name: "split ReadLock for overlapping lock at start 0", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 1, + lockType: ReadLock, + success: true, + // + -------------- + --------------------------- + + // | Readers 0 & 1 | Readers 0 | + // + -------------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "split ReadLock for overlapping lock at non-zero start", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 4096, + end: 8192, + uid: 1, + lockType: ReadLock, + success: true, + // + ---------- + -------------- + ----------- + + // | Readers 0 | Readers 0 & 1 | Readers 0 | + // + ---------- + -------------- + ----------- + + // 0 4096 8192 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{4096, 8192}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{8192, LockEOF}, + }, + }, + }, + { + name: "fill front gap with ReadLock", + // + --------- + 
---------------------------- + + // | gap | Readers 0 | + // + --------- + ---------------------------- + + // 0 1024 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, LockEOF}, + }, + }, + start: 0, + end: 8192, + uid: 0, + lockType: ReadLock, + success: true, + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + }, + { + name: "fill end gap with ReadLock", + // + ---------------------------- + + // | Readers 0 | + // + ---------------------------- + + // 0 4096 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 4096}, + }, + }, + start: 1024, + end: LockEOF, + uid: 0, + lockType: ReadLock, + success: true, + // Note that this is not merged after lock does a Split. This is + // fine because the two locks will still *behave* as one. In other + // words we can fragment any lock all we want and semantically it + // makes no difference. 
+ // + // + ----------- + --------------------------- + + // | Readers 0 | Readers 0 | + // + ----------- + --------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, LockEOF}, + }, + }, + }, + { + name: "fill gap with ReadLock and split", + // + --------- + ---------------------------- + + // | gap | Readers 0 | + // + --------- + ---------------------------- + + // 0 1024 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 1, + lockType: ReadLock, + success: true, + // + --------- + ------------- + ------------- + + // | Reader 1 | Readers 0 & 1 | Reader 0 | + // + ----------+ ------------- + ------------- + + // 0 1024 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{1024, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "upgrade ReadLock to WriteLock for single uid fill gap", + // + ------------- + --------- + --- + ------------- + + // | Readers 0 & 1 | Readers 0 | gap | Readers 0 & 2 | + // + ------------- + --------- + --- + ------------- + + // 0 1024 2048 4096 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, 2048}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 1024, + end: 4096, + uid: 0, + lockType: WriteLock, + success: true, + // + ------------- + -------- + ------------- + 
+ // | Readers 0 & 1 | Writer 0 | Readers 0 & 2 | + // + ------------- + -------- + ------------- + + // 0 1024 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{1024, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "upgrade ReadLock to WriteLock for single uid keep gap", + // + ------------- + --------- + --- + ------------- + + // | Readers 0 & 1 | Readers 0 | gap | Readers 0 & 2 | + // + ------------- + --------- + --- + ------------- + + // 0 1024 2048 4096 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, 2048}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 1024, + end: 3072, + uid: 0, + lockType: WriteLock, + success: true, + // + ------------- + -------- + --- + ------------- + + // | Readers 0 & 1 | Writer 0 | gap | Readers 0 & 2 | + // + ------------- + -------- + --- + ------------- + + // 0 1024 3072 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{1024, 3072}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "fail to upgrade ReadLock to WriteLock with conflicting Reader", + // + ------------- + --------- + + // | Readers 0 & 1 | Readers 0 | + // + ------------- + --------- + + // 0 1024 2048 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: 
Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, 2048}, + }, + }, + start: 0, + end: 2048, + uid: 0, + lockType: WriteLock, + success: false, + }, + { + name: "take WriteLock on whole file if all uids are the same", + // + ------------- + --------- + --------- + ---------- + + // | Writer 0 | Readers 0 | Readers 0 | Readers 0 | + // + ------------- + --------- + --------- + ---------- + + // 0 1024 2048 4096 max uint64 + before: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{1024, 2048}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{2048, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 0, + end: LockEOF, + uid: 0, + lockType: WriteLock, + success: true, + // We do not manually merge locks. Semantically a fragmented lock + // held by the same uid will behave as one lock so it makes no difference. 
+ // + // + ------------- + ---------------------------- + + // | Writer 0 | Writer 0 | + // + ------------- + ---------------------------- + + // 0 1024 max uint64 + after: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{1024, LockEOF}, + }, + }, + }, + } + + for _, test := range tests { + l := fill(test.before) + + r := LockRange{Start: test.start, End: test.end} + success := l.lock(test.uid, test.lockType, r) + var got []entry + for seg := l.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + got = append(got, entry{ + Lock: seg.Value(), + LockRange: seg.Range(), + }) + } + + if success != test.success { + t.Errorf("%s: setlock(%v, %+v, %d, %d) got success %v, want %v", test.name, test.before, r, test.uid, test.lockType, success, test.success) + continue + } + + if success { + if !equals(got, test.after) { + t.Errorf("%s: got set %+v, want %+v", test.name, got, test.after) + } + } + } +} + +func TestUnlock(t *testing.T) { + tests := []struct { + // description of test. + name string + + // LockSet entries to pre-fill. + before []entry + + // Description of region to unlock: + // + // start is the file start of the lock. + start uint64 + // end is the end file start of the lock. + end uint64 + // uid of lock holder. + uid UniqueID + + // Expected layout of the set after unlocking. 
+ after []entry + }{ + { + name: "unlock zero length on empty set", + start: 0, + end: 0, + uid: 0, + }, + { + name: "unlock on empty set (no-op)", + start: 0, + end: LockEOF, + uid: 0, + }, + { + name: "unlock uid not locked (no-op)", + // + --------------------------- + + // | Readers 1 & 2 | + // + --------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 1024, + end: 4096, + uid: 0, + // + --------------------------- + + // | Readers 1 & 2 | + // + --------------------------- + + // 0 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + }, + { + name: "unlock ReadLock over entire file", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: LockEOF, + uid: 0, + }, + { + name: "unlock WriteLock over entire file", + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: LockEOF, + uid: 0, + }, + { + name: "unlock partial ReadLock (start)", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 0, + // + ------ + --------------------------- + + // | gap | Readers 0 | + // +------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: 
map[UniqueID]bool{0: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock partial WriteLock (start)", + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 0, + end: 4096, + uid: 0, + // + ------ + --------------------------- + + // | gap | Writer 0 | + // +------- + --------------------------- + + // 0 4096 max uint64 + after: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock partial ReadLock (end)", + // + ----------------------------------------- + + // | Readers 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 4096, + end: LockEOF, + uid: 0, + // + --------------------------- + + // | Readers 0 | + // +---------------------------- + + // 0 4096 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true}}, + LockRange: LockRange{0, 4096}, + }, + }, + }, + { + name: "unlock partial WriteLock (end)", + // + ----------------------------------------- + + // | Writer 0 | + // + ----------------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 4096, + end: LockEOF, + uid: 0, + // + --------------------------- + + // | Writer 0 | + // +---------------------------- + + // 0 4096 + after: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, 4096}, + }, + }, + }, + { + name: "unlock for single uid", + // + ------------- + --------- + ------------------- + + // | Readers 0 & 1 | Writer 0 | Readers 0 & 1 & 2 | + // + ------------- + --------- + ------------------- + 
+ // 0 1024 4096 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{1024, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 0, + end: LockEOF, + uid: 0, + // + --------- + --- + --------------- + + // | Readers 1 | gap | Readers 1 & 2 | + // + --------- + --- + --------------- + + // 0 1024 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{1: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock subsection locked", + // + ------------------------------- + + // | Readers 0 & 1 & 2 | + // + ------------------------------- + + // 0 max uint64 + before: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true, 2: true}}, + LockRange: LockRange{0, LockEOF}, + }, + }, + start: 1024, + end: 4096, + uid: 0, + // + ----------------- + ------------- + ----------------- + + // | Readers 0 & 1 & 2 | Readers 1 & 2 | Readers 0 & 1 & 2 | + // + ----------------- + ------------- + ----------------- + + // 0 1024 4096 max uint64 + after: []entry{ + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true, 2: true}}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true, 2: true}}, + LockRange: LockRange{1024, 4096}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true, 2: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock mid-gap to increase gap", + // + --------- + ----- + ------------------- + + // | Writer 0 | gap | Readers 0 & 1 | + // + --------- + ----- + ------------------- + + // 0 1024 4096 max uint64 + before: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: 
LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 8, + end: 2048, + uid: 0, + // + --------- + ----- + ------------------- + + // | Writer 0 | gap | Readers 0 & 1 | + // + --------- + ----- + ------------------- + + // 0 8 4096 max uint64 + after: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, 8}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + }, + { + name: "unlock split region on uid mid-gap", + // + --------- + ----- + ------------------- + + // | Writer 0 | gap | Readers 0 & 1 | + // + --------- + ----- + ------------------- + + // 0 1024 4096 max uint64 + before: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{4096, LockEOF}, + }, + }, + start: 2048, + end: 8192, + uid: 0, + // + --------- + ----- + --------- + ------------- + + // | Writer 0 | gap | Readers 1 | Readers 0 & 1 | + // + --------- + ----- + --------- + ------------- + + // 0 1024 4096 8192 max uint64 + after: []entry{ + { + Lock: Lock{HasWriter: true, Writer: 0}, + LockRange: LockRange{0, 1024}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{1: true}}, + LockRange: LockRange{4096, 8192}, + }, + { + Lock: Lock{Readers: map[UniqueID]bool{0: true, 1: true}}, + LockRange: LockRange{8192, LockEOF}, + }, + }, + }, + } + + for _, test := range tests { + l := fill(test.before) + + r := LockRange{Start: test.start, End: test.end} + l.unlock(test.uid, r) + var got []entry + for seg := l.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + got = append(got, entry{ + Lock: seg.Value(), + LockRange: seg.Range(), + }) + } + if !equals(got, test.after) { + t.Errorf("%s: got set %+v, want %+v", test.name, got, test.after) + } + } +} diff --git a/pkg/sentry/fs/mock.go 
b/pkg/sentry/fs/mock.go new file mode 100644 index 000000000..b3bfa5268 --- /dev/null +++ b/pkg/sentry/fs/mock.go @@ -0,0 +1,177 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// MockInodeOperations implements InodeOperations for testing Inodes. +type MockInodeOperations struct { + InodeOperations + + UAttr UnstableAttr + + createCalled bool + createDirectoryCalled bool + createLinkCalled bool + renameCalled bool + walkCalled bool +} + +// NewMockInode returns a mock *Inode using MockInodeOperations. +func NewMockInode(ctx context.Context, msrc *MountSource, sattr StableAttr) *Inode { + return NewInode(NewMockInodeOperations(ctx), msrc, sattr) +} + +// NewMockInodeOperations returns a *MockInodeOperations. +func NewMockInodeOperations(ctx context.Context) *MockInodeOperations { + return &MockInodeOperations{ + UAttr: WithCurrentTime(ctx, UnstableAttr{ + Perms: FilePermsFromMode(0777), + }), + } +} + +// MockMountSourceOps implements fs.MountSourceOperations. +type MockMountSourceOps struct { + MountSourceOperations + keep bool + revalidate bool +} + +// NewMockMountSource returns a new *MountSource using MockMountSourceOps. 
+func NewMockMountSource(cache *DirentCache) *MountSource { + var keep bool + if cache != nil { + keep = cache.maxSize > 0 + } + return &MountSource{ + MountSourceOperations: &MockMountSourceOps{keep: keep}, + fscache: cache, + children: make(map[*MountSource]struct{}), + } +} + +// Revalidate implements fs.MountSourceOperations.Revalidate. +func (n *MockMountSourceOps) Revalidate(*Dirent) bool { + return n.revalidate +} + +// Keep implements fs.MountSourceOperations.Keep. +func (n *MockMountSourceOps) Keep(dirent *Dirent) bool { + return n.keep +} + +// WriteOut implements fs.InodeOperations.WriteOut. +func (n *MockInodeOperations) WriteOut(context.Context, *Inode) error { + return nil +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (n *MockInodeOperations) UnstableAttr(context.Context, *Inode) (UnstableAttr, error) { + return n.UAttr, nil +} + +// IsVirtual implements fs.InodeOperations.IsVirtual. +func (n *MockInodeOperations) IsVirtual() bool { + return false +} + +// Lookup implements fs.InodeOperations.Lookup. +func (n *MockInodeOperations) Lookup(ctx context.Context, dir *Inode, p string) (*Dirent, error) { + n.walkCalled = true + return NewDirent(NewInode(&MockInodeOperations{}, dir.MountSource, StableAttr{}), p), nil +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (n *MockInodeOperations) SetPermissions(context.Context, *Inode, FilePermissions) bool { + return false +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (*MockInodeOperations) SetOwner(context.Context, *Inode, FileOwner) error { + return syserror.EINVAL +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (n *MockInodeOperations) SetTimestamps(context.Context, *Inode, TimeSpec) error { + return nil +} + +// Create implements fs.InodeOperations.Create. 
+func (n *MockInodeOperations) Create(ctx context.Context, dir *Inode, p string, flags FileFlags, perms FilePermissions) (*File, error) { + n.createCalled = true + d := NewDirent(NewInode(&MockInodeOperations{}, dir.MountSource, StableAttr{}), p) + return &File{Dirent: d}, nil +} + +// CreateLink implements fs.InodeOperations.CreateLink. +func (n *MockInodeOperations) CreateLink(_ context.Context, dir *Inode, oldname string, newname string) error { + n.createLinkCalled = true + return nil +} + +// CreateDirectory implements fs.InodeOperations.CreateDirectory. +func (n *MockInodeOperations) CreateDirectory(context.Context, *Inode, string, FilePermissions) error { + n.createDirectoryCalled = true + return nil +} + +// Rename implements fs.InodeOperations.Rename. +func (n *MockInodeOperations) Rename(ctx context.Context, oldParent *Inode, oldName string, newParent *Inode, newName string) error { + n.renameCalled = true + return nil +} + +// Check implements fs.InodeOperations.Check. +func (n *MockInodeOperations) Check(ctx context.Context, inode *Inode, p PermMask) bool { + return ContextCanAccessFile(ctx, inode, p) +} + +// Release implements fs.InodeOperations.Release. +func (n *MockInodeOperations) Release(context.Context) {} + +// Truncate implements fs.InodeOperations.Truncate. +func (n *MockInodeOperations) Truncate(ctx context.Context, inode *Inode, size int64) error { + return nil +} + +// DeprecatedPwritev implements fs.InodeOperations.DeprecatedPwritev. +func (n *MockInodeOperations) DeprecatedPwritev(context.Context, usermem.IOSequence, int64) (int64, error) { + return 0, nil +} + +// DeprecatedReaddir implements fs.InodeOperations.DeprecatedReaddir. +func (n *MockInodeOperations) DeprecatedReaddir(context.Context, *DirCtx, int) (int, error) { + return 0, nil +} + +// Remove implements fs.InodeOperations.Remove. 
+func (n *MockInodeOperations) Remove(context.Context, *Inode, string) error { + return nil +} + +// RemoveDirectory implements fs.InodeOperations.RemoveDirectory. +func (n *MockInodeOperations) RemoveDirectory(context.Context, *Inode, string) error { + return nil +} + +// Getlink implements fs.InodeOperations.Getlink. +func (n *MockInodeOperations) Getlink(context.Context, *Inode) (*Dirent, error) { + return nil, syserror.ENOLINK +} diff --git a/pkg/sentry/fs/mount.go b/pkg/sentry/fs/mount.go new file mode 100644 index 000000000..a2943b097 --- /dev/null +++ b/pkg/sentry/fs/mount.go @@ -0,0 +1,298 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "bytes" + "fmt" + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/refs" +) + +// DirentOperations provide file systems greater control over how long a Dirent stays pinned +// in core. Implementations must not take Dirent.mu. +type DirentOperations interface { + // Revalidate returns true if the Dirent is stale and its InodeOperations needs to be reloaded. Revalidate + // will never be called on a Dirent that is mounted. + Revalidate(dirent *Dirent) bool + + // Keep returns true if the Dirent should be kept in memory for as long as possible + // beyond any active references. + Keep(dirent *Dirent) bool +} + +// MountSourceOperations contains filesystem specific operations. 
+type MountSourceOperations interface { + // TODO: Add: + // + // StatFS() (Info, error) + // BlockSize() int64 + // FS() Filesystem + + // DirentOperations provide optional extra management of Dirents. + DirentOperations + + // Destroy destroys the MountSource. + Destroy() + + // Below are MountSourceOperations that do not conform to Linux. + + // ResetInodeMappings clears all mappings of Inodes before SaveInodeMapping + // is called. + ResetInodeMappings() + + // SaveInodeMappings is called during saving to store, for each reachable + // Inode in the mounted filesystem, a mapping of Inode.StableAttr.InodeID + // to the Inode's path relative to its mount point. If an Inode is + // reachable at more than one path due to hard links, it is unspecified + // which path is mapped. Filesystems that do not use this information to + // restore inodes can make SaveInodeMappings a no-op. + SaveInodeMapping(inode *Inode, path string) +} + +// InodeMappings defines a fmt.Stringer MountSource Inode mappings. +type InodeMappings map[uint64]string + +// String implements fmt.Stringer.String. +func (i InodeMappings) String() string { + var mappingsBuf bytes.Buffer + mappingsBuf.WriteString("\n") + for ino, name := range i { + mappingsBuf.WriteString(fmt.Sprintf("\t%q\t\tinode number %d\n", name, ino)) + } + return mappingsBuf.String() +} + +// MountSource represents a source of file objects. +// +// MountSource corresponds to struct super_block in Linux. +// +// A mount source may represent a physical device (or a partition of a physical +// device) or a virtual source of files such as procfs for a specific PID +// namespace. There should be only one mount source per logical device. E.g. +// there should be only procfs mount source for a given PID namespace. +// +// A mount source represents files as inodes. Every inode belongs to exactly +// one mount source. Each file object may only be represented using one inode +// object in a sentry instance. 
+//
+// This is an amalgamation of structs super_block, vfsmount, and mount, while
+// MountSourceOperations is akin to struct super_operations.
+//
+// Hence, mount source also contains common mounted file system state, such as
+// mount flags, the root Dirent, and children mounts. For now, this
+// amalgamation implies that a mount source cannot be shared by multiple mounts
+// (e.g. cannot be mounted at different locations).
+//
+// TODO: Move mount-specific information out of MountSource.
+type MountSource struct {
+	refs.AtomicRefCount
+
+	// MountSourceOperations defines filesystem specific behavior.
+	MountSourceOperations
+
+	// Filesystem is the filesystem backing the mount. Can be nil if there
+	// is no filesystem backing the mount.
+	Filesystem Filesystem
+
+	// Flags are the flags that this filesystem was mounted with.
+	Flags MountSourceFlags
+
+	// fscache keeps Dirents pinned beyond application references to them.
+	// It must be flushed before kernel.SaveTo.
+	fscache *DirentCache `state:"nosave"`
+
+	// direntRefs is the sum of references on all Dirents in this MountSource.
+	//
+	// direntRefs is increased when a Dirent in MountSource is IncRef'd, and
+	// decreased when a Dirent in MountSource is DecRef'd.
+	//
+	// To cleanly unmount a MountSource, one must check that no direntRefs are
+	// held anymore. To check, one must hold root.parent.dirMu of the
+	// MountSource's root Dirent before reading direntRefs to prevent further
+	// walks to Dirents in this MountSource.
+	//
+	// direntRefs must be atomically changed.
+	direntRefs uint64
+
+	// mu protects the fields below, which are set by the MountNamespace
+	// during Mount/Unmount.
+	mu sync.Mutex `state:"nosave"`
+
+	// id is a unique id for this mount.
+	id uint64
+
+	// root is the root Dirent of this mount.
+	root *Dirent
+
+	// parent is the parent MountSource, or nil if this MountSource is the root.
+ parent *MountSource + + // children are the child MountSources of this MountSource. + children map[*MountSource]struct{} +} + +// defaultDirentCacheSize is the number of Dirents that the VFS can hold an extra +// reference on. +const defaultDirentCacheSize uint64 = 1000 + +// NewMountSource returns a new MountSource. Filesystem may be nil if there is no +// filesystem backing the mount. +func NewMountSource(mops MountSourceOperations, filesystem Filesystem, flags MountSourceFlags) *MountSource { + return &MountSource{ + MountSourceOperations: mops, + Flags: flags, + Filesystem: filesystem, + fscache: NewDirentCache(defaultDirentCacheSize), + children: make(map[*MountSource]struct{}), + } +} + +// Parent returns the parent mount, or nil if this mount is the root. +func (msrc *MountSource) Parent() *MountSource { + msrc.mu.Lock() + defer msrc.mu.Unlock() + return msrc.parent +} + +// ID returns the ID of this mount. +func (msrc *MountSource) ID() uint64 { + msrc.mu.Lock() + defer msrc.mu.Unlock() + return msrc.id +} + +// Children returns the (immediate) children of this MountSource. +func (msrc *MountSource) Children() []*MountSource { + msrc.mu.Lock() + defer msrc.mu.Unlock() + + ms := make([]*MountSource, 0, len(msrc.children)) + for c := range msrc.children { + ms = append(ms, c) + } + return ms +} + +// Submounts returns all mounts that are descendants of this mount. +func (msrc *MountSource) Submounts() []*MountSource { + var ms []*MountSource + for _, c := range msrc.Children() { + ms = append(ms, c) + ms = append(ms, c.Submounts()...) + } + return ms +} + +// Root returns the root dirent of this mount. +func (msrc *MountSource) Root() *Dirent { + msrc.mu.Lock() + defer msrc.mu.Unlock() + return msrc.root +} + +// DirentRefs returns the current mount direntRefs. +func (msrc *MountSource) DirentRefs() uint64 { + return atomic.LoadUint64(&msrc.direntRefs) +} + +// IncDirentRefs increases direntRefs. 
+func (msrc *MountSource) IncDirentRefs() { + atomic.AddUint64(&msrc.direntRefs, 1) +} + +// DecDirentRefs decrements direntRefs. +func (msrc *MountSource) DecDirentRefs() { + if atomic.AddUint64(&msrc.direntRefs, ^uint64(0)) == ^uint64(0) { + panic("Decremented zero mount reference direntRefs") + } +} + +func (msrc *MountSource) destroy() { + if c := msrc.DirentRefs(); c != 0 { + panic(fmt.Sprintf("MountSource with non-zero direntRefs is being destroyed: %d", c)) + } + msrc.MountSourceOperations.Destroy() +} + +// DecRef drops a reference on the MountSource. +func (msrc *MountSource) DecRef() { + msrc.DecRefWithDestructor(msrc.destroy) +} + +// FlushDirentRefs drops all references held by the MountSource on Dirents. +func (msrc *MountSource) FlushDirentRefs() { + msrc.fscache.Invalidate() +} + +// NewCachingMountSource returns a generic mount that will cache dirents +// aggressively. Filesystem may be nil if there is no backing filesystem. +func NewCachingMountSource(filesystem Filesystem, flags MountSourceFlags) *MountSource { + return NewMountSource(&SimpleMountSourceOperations{ + keep: true, + }, filesystem, flags) +} + +// NewNonCachingMountSource returns a generic mount that will never cache dirents. +// Filesystem may be nil if there is no backing filesystem. +func NewNonCachingMountSource(filesystem Filesystem, flags MountSourceFlags) *MountSource { + return NewMountSource(&SimpleMountSourceOperations{ + keep: false, + }, filesystem, flags) +} + +// SimpleMountSourceOperations implements MountSourceOperations. +type SimpleMountSourceOperations struct { + keep bool +} + +// Revalidate implements MountSourceOperations.Revalidate. +func (*SimpleMountSourceOperations) Revalidate(*Dirent) bool { + return false +} + +// Keep implements MountSourceOperations.Keep. +func (smo *SimpleMountSourceOperations) Keep(*Dirent) bool { + return smo.keep +} + +// ResetInodeMappings implements MountSourceOperations.ResetInodeMappings. 
+func (*SimpleMountSourceOperations) ResetInodeMappings() {} + +// SaveInodeMapping implements MountSourceOperations.SaveInodeMapping. +func (*SimpleMountSourceOperations) SaveInodeMapping(*Inode, string) {} + +// Destroy implements MountSourceOperations.Destroy. +func (*SimpleMountSourceOperations) Destroy() {} + +// Info defines attributes of a filesystem. +type Info struct { + // Type is the filesystem type magic value. + Type uint64 + + // TotalBlocks is the total data blocks in the filesystem. + TotalBlocks uint64 + + // FreeBlocks is the number of free blocks available. + FreeBlocks uint64 + + // TotalFiles is the total file nodes in the filesystem. + TotalFiles uint64 + + // FreeFiles is the number of free file nodes. + FreeFiles uint64 +} diff --git a/pkg/sentry/fs/mount_overlay.go b/pkg/sentry/fs/mount_overlay.go new file mode 100644 index 000000000..16c25e46c --- /dev/null +++ b/pkg/sentry/fs/mount_overlay.go @@ -0,0 +1,95 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import "gvisor.googlesource.com/gvisor/pkg/sentry/context" + +// overlayMountSourceOperations implements MountSourceOperations for an overlay +// mount point. 
+type overlayMountSourceOperations struct { + upper *MountSource + lower *MountSource +} + +func newOverlayMountSource(upper, lower *MountSource, flags MountSourceFlags) *MountSource { + upper.IncRef() + lower.IncRef() + return NewMountSource(&overlayMountSourceOperations{ + upper: upper, + lower: lower, + }, &overlayFilesystem{}, flags) +} + +// Revalidate panics if the upper or lower MountSource require that dirent be +// revalidated. Otherwise always returns false. +func (o *overlayMountSourceOperations) Revalidate(dirent *Dirent) bool { + if o.upper.Revalidate(dirent) || o.lower.Revalidate(dirent) { + panic("an overlay cannot revalidate file objects") + } + return false +} + +// Keep returns true if either upper or lower MountSource require that the +// dirent be kept in memory. +func (o *overlayMountSourceOperations) Keep(dirent *Dirent) bool { + return o.upper.Keep(dirent) || o.lower.Keep(dirent) +} + +// ResetInodeMappings propagates the call to both upper and lower MountSource. +func (o *overlayMountSourceOperations) ResetInodeMappings() { + o.upper.ResetInodeMappings() + o.lower.ResetInodeMappings() +} + +// SaveInodeMapping propagates the call to both upper and lower MountSource. +func (o *overlayMountSourceOperations) SaveInodeMapping(inode *Inode, path string) { + inode.overlay.copyMu.RLock() + defer inode.overlay.copyMu.RUnlock() + if inode.overlay.upper != nil { + o.upper.SaveInodeMapping(inode.overlay.upper, path) + } + if inode.overlay.lower != nil { + o.lower.SaveInodeMapping(inode.overlay.lower, path) + } +} + +// Destroy drops references on the upper and lower MountSource. +func (o *overlayMountSourceOperations) Destroy() { + o.upper.DecRef() + o.lower.DecRef() +} + +// type overlayFilesystem is the filesystem for overlay mounts. +type overlayFilesystem struct{} + +// Name implements Filesystem.Name. +func (ofs *overlayFilesystem) Name() string { + return "overlayfs" +} + +// Flags implements Filesystem.Flags. 
+func (ofs *overlayFilesystem) Flags() FilesystemFlags { + return 0 +} + +// AllowUserMount implements Filesystem.AllowUserMount. +func (ofs *overlayFilesystem) AllowUserMount() bool { + return false +} + +// Mount implements Filesystem.Mount. +func (ofs *overlayFilesystem) Mount(ctx context.Context, device string, flags MountSourceFlags, data string) (*Inode, error) { + panic("overlayFilesystem.Mount should not be called!") +} diff --git a/pkg/sentry/fs/mount_state.go b/pkg/sentry/fs/mount_state.go new file mode 100644 index 000000000..f5ed1dd8d --- /dev/null +++ b/pkg/sentry/fs/mount_state.go @@ -0,0 +1,25 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +// afterLoad is invoked by stateify. +// +// Beyond the cache, this method's existence is required to ensure that this +// object is not marked "complete" until all dependent objects are also marked +// "complete". Implementations (e.g. see gofer_state.go) reach into the +// MountSourceOperations through this object, this is necessary on restore. +func (msrc *MountSource) afterLoad() { + msrc.fscache = NewDirentCache(defaultDirentCacheSize) +} diff --git a/pkg/sentry/fs/mount_test.go b/pkg/sentry/fs/mount_test.go new file mode 100644 index 000000000..3a053c154 --- /dev/null +++ b/pkg/sentry/fs/mount_test.go @@ -0,0 +1,216 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" +) + +// cacheReallyContains iterates through the dirent cache to determine whether +// it contains the given dirent. +func cacheReallyContains(cache *DirentCache, d *Dirent) bool { + for i := cache.list.Front(); i != nil; i = i.Next() { + if i == d { + return true + } + } + return false +} + +// TestMountSourceOnlyCachedOnce tests that a Dirent that is mounted over only ends +// up in a single Dirent Cache. NOTE: Having a dirent in multiple +// caches causes major consistency issues. +func TestMountSourceOnlyCachedOnce(t *testing.T) { + ctx := contexttest.Context(t) + + rootCache := NewDirentCache(100) + rootInode := NewMockInode(ctx, NewMockMountSource(rootCache), StableAttr{ + Type: Directory, + }) + mm, err := NewMountNamespace(ctx, rootInode) + if err != nil { + t.Fatalf("NewMountNamespace failed: %v", err) + } + rootDirent := mm.Root() + defer rootDirent.DecRef() + + // Get a child of the root which we will mount over. Note that the + // MockInodeOperations causes Walk to always succeed. + child, err := rootDirent.Walk(ctx, rootDirent, "child") + if err != nil { + t.Fatalf("failed to walk to child dirent: %v", err) + } + child.maybeExtendReference() // Cache. + + // Ensure that the root cache contains the child. 
+	if !cacheReallyContains(rootCache, child) {
+		t.Errorf("wanted rootCache to contain child dirent, but it did not")
+	}
+
+	// Create a new cache and inode, and mount it over child.
+	submountCache := NewDirentCache(100)
+	submountInode := NewMockInode(ctx, NewMockMountSource(submountCache), StableAttr{
+		Type: Directory,
+	})
+	if err := mm.Mount(ctx, child, submountInode); err != nil {
+		t.Fatalf("failed to mount over child: %v", err)
+	}
+
+	// Walk to the child again.
+	child2, err := rootDirent.Walk(ctx, rootDirent, "child")
+	if err != nil {
+		t.Fatalf("failed to walk to child dirent: %v", err)
+	}
+
+	// Should have a different Dirent than before.
+	if child == child2 {
+		t.Fatalf("expected %v not equal to %v, but they are the same", child, child2)
+	}
+
+	// Neither of the caches should contain the child.
+	if cacheReallyContains(rootCache, child) {
+		t.Errorf("wanted rootCache not to contain child dirent, but it did")
+	}
+	if cacheReallyContains(submountCache, child) {
+		t.Errorf("wanted submountCache not to contain child dirent, but it did")
+	}
+}
+
+// Test that mounts have proper parent/child relationships.
+func TestMountSourceParentChildRelationship(t *testing.T) { + ctx := contexttest.Context(t) + + rootCache := NewDirentCache(100) + rootInode := NewMockInode(ctx, NewMockMountSource(rootCache), StableAttr{ + Type: Directory, + }) + mm, err := NewMountNamespace(ctx, rootInode) + if err != nil { + t.Fatalf("NewMountNamespace failed: %v", err) + } + rootDirent := mm.Root() + defer rootDirent.DecRef() + + // Add mounts at the following paths: + paths := []string{ + "/foo", + "/foo/bar", + "/foo/bar/baz", + "/foo/qux", + "/waldo", + } + + for _, p := range paths { + d, err := mm.FindLink(ctx, rootDirent, nil, p, 0) + if err != nil { + t.Fatalf("could not find path %q in mount manager: %v", p, err) + } + submountInode := NewMockInode(ctx, NewMockMountSource(nil), StableAttr{ + Type: Directory, + }) + if err := mm.Mount(ctx, d, submountInode); err != nil { + t.Fatalf("could not mount at %q: %v", p, err) + } + } + + // mm root should contain all submounts (and does not include the root + // mount). + allMountSources := rootDirent.Inode.MountSource.Submounts() + if err := mountPathsAre(rootDirent, allMountSources, paths...); err != nil { + t.Error(err) + } + + // Each mount should have a unique ID. + foundIDs := make(map[uint64]struct{}) + for _, m := range allMountSources { + id := m.ID() + if _, ok := foundIDs[id]; ok { + t.Errorf("got multiple mounts with id %d", id) + } + foundIDs[id] = struct{}{} + } + + // Root mount should have no parent. + rootMountSource := mm.root.Inode.MountSource + if p := rootMountSource.Parent(); p != nil { + t.Errorf("root.Parent got %v wanted nil", p) + } + + // Root mount should have 2 children: foo and waldo. + rootChildren := rootMountSource.Children() + if err := mountPathsAre(rootDirent, rootChildren, "/foo", "/waldo"); err != nil { + t.Error(err) + } + // All root mount children should have root as parent. 
+ for _, c := range rootChildren { + if p := c.Parent(); p != rootMountSource { + t.Errorf("root mount child got parent %+v, wanted root mount", p) + } + } + + // "foo" mount should have two children: /foo/bar, and /foo/qux. + d, err := mm.FindLink(ctx, rootDirent, nil, "/foo", 0) + if err != nil { + t.Fatalf("could not find path %q in mount manager: %v", "/foo", err) + } + fooMountSource := d.Inode.MountSource + fooMountSourceChildren := fooMountSource.Children() + if err := mountPathsAre(rootDirent, fooMountSourceChildren, "/foo/bar", "/foo/qux"); err != nil { + t.Error(err) + } + // Each child should have fooMountSource as parent. + for _, c := range fooMountSourceChildren { + if p := c.Parent(); p != fooMountSource { + t.Errorf("foo mount child got parent %+v, wanted foo mount", p) + } + } + // Submounts of foo are /foo/bar, /foo/qux, and /foo/bar/baz. + if err := mountPathsAre(rootDirent, fooMountSource.Submounts(), "/foo/bar", "/foo/qux", "/foo/bar/baz"); err != nil { + t.Error(err) + } + + // "waldo" mount should have no submounts or children. 
+ waldo, err := mm.FindLink(ctx, rootDirent, nil, "/waldo", 0) + if err != nil { + t.Fatalf("could not find path %q in mount manager: %v", "/waldo", err) + } + waldoMountSource := waldo.Inode.MountSource + if got := len(waldoMountSource.Children()); got != 0 { + t.Errorf("waldo got %d children, wanted 0", got) + } + if got := len(waldoMountSource.Submounts()); got != 0 { + t.Errorf("waldo got %d children, wanted 0", got) + } +} + +func mountPathsAre(root *Dirent, got []*MountSource, want ...string) error { + if len(got) != len(want) { + return fmt.Errorf("mount paths have different lengths: got %d want %d", len(got), len(want)) + } + gotPaths := make(map[string]struct{}, len(got)) + for _, g := range got { + n, _ := g.Root().FullName(root) + gotPaths[n] = struct{}{} + } + for _, w := range want { + if _, ok := gotPaths[w]; !ok { + return fmt.Errorf("no mount with path %q found", w) + } + } + return nil +} diff --git a/pkg/sentry/fs/mounts.go b/pkg/sentry/fs/mounts.go new file mode 100644 index 000000000..1e6b5b70e --- /dev/null +++ b/pkg/sentry/fs/mounts.go @@ -0,0 +1,511 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fs + +import ( + "fmt" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// DefaultTraversalLimit provides a sensible default traversal limit that may +// be passed to FindInode and FindLink. You may want to provide other options in +// individual syscall implementations, but for internal functions this will be +// sane. +const DefaultTraversalLimit = 10 + +// MountNamespace defines a collection of mounts. +type MountNamespace struct { + refs.AtomicRefCount + + // userns is the user namespace associated with this mount namespace. + // + // All privileged operations on this mount namespace must have + // appropriate capabilities in this userns. + // + // userns is immutable. + userns *auth.UserNamespace + + // root is the root directory. + root *Dirent + + // mu protects mounts and mountID counter. + mu sync.Mutex `state:"nosave"` + + // mounts is a map of the last mounted Dirent -> stack of old Dirents + // that were mounted over, with the oldest mounted Dirent first and + // more recent mounted Dirents at the end of the slice. + // + // A reference to all Dirents in mounts (keys and values) must be held + // to ensure the Dirents are recoverable when unmounting. + mounts map[*Dirent][]*Dirent + + // mountID is the next mount id to assign. + mountID uint64 +} + +// NewMountNamespace returns a new MountNamespace, with the provided node at the +// root, and the given cache size. A root must always be provided. +func NewMountNamespace(ctx context.Context, root *Inode) (*MountNamespace, error) { + creds := auth.CredentialsFromContext(ctx) + + root.MountSource.mu.Lock() + defer root.MountSource.mu.Unlock() + + // Set the root dirent and id on the root mount. 
+ d := NewDirent(root, "/") + root.MountSource.root = d + root.MountSource.id = 1 + + return &MountNamespace{ + userns: creds.UserNamespace, + root: d, + mounts: make(map[*Dirent][]*Dirent), + mountID: 2, + }, nil +} + +// UserNamespace returns the user namespace associated with this mount manager. +func (mns *MountNamespace) UserNamespace() *auth.UserNamespace { + return mns.userns +} + +// Root returns the MountNamespace's root Dirent and increments its reference +// count. The caller must call DecRef when finished. +func (mns *MountNamespace) Root() *Dirent { + mns.root.IncRef() + return mns.root +} + +// FlushMountSourceRefs flushes extra references held by MountSources for all active mount points; +// see fs/mount.go:MountSource.FlushDirentRefs. +func (mns *MountNamespace) FlushMountSourceRefs() { + mns.mu.Lock() + defer mns.mu.Unlock() + mns.flushMountSourceRefsLocked() +} + +func (mns *MountNamespace) flushMountSourceRefsLocked() { + // Flush mounts' MountSource references. + for current, stack := range mns.mounts { + current.Inode.MountSource.FlushDirentRefs() + for _, prev := range stack { + prev.Inode.MountSource.FlushDirentRefs() + } + } + + // Flush root's MountSource references. + mns.root.Inode.MountSource.FlushDirentRefs() +} + +// destroy drops root and mounts dirent references and closes any original nodes. +// +// After destroy is called, the MountNamespace may continue to be referenced (for +// example via /proc/mounts), but should free all resources and shouldn't have +// Find* methods called. +func (mns *MountNamespace) destroy() { + mns.mu.Lock() + defer mns.mu.Unlock() + + // Flush all mounts' MountSource references to Dirents. This allows for mount + // points to be torn down since there should be no remaining references after + // this and DecRef below. + mns.flushMountSourceRefsLocked() + + // Teardown mounts. + for current, mp := range mns.mounts { + // Drop the mount reference on all mounted dirents. 
+ for _, d := range mp { + d.DecRef() + } + current.DecRef() + } + mns.mounts = nil + + // Drop reference on the root. + mns.root.DecRef() + + // Wait for asynchronous work (queued by dropping Dirent references + // above) to complete before destroying this MountNamespace. + AsyncBarrier() +} + +// DecRef implements RefCounter.DecRef with destructor mns.destroy. +func (mns *MountNamespace) DecRef() { + mns.DecRefWithDestructor(mns.destroy) +} + +// Freeze freezes the entire mount tree. +func (mns *MountNamespace) Freeze() { + mns.mu.Lock() + defer mns.mu.Unlock() + + // We only want to freeze Dirents with active references, not Dirents referenced + // by a mount's MountSource. + mns.flushMountSourceRefsLocked() + + // Freeze the entire shebang. + mns.root.Freeze() +} + +// withMountLocked prevents further walks to `node`, because `node` is about to +// be a mount point. +func (mns *MountNamespace) withMountLocked(node *Dirent, fn func() error) error { + mns.mu.Lock() + defer mns.mu.Unlock() + + renameMu.Lock() + defer renameMu.Unlock() + + // Linux allows mounting over the root (?). It comes with a strange set + // of semantics. We'll just not do this for now. + if node.parent == nil { + return syserror.EBUSY + } + + // For both mount and unmount, we take this lock so we can swap out the + // appropriate child in parent.children. + // + // For unmount, this also ensures that if `node` is a mount point, the + // underlying mount's MountSource.direntRefs cannot increase by preventing + // walks to node. + node.parent.dirMu.Lock() + defer node.parent.dirMu.Unlock() + + node.parent.mu.Lock() + defer node.parent.mu.Unlock() + + // We need not take node.dirMu since we have parent.dirMu. + + // We need to take node.mu, so that we can check for deletion. + node.mu.Lock() + defer node.mu.Unlock() + + return fn() +} + +// Mount mounts a `inode` over the subtree at `node`. 
+func (mns *MountNamespace) Mount(ctx context.Context, node *Dirent, inode *Inode) error { + return mns.withMountLocked(node, func() error { + // replacement already has one reference taken; this is the mount + // reference. + replacement, err := node.mount(ctx, inode) + if err != nil { + return err + } + + // Set child/parent dirent relationship. + parentMountSource := node.Inode.MountSource + childMountSource := inode.MountSource + parentMountSource.mu.Lock() + defer parentMountSource.mu.Unlock() + childMountSource.mu.Lock() + defer childMountSource.mu.Unlock() + + parentMountSource.children[childMountSource] = struct{}{} + childMountSource.parent = parentMountSource + + // Set the mount's root dirent and id. + childMountSource.root = replacement + childMountSource.id = mns.mountID + mns.mountID++ + + // Drop node from its dirent cache. + node.dropExtendedReference() + + // If node is already a mount point, push node on the stack so it can + // be recovered on unmount. + if stack, ok := mns.mounts[node]; ok { + mns.mounts[replacement] = append(stack, node) + delete(mns.mounts, node) + return nil + } + + // Was not already mounted, just add another mount point. + // Take a reference on node so it can be recovered on unmount. + node.IncRef() + mns.mounts[replacement] = []*Dirent{node} + return nil + }) +} + +// Unmount ensures no references to the MountSource remain and removes `node` from +// this subtree. The subtree formerly mounted in `node`'s place will be +// restored. node's MountSource will be destroyed as soon as the last reference to +// `node` is dropped, as no references to Dirents within will remain. +// +// If detachOnly is set, Unmount merely removes `node` from the subtree, but +// allows existing references to the MountSource remain. E.g. if an open file still +// refers to Dirents in MountSource, the Unmount will succeed anyway and MountSource will +// be destroyed at a later time when all references to Dirents within are +// dropped. 
+// +// The caller must hold a reference to node from walking to it. +func (mns *MountNamespace) Unmount(ctx context.Context, node *Dirent, detachOnly bool) error { + // This takes locks to prevent further walks to Dirents in this mount + // under the assumption that `node` is the root of the mount. + return mns.withMountLocked(node, func() error { + origs, ok := mns.mounts[node] + if !ok { + // node is not a mount point. + return syserror.EINVAL + } + + if len(origs) == 0 { + panic("cannot unmount initial dirent") + } + + if !detachOnly { + m := node.Inode.MountSource + + // Lock the parent MountSource first, if it exists. We are + // holding mns.Lock, so the parent can not change out + // from under us. + parent := m.Parent() + if parent != nil { + parent.mu.Lock() + defer parent.mu.Unlock() + } + + // Lock the mount that is being unmounted. + m.mu.Lock() + defer m.mu.Unlock() + + if m.parent != nil { + // Sanity check. + if _, ok := m.parent.children[m]; !ok { + panic(fmt.Sprintf("mount %+v is not a child of parent %+v", m, m.parent)) + } + delete(m.parent.children, m) + m.parent = nil + } + + // Flush all references on the mounted node. + m.FlushDirentRefs() + + // At this point, exactly two references must be held + // to mount: one mount reference on node, and one due + // to walking to node. + // + // We must also be guaranteed that no more references + // can be taken on mount. This is why withMountLocked + // must be held at this point to prevent any walks to + // and from node. + if refs := m.DirentRefs(); refs < 2 { + panic(fmt.Sprintf("have %d refs on unmount, expect 2 or more", refs)) + } else if refs != 2 { + return syserror.EBUSY + } + } + + original := origs[len(origs)-1] + if err := node.unmount(ctx, original); err != nil { + return err + } + + switch { + case len(origs) > 1: + mns.mounts[original] = origs[:len(origs)-1] + case len(origs) == 1: + // Drop mount reference taken at the end of + // MountNamespace.Mount. 
+ original.DecRef() + } + + delete(mns.mounts, node) + return nil + }) +} + +// FindLink returns an Dirent from a given node, which may be a symlink. +// +// The root argument is treated as the root directory, and FindLink will not +// return anything above that. The wd dirent provides the starting directory, +// and may be nil which indicates the root should be used. You must call DecRef +// on the resulting Dirent when you are no longer using the object. +// +// If wd is nil, then the root will be used as the working directory. If the +// path is absolute, this has no functional impact. +// +// Precondition: root must be non-nil. +// Precondition: the path must be non-empty. +func (mns *MountNamespace) FindLink(ctx context.Context, root, wd *Dirent, path string, maxTraversals uint) (*Dirent, error) { + if root == nil { + panic("MountNamespace.FindInode: root must not be nil") + } + if len(path) == 0 { + panic("MountNamespace.FindInode: path is empty") + } + + // Split the path. + first, remainder := SplitFirst(path) + + // Where does this walk originate? + current := wd + if current == nil { + current = root + } + for first == "/" { + // Special case: it's possible that we have nothing to walk at + // all. This is necessary since we're resplitting the path. + if remainder == "" { + root.IncRef() + return root, nil + } + + // Start at the root and advance the path component so that the + // walk below can proceed. Note at this point, it handles the + // no-op walk case perfectly fine. + current = root + first, remainder = SplitFirst(remainder) + } + + current.IncRef() // Transferred during walk. + + for { + // Check that the file is a directory and that we have + // permissions to walk. + // + // Note that we elide this check for the root directory as an + // optimization; a non-executable root may still be walked. A + // non-directory root is hopeless. + if current != root { + if !IsDir(current.Inode.StableAttr) { + current.DecRef() // Drop reference from above. 
+ return nil, syserror.ENOTDIR + } + if err := current.Inode.CheckPermission(ctx, PermMask{Execute: true}); err != nil { + current.DecRef() // Drop reference from above. + return nil, err + } + } + + // Move to the next level. + next, err := current.Walk(ctx, root, first) + if err != nil { + // Allow failed walks to cache the dirent, because no + // children will acquire a reference at the end. + current.maybeExtendReference() + current.DecRef() + return nil, err + } + + // Drop old reference. + current.DecRef() + + if remainder != "" { + // Ensure it's resolved, unless it's the last level. + // + // See resolve for reference semantics; on err next + // will have one dropped. + current, err = mns.resolve(ctx, root, next, maxTraversals) + if err != nil { + return nil, err + } + } else { + // Allow the file system to take an extra reference on the + // found child. This will hold a reference on the containing + // directory, so the whole tree will be implicitly cached. + next.maybeExtendReference() + return next, nil + } + + // Move to the next element. + first, remainder = SplitFirst(remainder) + } +} + +// FindInode is identical to FindLink except the return value is resolved. +// +//go:nosplit +func (mns *MountNamespace) FindInode(ctx context.Context, root, wd *Dirent, path string, maxTraversals uint) (*Dirent, error) { + d, err := mns.FindLink(ctx, root, wd, path, maxTraversals) + if err != nil { + return nil, err + } + + // See resolve for reference semantics; on err d will have the + // reference dropped. + return mns.resolve(ctx, root, d, maxTraversals) +} + +// resolve resolves the given link. +// +// If successful, a reference is dropped on node and one is acquired on the +// caller's behalf for the returned dirent. +// +// If not successful, a reference is _also_ dropped on the node and an error +// returned. This is for convenience in using resolve directly as a return +// value. 
+func (mns *MountNamespace) resolve(ctx context.Context, root, node *Dirent, maxTraversals uint) (*Dirent, error) { + // Resolve the path. + target, err := node.Inode.Getlink(ctx) + + switch err { + case nil: + // Make sure we didn't exhaust the traversal budget. + if maxTraversals == 0 { + target.DecRef() + return nil, syscall.ELOOP + } + + node.DecRef() // Drop the original reference. + return target, nil + + case syscall.ENOLINK: + // Not a symlink. + return node, nil + + case ErrResolveViaReadlink: + defer node.DecRef() // See above. + + // First, check if we should traverse. + if maxTraversals == 0 { + return nil, syscall.ELOOP + } + + // Read the target path. + targetPath, err := node.Inode.Readlink(ctx) + if err != nil { + return nil, err + } + + // Find the node; we resolve relative to the current symlink's parent. + d, err := mns.FindInode(ctx, root, node.parent, targetPath, maxTraversals-1) + if err != nil { + return nil, err + } + + return d, err + + default: + node.DecRef() // Drop for err; see above. + + // Propagate the error. + return nil, err + } +} + +// SyncAll calls Dirent.SyncAll on the root. +func (mns *MountNamespace) SyncAll(ctx context.Context) { + mns.mu.Lock() + defer mns.mu.Unlock() + mns.root.SyncAll(ctx) +} diff --git a/pkg/sentry/fs/mounts_test.go b/pkg/sentry/fs/mounts_test.go new file mode 100644 index 000000000..8669f3a38 --- /dev/null +++ b/pkg/sentry/fs/mounts_test.go @@ -0,0 +1,102 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fs_test + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + ramfstest "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs/test" +) + +// Creates a new MountNamespace with filesystem: +// / (root dir) +// |-foo (dir) +// |-bar (file) +func createMountNamespace(ctx context.Context) (*fs.MountNamespace, error) { + perms := fs.FilePermsFromMode(0777) + m := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}) + + barFile := ramfstest.NewFile(ctx, perms) + fooDir := ramfstest.NewDir(ctx, map[string]*fs.Inode{ + "bar": fs.NewInode(barFile, m, fs.StableAttr{Type: fs.RegularFile}), + }, perms) + rootDir := ramfstest.NewDir(ctx, map[string]*fs.Inode{ + "foo": fs.NewInode(fooDir, m, fs.StableAttr{Type: fs.Directory}), + }, perms) + + return fs.NewMountNamespace(ctx, fs.NewInode(rootDir, m, fs.StableAttr{Type: fs.Directory})) +} + +func TestFindLink(t *testing.T) { + ctx := contexttest.Context(t) + mm, err := createMountNamespace(ctx) + if err != nil { + t.Fatalf("createMountNamespace failed: %v", err) + } + + root := mm.Root() + defer root.DecRef() + foo, err := root.Walk(ctx, root, "foo") + if err != nil { + t.Fatalf("Error walking to foo: %v", err) + } + + // Positive cases. 
+ for _, tc := range []struct { + findPath string + wd *fs.Dirent + wantPath string + }{ + {".", root, "/"}, + {".", foo, "/foo"}, + {"..", foo, "/"}, + {"../../..", foo, "/"}, + {"///foo", foo, "/foo"}, + {"/foo", foo, "/foo"}, + {"/foo/bar", foo, "/foo/bar"}, + {"/foo/.///./bar", foo, "/foo/bar"}, + {"/foo///bar", foo, "/foo/bar"}, + {"/foo/../foo/bar", foo, "/foo/bar"}, + {"foo/bar", root, "/foo/bar"}, + {"foo////bar", root, "/foo/bar"}, + {"bar", foo, "/foo/bar"}, + } { + wdPath, _ := tc.wd.FullName(root) + if d, err := mm.FindLink(ctx, root, tc.wd, tc.findPath, 0); err != nil { + t.Errorf("FindLink(%q, wd=%q) failed: %v", tc.findPath, wdPath, err) + } else if got, _ := d.FullName(root); got != tc.wantPath { + t.Errorf("FindLink(%q, wd=%q) got dirent %q, want %q", tc.findPath, wdPath, got, tc.wantPath) + } + } + + // Negative cases. + for _, tc := range []struct { + findPath string + wd *fs.Dirent + }{ + {"bar", root}, + {"/bar", root}, + {"/foo/../../bar", root}, + {"foo", foo}, + } { + wdPath, _ := tc.wd.FullName(root) + if _, err := mm.FindLink(ctx, root, tc.wd, tc.findPath, 0); err == nil { + t.Errorf("FindLink(%q, wd=%q) did not return error", tc.findPath, wdPath) + } + } +} diff --git a/pkg/sentry/fs/offset.go b/pkg/sentry/fs/offset.go new file mode 100644 index 000000000..7cc8398e6 --- /dev/null +++ b/pkg/sentry/fs/offset.go @@ -0,0 +1,65 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fs + +import ( + "math" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// OffsetPageEnd returns the file offset rounded up to the nearest +// page boundary. OffsetPageEnd panics if rounding up causes overflow, +// which shouldn't be possible given that offset is an int64. +func OffsetPageEnd(offset int64) uint64 { + end, ok := usermem.Addr(offset).RoundUp() + if !ok { + panic("impossible overflow") + } + return uint64(end) +} + +// ReadEndOffset returns an exclusive end offset for a read operation +// so that the read does not overflow an int64 nor size. +// +// Parameters: +// - offset: the starting offset of the read. +// - length: the number of bytes to read. +// - size: the size of the file. +// +// Postconditions: The returned offset is >= offset. +func ReadEndOffset(offset int64, length int64, size int64) int64 { + if offset >= size { + return offset + } + end := offset + length + // Don't overflow. + if end < offset || end > size { + end = size + } + return end +} + +// WriteEndOffset returns an exclusive end offset for a write operation +// so that the write does not overflow an int64. +// +// Parameters: +// - offset: the starting offset of the write. +// - length: the number of bytes to write. +// +// Postconditions: The returned offset is >= offset. +func WriteEndOffset(offset int64, length int64) int64 { + return ReadEndOffset(offset, length, math.MaxInt64) +} diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go new file mode 100644 index 000000000..40eed3feb --- /dev/null +++ b/pkg/sentry/fs/overlay.go @@ -0,0 +1,268 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "sync" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// The virtual filesystem implements an overlay configuration. For a high-level +// description, see README.md. +// +// Note on whiteouts: +// +// This implementation does not use the "Docker-style" whiteouts (symlinks with +// ".wh." prefix). Instead upper filesystem directories support a set of extended +// attributes to encode whiteouts: "trusted.overlay.whiteout.<filename>". This +// gives flexibility to persist whiteouts independently of the filesystem layout +// while additionally preventing name conflicts with files prefixed with ".wh.". +// +// Known deficiencies: +// +// - The device number of two files under the same overlay mount point may be +// different. This can happen if a file is found in the lower filesystem (takes +// the lower filesystem device) and another file is created in the upper +// filesystem (takes the upper filesystem device). This may appear odd but +// should not break applications. +// +// - Registered events on files (i.e. for notification of read/write readiness) +// are not copied across copy up. This is fine in the common case of files that +// do not block. For files that do block, like pipes and sockets, copy up is not +// supported. +// +// - Hardlinks in a lower filesystem are broken by copy up. 
For this reason, no +// attempt is made to preserve link count across copy up. +// +// - The maximum length of an extended attribute name is the same as the maximum +// length of a file path in Linux (XATTR_NAME_MAX == NAME_MAX). This means that +// whiteout attributes, if set directly on the host, are limited additionally by +// the extra whiteout prefix length (file paths must be strictly shorter than +// NAME_MAX). This is not a problem for in-memory filesystems which don't enforce +// XATTR_NAME_MAX. + +const ( + // XattrOverlayPrefix is the prefix for extended attributes that affect + // the behavior of an overlay. + XattrOverlayPrefix = "trusted.overlay." + + // XattrOverlayWhiteoutPrefix is the prefix for extended attributes + // that indicate that a whiteout exists. + XattrOverlayWhiteoutPrefix = XattrOverlayPrefix + "whiteout." +) + +// XattrOverlayWhiteout returns an extended attribute that indicates a +// whiteout exists for name. It is supported by directories that wish to +// mask the existence of name. +func XattrOverlayWhiteout(name string) string { + return XattrOverlayWhiteoutPrefix + name +} + +// NewOverlayRoot produces the root of an overlay. +// +// Preconditions: +// +// - upper and lower must be non-nil. +// - lower should not expose character devices, pipes, or sockets, because +// copying up these types of files is not supported. +// - upper and lower must not require that file objects be revalidated. +// - upper and lower must not have dynamic file/directory content. 
+func NewOverlayRoot(ctx context.Context, upper *Inode, lower *Inode, flags MountSourceFlags) (*Inode, error) { + if !IsDir(upper.StableAttr) { + return nil, fmt.Errorf("upper Inode is not a directory") + } + if !IsDir(lower.StableAttr) { + return nil, fmt.Errorf("lower Inode is not a directory") + } + + msrc := newOverlayMountSource(upper.MountSource, lower.MountSource, flags) + overlay, err := newOverlayEntry(ctx, upper, lower, true) + if err != nil { + msrc.DecRef() + return nil, err + } + + return newOverlayInode(ctx, overlay, msrc), nil +} + +// newOverlayInode creates a new Inode for an overlay. +func newOverlayInode(ctx context.Context, o *overlayEntry, msrc *MountSource) *Inode { + var inode *Inode + if o.upper != nil { + inode = NewInode(nil, msrc, o.upper.StableAttr) + } else { + inode = NewInode(nil, msrc, o.lower.StableAttr) + } + inode.overlay = o + return inode +} + +// overlayEntry is the overlay metadata of an Inode. It implements Mappable. +type overlayEntry struct { + // lowerExists is true if an Inode exists for this file in the lower + // filesystem. If lowerExists is true, then the overlay must create + // a whiteout entry when renaming and removing this entry to mask the + // lower Inode. + // + // Note that this is distinct from actually holding onto a non-nil + // lower Inode (below). The overlay does not need to keep a lower Inode + // around unless it needs to operate on it, but it always needs to know + // whether the lower Inode exists to correctly execute a rename or + // remove operation. + lowerExists bool + + // lower is an Inode from a lower filesystem. Modifications are + // never made on this Inode. + lower *Inode + + // copyMu serializes copy-up for operations above + // mm.MemoryManager.mappingMu in the lock order. + copyMu sync.RWMutex `state:"nosave"` + + // mapsMu serializes copy-up for operations between + // mm.MemoryManager.mappingMu and mm.MemoryManager.activeMu in the lock + // order. 
+	mapsMu sync.Mutex `state:"nosave"`
+
+	// mappings tracks memory mappings of this Mappable so they can be removed
+	// from the lower filesystem Mappable and added to the upper filesystem
+	// Mappable when copy up occurs. It is strictly unnecessary after copy-up.
+	//
+	// mappings is protected by mapsMu.
+	mappings memmap.MappingSet
+
+	// dataMu serializes copy-up for operations below mm.MemoryManager.activeMu
+	// in the lock order.
+	dataMu sync.RWMutex `state:"nosave"`
+
+	// upper is an Inode from an upper filesystem. It is non-nil if
+	// the file exists in the upper filesystem. It becomes non-nil
+	// when the Inode that owns this overlayEntry is modified.
+	//
+	// upper is protected by all of copyMu, mapsMu, and dataMu. Holding any of
+	// these locks is sufficient to read upper; holding all three for writing
+	// is required to mutate it.
+	upper *Inode
+}
+
+// newOverlayEntry returns a new overlayEntry.
+func newOverlayEntry(ctx context.Context, upper *Inode, lower *Inode, lowerExists bool) (*overlayEntry, error) {
+	if upper == nil && lower == nil {
+		panic("invalid overlayEntry, needs at least one Inode")
+	}
+	if upper != nil && upper.overlay != nil {
+		panic("nested writable layers are not supported")
+	}
+	// Check for supported lower filesystem types.
+	if lower != nil {
+		switch lower.StableAttr.Type {
+		case RegularFile, Directory, Symlink, Socket:
+		default:
+			// We don't support copying up from character devices,
+			// named pipes, or anything weird (like proc files).
+			log.Warningf("%s not supported in lower filesystem", lower.StableAttr.Type)
+			return nil, syserror.EINVAL
+		}
+	}
+	return &overlayEntry{
+		lowerExists: lowerExists,
+		lower:       lower,
+		upper:       upper,
+	}, nil
+}
+
+func (o *overlayEntry) release() {
+	// We drop a reference on upper and lower file system Inodes
+	// rather than releasing them, because in-memory filesystems
+	// may hold an extra reference to these Inodes so that they
+	// stay in memory.
+ if o.upper != nil { + o.upper.DecRef() + } + if o.lower != nil { + o.lower.DecRef() + } +} + +// overlayUpperMountSource gives the upper mount of an overlay mount. +// +// The caller may not use this MountSource past the lifetime of overlayMountSource and may +// not call DecRef on it. +func overlayUpperMountSource(overlayMountSource *MountSource) *MountSource { + return overlayMountSource.MountSourceOperations.(*overlayMountSourceOperations).upper +} + +// Preconditions: At least one of o.copyMu, o.mapsMu, or o.dataMu must be locked. +func (o *overlayEntry) inodeLocked() *Inode { + if o.upper != nil { + return o.upper + } + return o.lower +} + +// Preconditions: At least one of o.copyMu, o.mapsMu, or o.dataMu must be locked. +func (o *overlayEntry) isMappableLocked() bool { + return o.inodeLocked().Mappable() != nil +} + +// AddMapping implements memmap.Mappable.AddMapping. +func (o *overlayEntry) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error { + o.mapsMu.Lock() + defer o.mapsMu.Unlock() + if err := o.inodeLocked().Mappable().AddMapping(ctx, ms, ar, offset); err != nil { + return err + } + o.mappings.AddMapping(ms, ar, offset) + return nil +} + +// RemoveMapping implements memmap.Mappable.RemoveMapping. +func (o *overlayEntry) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) { + o.mapsMu.Lock() + defer o.mapsMu.Unlock() + o.inodeLocked().Mappable().RemoveMapping(ctx, ms, ar, offset) + o.mappings.RemoveMapping(ms, ar, offset) +} + +// CopyMapping implements memmap.Mappable.CopyMapping. 
+func (o *overlayEntry) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error { + o.mapsMu.Lock() + defer o.mapsMu.Unlock() + if err := o.inodeLocked().Mappable().CopyMapping(ctx, ms, srcAR, dstAR, offset); err != nil { + return err + } + o.mappings.AddMapping(ms, dstAR, offset) + return nil +} + +// Translate implements memmap.Mappable.Translate. +func (o *overlayEntry) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { + o.dataMu.RLock() + defer o.dataMu.RUnlock() + return o.inodeLocked().Mappable().Translate(ctx, required, optional, at) +} + +// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable. +func (o *overlayEntry) InvalidateUnsavable(ctx context.Context) error { + o.mapsMu.Lock() + defer o.mapsMu.Unlock() + return o.inodeLocked().Mappable().InvalidateUnsavable(ctx) +} diff --git a/pkg/sentry/fs/path.go b/pkg/sentry/fs/path.go new file mode 100644 index 000000000..b74f6ed8c --- /dev/null +++ b/pkg/sentry/fs/path.go @@ -0,0 +1,92 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +// TrimTrailingSlashes trims any trailing slashes. +// +// The returned boolean indicates whether any changes were made. +// +//go:nosplit +func TrimTrailingSlashes(dir string) (trimmed string, changed bool) { + // Trim the trailing slash, except for root. 
+ for len(dir) > 1 && dir[len(dir)-1] == '/' { + dir = dir[:len(dir)-1] + changed = true + } + return dir, changed +} + +// SplitLast splits the given path into a directory and a file. +// +// The "absoluteness" of the path is preserved, but dir is always stripped of +// trailing slashes. +// +//go:nosplit +func SplitLast(path string) (dir, file string) { + path, _ = TrimTrailingSlashes(path) + if path == "" { + return ".", "." + } else if path == "/" { + return "/", "." + } + + var slash int // Last location of slash in path. + for slash = len(path) - 1; slash >= 0 && path[slash] != '/'; slash-- { + } + switch { + case slash < 0: + return ".", path + case slash == 0: + // Directory of the form "/foo", or just "/". We need to + // preserve the first slash here, since it indicates an + // absolute path. + return "/", path[1:] + default: + // Drop the trailing slash. + dir, _ = TrimTrailingSlashes(path[:slash]) + return dir, path[slash+1:] + } +} + +// SplitFirst splits the given path into a first directory and the remainder. +// +// If remainder is empty, then the path is a single element. +// +//go:nosplit +func SplitFirst(path string) (current, remainder string) { + path, _ = TrimTrailingSlashes(path) + if path == "" { + return ".", "" + } + + var slash int // First location of slash in path. + for slash = 0; slash < len(path) && path[slash] != '/'; slash++ { + } + switch { + case slash >= len(path): + return path, "" + case slash == 0: + // See above. + return "/", path[1:] + default: + current = path[:slash] + remainder = path[slash+1:] + // Strip redundant slashes. + for len(remainder) > 0 && remainder[0] == '/' { + remainder = remainder[1:] + } + return current, remainder + } +} diff --git a/pkg/sentry/fs/path_test.go b/pkg/sentry/fs/path_test.go new file mode 100644 index 000000000..7ab070855 --- /dev/null +++ b/pkg/sentry/fs/path_test.go @@ -0,0 +1,211 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "testing" +) + +// TestSplitLast tests variants of path splitting. +func TestSplitLast(t *testing.T) { + cases := []struct { + path string + dir string + file string + }{ + {path: "/", dir: "/", file: "."}, + {path: "/.", dir: "/", file: "."}, + {path: "/./", dir: "/", file: "."}, + {path: "/./.", dir: "/.", file: "."}, + {path: "/././", dir: "/.", file: "."}, + {path: "/./..", dir: "/.", file: ".."}, + {path: "/./../", dir: "/.", file: ".."}, + {path: "/..", dir: "/", file: ".."}, + {path: "/../", dir: "/", file: ".."}, + {path: "/../.", dir: "/..", file: "."}, + {path: "/.././", dir: "/..", file: "."}, + {path: "/../..", dir: "/..", file: ".."}, + {path: "/../../", dir: "/..", file: ".."}, + + {path: "", dir: ".", file: "."}, + {path: ".", dir: ".", file: "."}, + {path: "./", dir: ".", file: "."}, + {path: "./.", dir: ".", file: "."}, + {path: "././", dir: ".", file: "."}, + {path: "./..", dir: ".", file: ".."}, + {path: "./../", dir: ".", file: ".."}, + {path: "..", dir: ".", file: ".."}, + {path: "../", dir: ".", file: ".."}, + {path: "../.", dir: "..", file: "."}, + {path: ".././", dir: "..", file: "."}, + {path: "../..", dir: "..", file: ".."}, + {path: "../../", dir: "..", file: ".."}, + + {path: "/foo", dir: "/", file: "foo"}, + {path: "/foo/", dir: "/", file: "foo"}, + {path: "/foo/.", dir: "/foo", file: "."}, + {path: "/foo/./", dir: "/foo", file: "."}, + {path: 
"/foo/./.", dir: "/foo/.", file: "."}, + {path: "/foo/./..", dir: "/foo/.", file: ".."}, + {path: "/foo/..", dir: "/foo", file: ".."}, + {path: "/foo/../", dir: "/foo", file: ".."}, + {path: "/foo/../.", dir: "/foo/..", file: "."}, + {path: "/foo/../..", dir: "/foo/..", file: ".."}, + + {path: "/foo/bar", dir: "/foo", file: "bar"}, + {path: "/foo/bar/", dir: "/foo", file: "bar"}, + {path: "/foo/bar/.", dir: "/foo/bar", file: "."}, + {path: "/foo/bar/./", dir: "/foo/bar", file: "."}, + {path: "/foo/bar/./.", dir: "/foo/bar/.", file: "."}, + {path: "/foo/bar/./..", dir: "/foo/bar/.", file: ".."}, + {path: "/foo/bar/..", dir: "/foo/bar", file: ".."}, + {path: "/foo/bar/../", dir: "/foo/bar", file: ".."}, + {path: "/foo/bar/../.", dir: "/foo/bar/..", file: "."}, + {path: "/foo/bar/../..", dir: "/foo/bar/..", file: ".."}, + + {path: "foo", dir: ".", file: "foo"}, + {path: "foo", dir: ".", file: "foo"}, + {path: "foo/", dir: ".", file: "foo"}, + {path: "foo/.", dir: "foo", file: "."}, + {path: "foo/./", dir: "foo", file: "."}, + {path: "foo/./.", dir: "foo/.", file: "."}, + {path: "foo/./..", dir: "foo/.", file: ".."}, + {path: "foo/..", dir: "foo", file: ".."}, + {path: "foo/../", dir: "foo", file: ".."}, + {path: "foo/../.", dir: "foo/..", file: "."}, + {path: "foo/../..", dir: "foo/..", file: ".."}, + {path: "foo/", dir: ".", file: "foo"}, + {path: "foo/.", dir: "foo", file: "."}, + + {path: "foo/bar", dir: "foo", file: "bar"}, + {path: "foo/bar/", dir: "foo", file: "bar"}, + {path: "foo/bar/.", dir: "foo/bar", file: "."}, + {path: "foo/bar/./", dir: "foo/bar", file: "."}, + {path: "foo/bar/./.", dir: "foo/bar/.", file: "."}, + {path: "foo/bar/./..", dir: "foo/bar/.", file: ".."}, + {path: "foo/bar/..", dir: "foo/bar", file: ".."}, + {path: "foo/bar/../", dir: "foo/bar", file: ".."}, + {path: "foo/bar/../.", dir: "foo/bar/..", file: "."}, + {path: "foo/bar/../..", dir: "foo/bar/..", file: ".."}, + {path: "foo/bar/", dir: "foo", file: "bar"}, + {path: "foo/bar/.", dir: 
"foo/bar", file: "."}, + } + + for _, c := range cases { + dir, file := SplitLast(c.path) + if dir != c.dir || file != c.file { + t.Errorf("SplitLast(%q) got (%q, %q), expected (%q, %q)", c.path, dir, file, c.dir, c.file) + } + } +} + +// TestSplitFirst tests variants of path splitting. +func TestSplitFirst(t *testing.T) { + cases := []struct { + path string + first string + remainder string + }{ + {path: "/", first: "/", remainder: ""}, + {path: "/.", first: "/", remainder: "."}, + {path: "///.", first: "/", remainder: "//."}, + {path: "/.///", first: "/", remainder: "."}, + {path: "/./.", first: "/", remainder: "./."}, + {path: "/././", first: "/", remainder: "./."}, + {path: "/./..", first: "/", remainder: "./.."}, + {path: "/./../", first: "/", remainder: "./.."}, + {path: "/..", first: "/", remainder: ".."}, + {path: "/../", first: "/", remainder: ".."}, + {path: "/../.", first: "/", remainder: "../."}, + {path: "/.././", first: "/", remainder: "../."}, + {path: "/../..", first: "/", remainder: "../.."}, + {path: "/../../", first: "/", remainder: "../.."}, + + {path: "", first: ".", remainder: ""}, + {path: ".", first: ".", remainder: ""}, + {path: "./", first: ".", remainder: ""}, + {path: ".///", first: ".", remainder: ""}, + {path: "./.", first: ".", remainder: "."}, + {path: "././", first: ".", remainder: "."}, + {path: "./..", first: ".", remainder: ".."}, + {path: "./../", first: ".", remainder: ".."}, + {path: "..", first: "..", remainder: ""}, + {path: "../", first: "..", remainder: ""}, + {path: "../.", first: "..", remainder: "."}, + {path: ".././", first: "..", remainder: "."}, + {path: "../..", first: "..", remainder: ".."}, + {path: "../../", first: "..", remainder: ".."}, + + {path: "/foo", first: "/", remainder: "foo"}, + {path: "/foo/", first: "/", remainder: "foo"}, + {path: "/foo///", first: "/", remainder: "foo"}, + {path: "/foo/.", first: "/", remainder: "foo/."}, + {path: "/foo/./", first: "/", remainder: "foo/."}, + {path: "/foo/./.", 
first: "/", remainder: "foo/./."}, + {path: "/foo/./..", first: "/", remainder: "foo/./.."}, + {path: "/foo/..", first: "/", remainder: "foo/.."}, + {path: "/foo/../", first: "/", remainder: "foo/.."}, + {path: "/foo/../.", first: "/", remainder: "foo/../."}, + {path: "/foo/../..", first: "/", remainder: "foo/../.."}, + + {path: "/foo/bar", first: "/", remainder: "foo/bar"}, + {path: "///foo/bar", first: "/", remainder: "//foo/bar"}, + {path: "/foo///bar", first: "/", remainder: "foo///bar"}, + {path: "/foo/bar/.", first: "/", remainder: "foo/bar/."}, + {path: "/foo/bar/./", first: "/", remainder: "foo/bar/."}, + {path: "/foo/bar/./.", first: "/", remainder: "foo/bar/./."}, + {path: "/foo/bar/./..", first: "/", remainder: "foo/bar/./.."}, + {path: "/foo/bar/..", first: "/", remainder: "foo/bar/.."}, + {path: "/foo/bar/../", first: "/", remainder: "foo/bar/.."}, + {path: "/foo/bar/../.", first: "/", remainder: "foo/bar/../."}, + {path: "/foo/bar/../..", first: "/", remainder: "foo/bar/../.."}, + + {path: "foo", first: "foo", remainder: ""}, + {path: "foo", first: "foo", remainder: ""}, + {path: "foo/", first: "foo", remainder: ""}, + {path: "foo///", first: "foo", remainder: ""}, + {path: "foo/.", first: "foo", remainder: "."}, + {path: "foo/./", first: "foo", remainder: "."}, + {path: "foo/./.", first: "foo", remainder: "./."}, + {path: "foo/./..", first: "foo", remainder: "./.."}, + {path: "foo/..", first: "foo", remainder: ".."}, + {path: "foo/../", first: "foo", remainder: ".."}, + {path: "foo/../.", first: "foo", remainder: "../."}, + {path: "foo/../..", first: "foo", remainder: "../.."}, + {path: "foo/", first: "foo", remainder: ""}, + {path: "foo/.", first: "foo", remainder: "."}, + + {path: "foo/bar", first: "foo", remainder: "bar"}, + {path: "foo///bar", first: "foo", remainder: "bar"}, + {path: "foo/bar/", first: "foo", remainder: "bar"}, + {path: "foo/bar/.", first: "foo", remainder: "bar/."}, + {path: "foo/bar/./", first: "foo", remainder: "bar/."}, + 
{path: "foo/bar/./.", first: "foo", remainder: "bar/./."}, + {path: "foo/bar/./..", first: "foo", remainder: "bar/./.."}, + {path: "foo/bar/..", first: "foo", remainder: "bar/.."}, + {path: "foo/bar/../", first: "foo", remainder: "bar/.."}, + {path: "foo/bar/../.", first: "foo", remainder: "bar/../."}, + {path: "foo/bar/../..", first: "foo", remainder: "bar/../.."}, + {path: "foo/bar/", first: "foo", remainder: "bar"}, + {path: "foo/bar/.", first: "foo", remainder: "bar/."}, + } + + for _, c := range cases { + first, remainder := SplitFirst(c.path) + if first != c.first || remainder != c.remainder { + t.Errorf("SplitFirst(%q) got (%q, %q), expected (%q, %q)", c.path, first, remainder, c.first, c.remainder) + } + } +} diff --git a/pkg/sentry/fs/proc/BUILD b/pkg/sentry/fs/proc/BUILD new file mode 100644 index 000000000..18372cfbf --- /dev/null +++ b/pkg/sentry/fs/proc/BUILD @@ -0,0 +1,95 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "proc_state", + srcs = [ + "cpuinfo.go", + "exec_args.go", + "fds.go", + "file.go", + "filesystems.go", + "fs.go", + "loadavg.go", + "meminfo.go", + "mounts.go", + "net.go", + "proc.go", + "stat.go", + "sys.go", + "sys_net.go", + "task.go", + "uid_gid_map.go", + "uptime.go", + "version.go", + ], + out = "proc_state.go", + package = "proc", +) + +go_library( + name = "proc", + srcs = [ + "cpuinfo.go", + "exec_args.go", + "fds.go", + "file.go", + "filesystems.go", + "fs.go", + "loadavg.go", + "meminfo.go", + "mounts.go", + "net.go", + "proc.go", + "proc_state.go", + "stat.go", + "sys.go", + "sys_net.go", + "task.go", + "uid_gid_map.go", + "uptime.go", + "version.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/amutex", + "//pkg/log", + "//pkg/sentry/arch", + "//pkg/sentry/context", + 
"//pkg/sentry/fs", + "//pkg/sentry/fs/proc/device", + "//pkg/sentry/fs/proc/seqfile", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/inet", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/mm", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserr", + "//pkg/syserror", + ], +) + +go_test( + name = "proc_test", + size = "small", + srcs = [ + "net_test.go", + "sys_net_test.go", + ], + embed = [":proc"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/context", + "//pkg/sentry/inet", + "//pkg/sentry/usermem", + ], +) diff --git a/pkg/sentry/fs/proc/README.md b/pkg/sentry/fs/proc/README.md new file mode 100644 index 000000000..c510ee63a --- /dev/null +++ b/pkg/sentry/fs/proc/README.md @@ -0,0 +1,317 @@ +This document tracks what is implemented in procfs. Refer to +Documentation/filesystems/proc.txt in the Linux project for information about +procfs generally. + +**NOTE**: This document is not guaranteed to be up to date. If you find an +inconsistency, please file a bug. 
+ +[TOC] +## Kernel data + +The following files are implemented: + +| File /proc/ | Content | +| :------------------------ | :----------------------------------------------- | +| [cpuinfo](#cpuinfo) | Info about the CPU | +| [filesystem](#filesystem) | Supported filesystems | +| [loadavg](#loadavg) | Load average of last 1, 5 & 15 minutes | +| [meminfo](#meminfo) | Overall memory info | +| [stat](#stat) | Overall kernel statistics | +| [sys](#sys) | Change parameters within the kernel | +| [uptime](#uptime) | Wall clock since boot, combined idle time of all | +: : cpus : +| [version](#version) | Kernel version | + +### cpuinfo + +```bash +$ cat /proc/cpuinfo +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : unknown +stepping : unknown +cpu MHz : 1234.588 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx xsaveopt +bogomips : 1234.59 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +... +``` + +Notable divergences: + +Field name | Notes +:--------------- | :--------------------------------------- +model name | Always unknown +stepping | Always unknown +fpu | Always yes +fpu_exception | Always yes +wp | Always yes +bogomips | Bogus value (matches cpu MHz) +clflush size | Always 64 +cache_alignment | Always 64 +address sizes | Always 46 bits physical, 48 bits virtual +power management | Always blank + +Otherwise fields are derived from the SentryCPUIDSpec proto config. 
+ +### filesystem + +```bash +$ cat /proc/filesystems +nodev 9p +nodev devtmpfs +nodev proc +nodev ramdiskfs +nodev sysfs +nodev tmpfs +``` + +Notable divergences: + +Filesystem | Notes +:--------- | :-------------------------------------------------------- +ramdiskfs | No Linux equivalent, see the SentryRamdiskFS proto config + +### loadavg + +```bash +$ cat /proc/loadavg +0.00 0.00 0.00 0/0 0 +``` + +Column | Notes +:------------------------------------ | :---------- +CPU.IO utilization in last 1 minute | Always zero +CPU.IO utilization in last 5 minutes | Always zero +CPU.IO utilization in last 15 minutes | Always zero +Num currently running processes | Always zero +Total num processes | Always zero + +TODO: Populate the columns with accurate statistics. +### meminfo + +```bash +$ cat /proc/meminfo +MemTotal: 2097152 kB +MemFree: 2083540 kB +MemAvailable: 2083540 kB +Buffers: 0 kB +Cached: 4428 kB +SwapCache: 0 kB +Active: 10812 kB +Inactive: 2216 kB +Active(anon): 8600 kB +Inactive(anon): 0 kB +Active(file): 2212 kB +Inactive(file): 2216 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 0 kB +SwapFree: 0 kB +Dirty: 0 kB +Writeback: 0 kB +AnonPages: 8600 kB +Mapped: 4428 kB +Shmem: 0 kB + +``` + +Notable divergences: + +Field name | Notes +:---------------- | :-------------------------------------------------------- +Buffers | Always zero, no block devices +SwapCache | Always zero, no swap +Inactive(anon) | Always zero, see SwapCache +Unevictable | Always zero TODO +Mlocked | Always zero TODO +SwapTotal | Always zero, no swap +SwapFree | Always zero, no swap +Dirty | Always zero TODO +Writeback | Always zero TODO +MemAvailable | Uses the same value as MemFree since there is no swap. 
+Slab | Missing +SReclaimable | Missing +SUnreclaim | Missing +KernelStack | Missing +PageTables | Missing +NFS_Unstable | Missing +Bounce | Missing +WritebackTmp | Missing +CommitLimit | Missing +Committed_AS | Missing +VmallocTotal | Missing +VmallocUsed | Missing +VmallocChunk | Missing +HardwareCorrupted | Missing +AnonHugePages | Missing +ShmemHugePages | Missing +ShmemPmdMapped | Missing +HugePages_Total | Missing +HugePages_Free | Missing +HugePages_Rsvd | Missing +HugePages_Surp | Missing +Hugepagesize | Missing +DirectMap4k | Missing +DirectMap2M | Missing +DirectMap1G | Missing + +See [Memory +Accounting](pkg/sentry/usage/g3doc/memory-accounting.md) +for general caveats. + +### stat + +```bash +$ cat /proc/stat +cpu 0 0 0 0 0 0 0 0 0 0 +cpu0 0 0 0 0 0 0 0 0 0 0 +cpu1 0 0 0 0 0 0 0 0 0 0 +cpu2 0 0 0 0 0 0 0 0 0 0 +cpu3 0 0 0 0 0 0 0 0 0 0 +cpu4 0 0 0 0 0 0 0 0 0 0 +cpu5 0 0 0 0 0 0 0 0 0 0 +cpu6 0 0 0 0 0 0 0 0 0 0 +cpu7 0 0 0 0 0 0 0 0 0 0 +intr 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 0 +btime 1504040968 +processes 0 +procs_running 0 +procs_blokkcked 0 +softirq 0 0 0 0 0 0 0 0 0 0 0 +``` + +All fields except for `btime` are always zero. +TODO: Populate with accurate fields. 
+ +### sys + +```bash +$ ls /proc/sys +kernel vm +``` + +Directory | Notes +:-------- | :---------------------------- +abi | Missing +debug | Missing +dev | Missing +fs | Missing +kernel | Contains hostname (only) +net | Missing +user | Missing +vm | Contains mmap_min_addr (only) + +### uptime + +```bash +$ cat /proc/uptime +3204.62 0.00 +``` + +Column | Notes +:------------------------------- | :---------------------------- +Total num seconds system running | Time since procfs was mounted +Number of seconds idle | Always zero + +### version + +```bash +$ cat /proc/version +Linux version 3.11.10 #1 SMP Fri Nov 29 10:47:50 PST 2013 +``` + +## Process-specific data + +The following files are implemented: + +File /proc/PID | Content +:------------------ | :--------------------------------------------------- +[auxv](#auxv) | Copy of auxiliary vector for the process +[cmdline](#cmdline) | Command line arguments +[comm](#comm) | Command name associated with the process +[exe](#exe) | Symlink to the process's executable +[fd](#fd) | Directory containing links to open file descriptors +[fdinfo](#fdinfo) | Information associated with open file descriptors +[gid_map](#gid_map) | Mappings for group IDs inside the user namespace +[io](#io) | IO statistics +[maps](#maps) | Memory mappings (anon, executables, library files) +[ns](#ns) | Directory containing info about supported namespaces +[stat](#stat) | Process statistics +[status](#status) | Process status in human readable format +[task](#task) | Directory containing info about running threads +[uid_map](#uid_map) | Mappings for user IDs inside the user namespace + +### auxv + +TODO + +### cmdline + +TODO + +### comm + +TODO + +### exe + +TODO + +### fd + +TODO + +### fdinfo + +TODO + +### gid_map + +TODO + +### io + +Only has data for rchar, wchar, syscr, and syscw. + +TODO: add more detail. 
+ +### maps + +TODO + +### ns + +TODO + +### stat + +Only has data for pid, comm, state, ppid, utime, stime, cutime, cstime, +num_threads, and exit_signal. + +TODO: add more detail. + +### status + +Statically created, most of the fields have no data. + +TODO: add more detail. + +### task + +TODO + +### uid_map + +TODO diff --git a/pkg/sentry/fs/proc/cpuinfo.go b/pkg/sentry/fs/proc/cpuinfo.go new file mode 100644 index 000000000..f80aaa5b1 --- /dev/null +++ b/pkg/sentry/fs/proc/cpuinfo.go @@ -0,0 +1,64 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "io" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// cpuinfo is a file describing the CPU capabilities. +// +// Presently cpuinfo never changes, so it doesn't need to be a SeqFile. +type cpuinfo struct { + ramfs.Entry + + // k is the system kernel. + k *kernel.Kernel +} + +// DeprecatedPreadv implements fs.InodeOperations.DeprecatedPreadv. +func (c *cpuinfo) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + features := c.k.FeatureSet() + if features == nil { + // Kernel is always initialized with a FeatureSet. 
+ panic("cpuinfo read with nil FeatureSet") + } + + contents := make([]byte, 0, 1024) + for i, max := uint(0), c.k.ApplicationCores(); i < max; i++ { + contents = append(contents, []byte(features.CPUInfo(i))...) + } + if offset >= int64(len(contents)) { + return 0, io.EOF + } + + n, err := dst.CopyOut(ctx, contents[offset:]) + return int64(n), err +} + +func (p *proc) newCPUInfo(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + f := &cpuinfo{ + k: p.k, + } + f.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0444)) + + return newFile(f, msrc, fs.SpecialFile, nil) +} diff --git a/pkg/sentry/fs/proc/device/BUILD b/pkg/sentry/fs/proc/device/BUILD new file mode 100644 index 000000000..b62062bd7 --- /dev/null +++ b/pkg/sentry/fs/proc/device/BUILD @@ -0,0 +1,11 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "device", + srcs = ["device.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/device", + visibility = ["//pkg/sentry:internal"], + deps = ["//pkg/sentry/device"], +) diff --git a/pkg/sentry/fs/proc/device/device.go b/pkg/sentry/fs/proc/device/device.go new file mode 100644 index 000000000..6194afe88 --- /dev/null +++ b/pkg/sentry/fs/proc/device/device.go @@ -0,0 +1,23 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package device contains the proc device to avoid dependency loops. 
+package device + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/device" +) + +// ProcDevice is the kernel proc device. +var ProcDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/proc/exec_args.go b/pkg/sentry/fs/proc/exec_args.go new file mode 100644 index 000000000..0e1523bf1 --- /dev/null +++ b/pkg/sentry/fs/proc/exec_args.go @@ -0,0 +1,129 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + "io" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// execArgType enumerates the types of exec arguments that are exposed through +// proc. +type execArgType int + +const ( + cmdlineExecArg execArgType = iota + environExecArg +) + +// execArgFile is a file containing the exec args (either cmdline or environ) +// for a given task. +type execArgFile struct { + ramfs.Entry + + // arg is the type of exec argument this file contains. + arg execArgType + + // t is the Task to read the exec arg line from. + t *kernel.Task +} + +// newExecArgFile creates a file containing the exec args of the given type. 
+func newExecArgFile(t *kernel.Task, msrc *fs.MountSource, arg execArgType) *fs.Inode { + if arg != cmdlineExecArg && arg != environExecArg { + panic(fmt.Sprintf("unknown exec arg type %v", arg)) + } + f := &execArgFile{ + arg: arg, + t: t, + } + f.InitEntry(t, fs.RootOwner, fs.FilePermsFromMode(0444)) + return newFile(f, msrc, fs.SpecialFile, t) +} + +// DeprecatedPreadv reads the exec arg from the process's address space.. +func (f *execArgFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + if offset < 0 { + return 0, syserror.EINVAL + } + + // N.B. Linux 4.2 eliminates the arbitrary one page limit. + if offset > usermem.PageSize { + return 0, io.EOF + } + dst = dst.TakeFirst64(usermem.PageSize - offset) + + m, err := getTaskMM(f.t) + if err != nil { + return 0, err + } + defer m.DecUsers(ctx) + + // Figure out the bounds of the exec arg we are trying to read. + var execArgStart, execArgEnd usermem.Addr + switch f.arg { + case cmdlineExecArg: + execArgStart, execArgEnd = m.ArgvStart(), m.ArgvEnd() + case environExecArg: + execArgStart, execArgEnd = m.EnvvStart(), m.EnvvEnd() + default: + panic(fmt.Sprintf("unknown exec arg type %v", f.arg)) + } + if execArgStart == 0 || execArgEnd == 0 { + // Don't attempt to read before the start/end are set up. + return 0, io.EOF + } + + start, ok := execArgStart.AddLength(uint64(offset)) + if !ok { + return 0, io.EOF + } + if start >= execArgEnd { + return 0, io.EOF + } + + length := int(execArgEnd - start) + if dstlen := dst.NumBytes(); int64(length) > dstlen { + length = int(dstlen) + } + + buf := make([]byte, length) + // N.B. Technically this should be usermem.IOOpts.IgnorePermissions = true + // until Linux 4.9 (272ddc8b3735 "proc: don't use FOLL_FORCE for reading + // cmdline and environment"). + copyN, copyErr := m.CopyIn(ctx, start, buf, usermem.IOOpts{}) + if copyN == 0 { + // Nothing to copy. 
+ return 0, copyErr + } + buf = buf[:copyN] + + // TODO: On Linux, if the NUL byte at the end of the + // argument vector has been overwritten, it continues reading the + // environment vector as part of the argument vector. + + n, dstErr := dst.CopyOut(ctx, buf) + if dstErr != nil { + return int64(n), dstErr + } + return int64(n), copyErr +} diff --git a/pkg/sentry/fs/proc/fds.go b/pkg/sentry/fs/proc/fds.go new file mode 100644 index 000000000..2eca9ac31 --- /dev/null +++ b/pkg/sentry/fs/proc/fds.go @@ -0,0 +1,258 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + "sort" + "strconv" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// walkDescriptors finds the descriptor (file-flag pair) for the fd identified +// by p, and calls the toInodeOperations callback with that descriptor. This is a helper +// method for implementing fs.InodeOperations.Lookup. 
+func walkDescriptors(t *kernel.Task, p string, toInode func(*fs.File, kernel.FDFlags) *fs.Inode) (*fs.Inode, error) { + n, err := strconv.ParseUint(p, 10, 64) + if err != nil { + // Not found. + return nil, syserror.ENOENT + } + + var file *fs.File + var flags kernel.FDFlags + t.WithMuLocked(func(t *kernel.Task) { + if fdm := t.FDMap(); fdm != nil { + file, flags = fdm.GetDescriptor(kdefs.FD(n)) + } + }) + if file == nil { + return nil, syserror.ENOENT + } + return toInode(file, flags), nil +} + +// readDescriptors reads fds in the task starting at offset, and calls the +// toDentAttr callback for each to get a DentAttr, which it then emits. This is +// a helper for implementing fs.InodeOperations.Readdir. +func readDescriptors(t *kernel.Task, c *fs.DirCtx, offset int, toDentAttr func(int) fs.DentAttr) (int, error) { + var fds kernel.FDs + t.WithMuLocked(func(t *kernel.Task) { + if fdm := t.FDMap(); fdm != nil { + fds = fdm.GetFDs() + } + }) + + fdInts := make([]int, 0, len(fds)) + for _, fd := range fds { + fdInts = append(fdInts, int(fd)) + } + + // Find the fd to start at. + idx := sort.SearchInts(fdInts, offset) + if idx == len(fdInts) { + return offset, nil + } + fdInts = fdInts[idx:] + + var fd int + for _, fd = range fdInts { + name := strconv.FormatUint(uint64(fd), 10) + if err := c.DirEmit(name, toDentAttr(fd)); err != nil { + // Returned offset is the next fd to serialize. + return fd, err + } + } + // We serialized them all. Next offset should be higher than last + // serialized fd. + return fd + 1, nil +} + +// fd is a single file in /proc/TID/fd/. +type fd struct { + ramfs.Symlink + *fs.File +} + +// newFD returns a new fd based on an existing file. +// +// This inherits one reference to the file. 
+func newFd(t *kernel.Task, f *fs.File, msrc *fs.MountSource) *fs.Inode { + fd := &fd{File: f} + // RootOwner by default, is overridden in UnstableAttr() + fd.InitSymlink(t, fs.RootOwner, "") + return newFile(fd, msrc, fs.Symlink, t) +} + +// GetFile returns the fs.File backing this fd. The dirent and flags +// arguments are ignored. +func (f *fd) GetFile(context.Context, *fs.Dirent, fs.FileFlags) (*fs.File, error) { + // Take a reference on the fs.File. + f.File.IncRef() + return f.File, nil +} + +// Readlink returns the current target. +func (f *fd) Readlink(ctx context.Context, _ *fs.Inode) (string, error) { + root := fs.RootFromContext(ctx) + defer root.DecRef() + n, _ := f.Dirent.FullName(root) + return n, nil +} + +// Getlink implements fs.InodeOperations.Getlink. +func (f *fd) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) { + f.Dirent.IncRef() + return f.Dirent, nil +} + +// Truncate is ignored. +func (f *fd) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +// Close releases the reference on the file. +func (f *fd) Close() error { + f.DecRef() + return nil +} + +// fdDir implements /proc/TID/fd. +type fdDir struct { + ramfs.Dir + + // We hold a reference on the task's fdmap but only keep an indirect + // task pointer to avoid Dirent loading circularity caused by fdmap's + // potential back pointers into the dirent tree. + t *kernel.Task +} + +// newFdDir creates a new fdDir. +func newFdDir(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + f := &fdDir{t: t} + f.InitDir(t, nil, fs.RootOwner, fs.FilePermissions{User: fs.PermMask{Read: true, Execute: true}}) + return newFile(f, msrc, fs.SpecialDirectory, t) +} + +// Check implements InodeOperations.Check. +// +// This is to match Linux, which uses a special permission handler to guarantee +// that a process can still access /proc/self/fd after it has executed +// setuid. See fs/proc/fd.c:proc_fd_permission. 
+func (f *fdDir) Check(ctx context.Context, inode *fs.Inode, req fs.PermMask) bool { + if fs.ContextCanAccessFile(ctx, inode, req) { + return true + } + if t := kernel.TaskFromContext(ctx); t != nil { + // Allow access if the task trying to access it is in the + // thread group corresponding to this directory. + // + // N.B. Technically, in Linux 3.11, this compares what would be + // the equivalent of task pointers. However, this was fixed + // later in 54708d2858e7 ("proc: actually make + // proc_fd_permission() thread-friendly"). + if f.t.ThreadGroup() == t.ThreadGroup() { + return true + } + } + return false +} + +// Lookup loads an Inode in /proc/TID/fd into a Dirent. +func (f *fdDir) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) { + n, err := walkDescriptors(f.t, p, func(file *fs.File, _ kernel.FDFlags) *fs.Inode { + return newFd(f.t, file, dir.MountSource) + }) + if err != nil { + return nil, err + } + return fs.NewDirent(n, p), nil +} + +// DeprecatedReaddir lists fds in /proc/TID/fd. +func (f *fdDir) DeprecatedReaddir(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) { + return readDescriptors(f.t, dirCtx, offset, func(fd int) fs.DentAttr { + return fs.GenericDentAttr(fs.Symlink, device.ProcDevice) + }) +} + +// fdInfo is a single file in /proc/TID/fdinfo/. +type fdInfo struct { + ramfs.File + + flags kernel.FDFlags +} + +// newFdInfo returns a new fdInfo based on an existing file. +func newFdInfo(t *kernel.Task, _ *fs.File, flags kernel.FDFlags, msrc *fs.MountSource) *fs.Inode { + fdi := &fdInfo{flags: flags} + fdi.InitFile(t, fs.RootOwner, fs.FilePermissions{User: fs.PermMask{Read: true}}) + // TODO: Get pos, locks, and other data. For now we only + // have flags. + // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt + fdi.Append([]byte(fmt.Sprintf("flags: %08o\n", flags))) + return newFile(fdi, msrc, fs.SpecialFile, t) +} + +// DeprecatedPwritev implements fs.HandleOperations.DeprecatedPwritev. 
+func (*fdInfo) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + return 0, ramfs.ErrInvalidOp +} + +// Truncate implements fs.InodeOperations.Truncate. +func (*fdInfo) Truncate(ctx context.Context, inode *fs.Inode, size int64) error { + return ramfs.ErrInvalidOp +} + +// fdInfoDir implements /proc/TID/fdinfo. It embeds an fdDir, but overrides +// Lookup and Readdir. +type fdInfoDir struct { + ramfs.Dir + + t *kernel.Task +} + +// newFdInfoDir creates a new fdInfoDir. +func newFdInfoDir(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + fdid := &fdInfoDir{t: t} + fdid.InitDir(t, nil, fs.RootOwner, fs.FilePermsFromMode(0500)) + return newFile(fdid, msrc, fs.SpecialDirectory, t) +} + +// Lookup loads an fd in /proc/TID/fdinfo into a Dirent. +func (fdid *fdInfoDir) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) { + n, err := walkDescriptors(fdid.t, p, func(file *fs.File, flags kernel.FDFlags) *fs.Inode { + return newFdInfo(fdid.t, file, flags, dir.MountSource) + }) + if err != nil { + return nil, err + } + return fs.NewDirent(n, p), nil +} + +// DeprecatedReaddir lists fds in /proc/TID/fdinfo. +func (fdid *fdInfoDir) DeprecatedReaddir(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) { + return readDescriptors(fdid.t, dirCtx, offset, func(fd int) fs.DentAttr { + return fs.GenericDentAttr(fs.RegularFile, device.ProcDevice) + }) +} diff --git a/pkg/sentry/fs/proc/file.go b/pkg/sentry/fs/proc/file.go new file mode 100644 index 000000000..9a433cdf8 --- /dev/null +++ b/pkg/sentry/fs/proc/file.go @@ -0,0 +1,56 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type file struct { + fs.InodeOperations + + // nodeType is the file type of this file. + nodeType fs.InodeType + + // t is the associated kernel task that owns this file. + t *kernel.Task +} + +func newFile(node fs.InodeOperations, msrc *fs.MountSource, nodeType fs.InodeType, t *kernel.Task) *fs.Inode { + iops := &file{node, nodeType, t} + sattr := fs.StableAttr{ + DeviceID: device.ProcDevice.DeviceID(), + InodeID: device.ProcDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: nodeType, + } + return fs.NewInode(iops, msrc, sattr) +} + +// UnstableAttr returns all attributes of this file. +func (f *file) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + uattr, err := f.InodeOperations.UnstableAttr(ctx, inode) + if err != nil { + return fs.UnstableAttr{}, err + } + if f.t != nil { + uattr.Owner = fs.FileOwnerFromContext(f.t) + } + return uattr, nil +} diff --git a/pkg/sentry/fs/proc/filesystems.go b/pkg/sentry/fs/proc/filesystems.go new file mode 100644 index 000000000..fe4de18ba --- /dev/null +++ b/pkg/sentry/fs/proc/filesystems.go @@ -0,0 +1,55 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile" +) + +// filesystemsData backs /proc/filesystems. +type filesystemsData struct{} + +// NeedsUpdate returns true on the first generation. The set of registered file +// systems doesn't change so there's no need to generate SeqData more than once. +func (*filesystemsData) NeedsUpdate(generation int64) bool { + return generation == 0 +} + +// ReadSeqFileData returns data for the SeqFile reader. +// SeqData, the current generation and where in the file the handle corresponds to. +func (*filesystemsData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + // We don't ever expect to see a non-nil SeqHandle. + if h != nil { + return nil, 0 + } + + // Generate the file contents. + var buf bytes.Buffer + for _, sys := range fs.GetFilesystems() { + nodev := "nodev" + if sys.Flags()&fs.FilesystemRequiresDev != 0 { + nodev = "" + } + // Matches the format of fs/filesystems.c:filesystems_proc_show. + fmt.Fprintf(&buf, "%s\t%s\n", nodev, sys.Name()) + } + + // Return the SeqData and advance the generation counter. 
+ return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*filesystemsData)(nil)}}, 1 +} diff --git a/pkg/sentry/fs/proc/fs.go b/pkg/sentry/fs/proc/fs.go new file mode 100644 index 000000000..072d00beb --- /dev/null +++ b/pkg/sentry/fs/proc/fs.go @@ -0,0 +1,69 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// filesystem is a procfs. +type filesystem struct{} + +func init() { + fs.RegisterFilesystem(&filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +// Name matches fs/proc/root.c:proc_fs_type.name. +const FilesystemName = "proc" + +// Name is the name of the file system. +func (*filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount allows users to mount(2) this file system. +func (*filesystem) AllowUserMount() bool { + return true +} + +// Flags returns that there is nothing special about this file system. +// +// In Linux, proc returns FS_USERNS_VISIBLE | FS_USERNS_MOUNT, see fs/proc/root.c. +func (*filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns the root of a procfs that can be positioned in the vfs. +func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string) (*fs.Inode, error) { + // device is always ignored. 
+ + // Parse generic comma-separated key=value options, this file system expects them. + options := fs.GenericMountSourceOptions(data) + + // Proc options parsing checks for either a gid= or hidepid= and barfs on + // anything else, see fs/proc/root.c:proc_parse_options. Since we don't know + // what to do with gid= or hidepid=, we blow up if we get any options. + if len(options) > 0 { + return nil, fmt.Errorf("unsupported mount options: %v", options) + } + + // Construct the procfs root. Since procfs files are all virtual, we + // never want them cached. + return New(ctx, fs.NewNonCachingMountSource(f, flags)) +} diff --git a/pkg/sentry/fs/proc/loadavg.go b/pkg/sentry/fs/proc/loadavg.go new file mode 100644 index 000000000..694cde656 --- /dev/null +++ b/pkg/sentry/fs/proc/loadavg.go @@ -0,0 +1,51 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile" +) + +// loadavgData backs /proc/loadavg. +type loadavgData struct{} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*loadavgData) NeedsUpdate(generation int64) bool { + return true +} + +func (d *loadavgData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + var buf bytes.Buffer + + // TODO: Include real data in fields. + // Column 1-3: CPU and IO utilization of the last 1, 5, and 15 minute periods. 
+ // Column 4-5: currently running processes and the total number of processes. + // Column 6: the last process ID used. + fmt.Fprintf(&buf, "%.2f %.2f %.2f %d/%d %d\n", 0.00, 0.00, 0.00, 0, 0, 0) + + return []seqfile.SeqData{ + { + Buf: buf.Bytes(), + Handle: (*loadavgData)(nil), + }, + }, 0 +} diff --git a/pkg/sentry/fs/proc/meminfo.go b/pkg/sentry/fs/proc/meminfo.go new file mode 100644 index 000000000..489f796e5 --- /dev/null +++ b/pkg/sentry/fs/proc/meminfo.go @@ -0,0 +1,82 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// meminfoData backs /proc/meminfo. +type meminfoData struct { + // k is the owning Kernel. + k *kernel.Kernel +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*meminfoData) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
+func (d *meminfoData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + mem := d.k.Platform.Memory() + mem.UpdateUsage() + snapshot, totalUsage := usage.MemoryAccounting.Copy() + totalSize := usage.TotalMemory(mem.TotalSize(), totalUsage) + anon := snapshot.Anonymous + snapshot.Tmpfs + file := snapshot.PageCache + snapshot.Mapped + // We don't actually have active/inactive LRUs, so just make up numbers. + activeFile := (file / 2) &^ (usermem.PageSize - 1) + inactiveFile := file - activeFile + + var buf bytes.Buffer + fmt.Fprintf(&buf, "MemTotal: %8d kB\n", totalSize/1024) + memFree := (totalSize - totalUsage) / 1024 + // We use MemFree as MemAvailable because we don't swap. + // TODO: When reclaim is implemented the value of MemAvailable + // should change. + fmt.Fprintf(&buf, "MemFree: %8d kB\n", memFree) + fmt.Fprintf(&buf, "MemAvailable: %8d kB\n", memFree) + fmt.Fprintf(&buf, "Buffers: 0 kB\n") // memory usage by block devices + fmt.Fprintf(&buf, "Cached: %8d kB\n", (file+snapshot.Tmpfs)/1024) + // Emulate a system with no swap, which disables inactivation of anon pages. 
+ fmt.Fprintf(&buf, "SwapCache: 0 kB\n") + fmt.Fprintf(&buf, "Active: %8d kB\n", (anon+activeFile)/1024) + fmt.Fprintf(&buf, "Inactive: %8d kB\n", inactiveFile/1024) + fmt.Fprintf(&buf, "Active(anon): %8d kB\n", anon/1024) + fmt.Fprintf(&buf, "Inactive(anon): 0 kB\n") + fmt.Fprintf(&buf, "Active(file): %8d kB\n", activeFile/1024) + fmt.Fprintf(&buf, "Inactive(file): %8d kB\n", inactiveFile/1024) + fmt.Fprintf(&buf, "Unevictable: 0 kB\n") // TODO + fmt.Fprintf(&buf, "Mlocked: 0 kB\n") // TODO + fmt.Fprintf(&buf, "SwapTotal: 0 kB\n") + fmt.Fprintf(&buf, "SwapFree: 0 kB\n") + fmt.Fprintf(&buf, "Dirty: 0 kB\n") + fmt.Fprintf(&buf, "Writeback: 0 kB\n") + fmt.Fprintf(&buf, "AnonPages: %8d kB\n", anon/1024) + fmt.Fprintf(&buf, "Mapped: %8d kB\n", file/1024) // doesn't count mapped tmpfs, which we don't know + fmt.Fprintf(&buf, "Shmem: %8d kB\n", snapshot.Tmpfs/1024) + return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*meminfoData)(nil)}}, 0 +} diff --git a/pkg/sentry/fs/proc/mounts.go b/pkg/sentry/fs/proc/mounts.go new file mode 100644 index 000000000..76092567d --- /dev/null +++ b/pkg/sentry/fs/proc/mounts.go @@ -0,0 +1,176 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// forEachMountSource runs fn for the process root mount and each mount that is a
+// mountsFile is used to implement /proc/[pid]/mounts.
		// The "needs dump" and fsck flags are always 0, which is allowed.
+ +package proc + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" +) + +// newNet creates a new proc net entry. +func (p *proc) newNetDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + d := &ramfs.Dir{} + d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555)) + if s := p.k.NetworkStack(); s != nil && s.SupportsIPv6() { + d.AddChild(ctx, "dev", seqfile.NewSeqFileInode(ctx, &netDev{s: s}, msrc)) + d.AddChild(ctx, "if_inet6", seqfile.NewSeqFileInode(ctx, &ifinet6{s: s}, msrc)) + } + return newFile(d, msrc, fs.SpecialDirectory, nil) +} + +// ifinet6 implements seqfile.SeqSource for /proc/net/if_inet6. +type ifinet6 struct { + s inet.Stack `state:"nosave"` // S/R-FIXME +} + +func (n *ifinet6) contents() []string { + var lines []string + nics := n.s.Interfaces() + for id, naddrs := range n.s.InterfaceAddrs() { + nic, ok := nics[id] + if !ok { + // NIC was added after NICNames was called. We'll just + // ignore it. + continue + } + + for _, a := range naddrs { + // IPv6 only. + if a.Family != linux.AF_INET6 { + continue + } + + // Fields: + // IPv6 address displayed in 32 hexadecimal chars without colons + // Netlink device number (interface index) in hexadecimal (use nic id) + // Prefix length in hexadecimal + // Scope value (use 0) + // Interface flags + // Device name + lines = append(lines, fmt.Sprintf("%032x %02x %02x %02x %02x %8s\n", a.Addr, id, a.PrefixLen, 0, a.Flags, nic.Name)) + } + } + return lines +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*ifinet6) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
+func (n *ifinet6) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + var data []seqfile.SeqData + for _, l := range n.contents() { + data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*ifinet6)(nil)}) + } + + return data, 0 +} + +// netDev implements seqfile.SeqSource for /proc/net/dev. +type netDev struct { + s inet.Stack `state:"nosave"` // S/R-FIXME +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (n *netDev) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. See Linux's +// net/core/net-procfs.c:dev_seq_show. +func (n *netDev) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + interfaces := n.s.Interfaces() + contents := make([]string, 2, 2+len(interfaces)) + // Add the table header. From net/core/net-procfs.c:dev_seq_show. + contents[0] = "Inter-| Receive | Transmit\n" + contents[1] = " face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n" + + for _, i := range interfaces { + // TODO: Collect stats from each inet.Stack + // implementation (hostinet, epsocket, and rpcinet). + + // Implements the same format as + // net/core/net-procfs.c:dev_seq_printf_stats. 
+ l := fmt.Sprintf("%6s: %7d %7d %4d %4d %4d %5d %10d %9d %8d %7d %4d %4d %4d %5d %7d %10d\n", + i.Name, + // Received + 0, // bytes + 0, // packets + 0, // errors + 0, // dropped + 0, // fifo + 0, // frame + 0, // compressed + 0, // multicast + // Transmitted + 0, // bytes + 0, // packets + 0, // errors + 0, // dropped + 0, // fifo + 0, // frame + 0, // compressed + 0) // multicast + contents = append(contents, l) + } + + var data []seqfile.SeqData + for _, l := range contents { + data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*ifinet6)(nil)}) + } + + return data, 0 +} diff --git a/pkg/sentry/fs/proc/net_test.go b/pkg/sentry/fs/proc/net_test.go new file mode 100644 index 000000000..a31a20494 --- /dev/null +++ b/pkg/sentry/fs/proc/net_test.go @@ -0,0 +1,74 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "reflect" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" +) + +func newIPv6TestStack() *inet.TestStack { + s := inet.NewTestStack() + s.SupportsIPv6Flag = true + return s +} + +func TestIfinet6NoAddresses(t *testing.T) { + n := &ifinet6{s: newIPv6TestStack()} + if got := n.contents(); got != nil { + t.Errorf("Got n.contents() = %v, want = %v", got, nil) + } +} + +func TestIfinet6(t *testing.T) { + s := newIPv6TestStack() + s.InterfacesMap[1] = inet.Interface{Name: "eth0"} + s.InterfaceAddrsMap[1] = []inet.InterfaceAddr{ + { + Family: linux.AF_INET6, + PrefixLen: 128, + Addr: []byte("\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"), + }, + } + s.InterfacesMap[2] = inet.Interface{Name: "eth1"} + s.InterfaceAddrsMap[2] = []inet.InterfaceAddr{ + { + Family: linux.AF_INET6, + PrefixLen: 128, + Addr: []byte("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"), + }, + } + want := map[string]struct{}{ + "000102030405060708090a0b0c0d0e0f 01 80 00 00 eth0\n": {}, + "101112131415161718191a1b1c1d1e1f 02 80 00 00 eth1\n": {}, + } + + n := &ifinet6{s: s} + contents := n.contents() + if len(contents) != len(want) { + t.Errorf("Got len(n.contents()) = %d, want = %d", len(contents), len(want)) + } + got := map[string]struct{}{} + for _, l := range contents { + got[l] = struct{}{} + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("Got n.contents() = %v, want = %v", got, want) + } +} diff --git a/pkg/sentry/fs/proc/proc.go b/pkg/sentry/fs/proc/proc.go new file mode 100644 index 000000000..459eb7e62 --- /dev/null +++ b/pkg/sentry/fs/proc/proc.go @@ -0,0 +1,182 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// Package proc implements a partial in-memory file system for procfs.
+ "filesystems": seqfile.NewSeqFileInode(ctx, &filesystemsData{}, msrc), + "loadavg": seqfile.NewSeqFileInode(ctx, &loadavgData{}, msrc), + "meminfo": seqfile.NewSeqFileInode(ctx, &meminfoData{k}, msrc), + "mounts": newMountsSymlink(ctx, msrc), + "stat": seqfile.NewSeqFileInode(ctx, &statData{k}, msrc), + "version": seqfile.NewSeqFileInode(ctx, &versionData{k}, msrc), + }, fs.RootOwner, fs.FilePermsFromMode(0555)) + + p.AddChild(ctx, "cpuinfo", p.newCPUInfo(ctx, msrc)) + p.AddChild(ctx, "uptime", p.newUptime(ctx, msrc)) + + return newFile(p, msrc, fs.SpecialDirectory, nil), nil +} + +// self is a magical link. +type self struct { + ramfs.Symlink + + pidns *kernel.PIDNamespace +} + +// newSelf returns a new "self" node. +func (p *proc) newSelf(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + s := &self{pidns: p.pidns} + s.InitSymlink(ctx, fs.RootOwner, "") + return newFile(s, msrc, fs.Symlink, nil) +} + +// Readlink implements fs.InodeOperations.Readlink. +func (s *self) Readlink(ctx context.Context, inode *fs.Inode) (string, error) { + if t := kernel.TaskFromContext(ctx); t != nil { + tgid := s.pidns.IDOfThreadGroup(t.ThreadGroup()) + if tgid == 0 { + return "", ramfs.ErrNotFound + } + return strconv.FormatUint(uint64(tgid), 10), nil + } + + // Who is reading this link? + return "", ramfs.ErrInvalidOp +} + +// Lookup loads an Inode at name into a Dirent. +func (p *proc) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) { + // Is it one of the static ones? + dirent, walkErr := p.Dir.Lookup(ctx, dir, name) + if walkErr == nil { + return dirent, nil + } + + // Is it a dynamic element? 
+// DeprecatedReaddir synthesizes proc contents.
+ for _, tg := range p.pidns.ThreadGroups() { + if leader := tg.Leader(); leader != nil { + name := strconv.FormatUint(uint64(tg.ID()), 10) + m[name] = fs.GenericDentAttr(fs.SpecialDirectory, device.ProcDevice) + names = append(names, name) + } + } + + if offset >= len(m) { + return offset, nil + } + sort.Strings(names) + names = names[offset:] + for _, name := range names { + if err := dirCtx.DirEmit(name, m[name]); err != nil { + return offset, err + } + offset++ + } + return offset, err +} + +// newMountsSymlink returns a symlink to "self/mounts" +func newMountsSymlink(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + s := &ramfs.Symlink{} + s.InitSymlink(ctx, fs.RootOwner, "self/mounts") + return newFile(s, msrc, fs.Symlink, nil) +} diff --git a/pkg/sentry/fs/proc/seqfile/BUILD b/pkg/sentry/fs/proc/seqfile/BUILD new file mode 100644 index 000000000..48dd25e5b --- /dev/null +++ b/pkg/sentry/fs/proc/seqfile/BUILD @@ -0,0 +1,55 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "seqfile_state", + srcs = [ + "seqfile.go", + ], + out = "seqfile_state.go", + package = "seqfile", +) + +go_library( + name = "seqfile", + srcs = [ + "seqfile.go", + "seqfile_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/proc/device", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/usermem", + "//pkg/state", + ], +) + +go_stateify( + name = "seqfile_test_state", + srcs = ["seqfile_test.go"], + out = "seqfile_test_state.go", + package = "seqfile", +) + +go_test( + name = "seqfile_test", + size = "small", + srcs = [ + "seqfile_test.go", + "seqfile_test_state.go", + ], + embed = [":seqfile"], + deps = [ + "//pkg/sentry/context/contexttest", + 
+	// A seek handle used to find the next valid unit in ReadSeqFileData.
The first entry in the slice is greater than the handle. + // If handle is nil then all known records are returned. Generation + // must always be greater than 0. + ReadSeqFileData(handle SeqHandle) ([]SeqData, int64) +} + +// SeqGenerationCounter is a counter to keep track if the SeqSource should be +// updated. SeqGenerationCounter is not thread-safe and should be protected +// with a mutex. +type SeqGenerationCounter struct { + // The generation that the SeqData is at. + generation int64 +} + +// SetGeneration sets the generation to the new value, be careful to not set it +// to a value less than current. +func (s *SeqGenerationCounter) SetGeneration(generation int64) { + s.generation = generation +} + +// Update increments the current generation. +func (s *SeqGenerationCounter) Update() { + s.generation++ +} + +// Generation returns the current generation counter. +func (s *SeqGenerationCounter) Generation() int64 { + return s.generation +} + +// IsCurrent returns whether the given generation is current or not. +func (s *SeqGenerationCounter) IsCurrent(generation int64) bool { + return s.Generation() == generation +} + +// SeqFile is used to provide dynamic files that can be ordered by record. +type SeqFile struct { + ramfs.Entry + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + SeqSource + + source []SeqData + generation int64 + lastRead int64 +} + +// NewSeqFile returns a seqfile suitable for use by external consumers. +func NewSeqFile(ctx context.Context, source SeqSource) *SeqFile { + s := &SeqFile{SeqSource: source} + s.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0444)) + return s +} + +// NewSeqFileInode returns an Inode with SeqFile InodeOperations. 
+func NewSeqFileInode(ctx context.Context, source SeqSource, msrc *fs.MountSource) *fs.Inode { + iops := NewSeqFile(ctx, source) + sattr := fs.StableAttr{ + DeviceID: device.ProcDevice.DeviceID(), + InodeID: device.ProcDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.SpecialFile, + } + return fs.NewInode(iops, msrc, sattr) +} + +// UnstableAttr returns unstable attributes of the SeqFile. +func (s *SeqFile) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + uattr, _ := s.Entry.UnstableAttr(ctx, inode) + uattr.ModificationTime = ktime.NowFromContext(ctx) + return uattr, nil +} + +// findIndexAndOffset finds the unit that corresponds to a certain offset. +// Returns the unit and the offset within the unit. If there are not enough +// units len(data) and leftover offset is returned. +func findIndexAndOffset(data []SeqData, offset int64) (int, int64) { + for i, buf := range data { + l := int64(len(buf.Buf)) + if offset < l { + return i, offset + } + offset -= l + } + return len(data), offset +} + +// DeprecatedPreadv reads from the file at the given offset. +func (s *SeqFile) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + + s.Entry.NotifyAccess(ctx) + defer func() { s.lastRead = offset }() + + updated := false + + // Try to find where we should start reading this file. + i, recordOffset := findIndexAndOffset(s.source, offset) + if i == len(s.source) { + // Ok, we're at EOF. Let's first check to see if there might be + // more data available to us. If there is more data, add it to + // the end and try reading again. + if !s.SeqSource.NeedsUpdate(s.generation) { + return 0, io.EOF + } + oldLen := len(s.source) + s.updateSourceLocked(len(s.source)) + updated = true + // We know that we had consumed everything up until this point + // so we search in the new slice instead of starting over. 
+ i, recordOffset = findIndexAndOffset(s.source[oldLen:], recordOffset) + i += oldLen + // i is at most the length of the slice which is + // len(s.source) - oldLen. So at most i will be equal to + // len(s.source). + if i == len(s.source) { + return 0, io.EOF + } + } + + var done int64 + // We're reading parts of a record, finish reading the current object + // before continuing on to the next. We don't refresh our data source + // before this record is completed. + if recordOffset != 0 { + n, err := dst.CopyOut(ctx, s.source[i].Buf[recordOffset:]) + done += int64(n) + dst = dst.DropFirst(n) + if dst.NumBytes() == 0 || err != nil { + return done, err + } + i++ + } + + // Next/New unit, update the source file if necessary. Make an extra + // check to see if we've seeked backwards and if so always update our + // data source. + if !updated && (s.SeqSource.NeedsUpdate(s.generation) || s.lastRead > offset) { + s.updateSourceLocked(i) + // recordOffset is 0 here and we won't update records behind the + // current one so recordOffset is still 0 even though source + // just got updated. Just read the next record. + } + + // Finish by reading all the available data. + for _, buf := range s.source[i:] { + n, err := dst.CopyOut(ctx, buf.Buf) + done += int64(n) + dst = dst.DropFirst(n) + if dst.NumBytes() == 0 || err != nil { + return done, err + } + } + + // If the file shrank (entries not yet read were removed above) + // while we tried to read we can end up with nothing read. + if done == 0 && dst.NumBytes() != 0 { + return 0, io.EOF + } + return done, nil +} + +// updateSourceLocked requires that s.mu is held. +func (s *SeqFile) updateSourceLocked(record int) { + var h SeqHandle + if record == 0 { + h = nil + } else { + h = s.source[record-1].Handle + } + // Save what we have previously read. + s.source = s.source[:record] + var newSource []SeqData + newSource, s.generation = s.SeqSource.ReadSeqFileData(h) + s.source = append(s.source, newSource...) 
+// ReadSeqFileData returns a slice of SeqData which contains elements
			// We make this just an error so we fall through and print the data below.
+ inode := fs.NewInode(root, m, fs.StableAttr{Type: fs.Directory}) + dirent2, err := root.Lookup(ctx, inode, "foo") + if err != nil { + t.Fatalf("failed to walk to foo for n2: %v", err) + } + n2 := dirent2.Inode.InodeOperations + + // Writing? + if _, err := n2.DeprecatedPwritev(nil, usermem.BytesIOSequence([]byte("test")), 0); err == nil { + t.Fatalf("managed to write to n2: %v", err) + } + + // How about reading? + dirent3, err := root.Lookup(ctx, inode, "foo") + if err != nil { + t.Fatalf("failed to walk to foo: %v", err) + } + n3 := dirent3.Inode.InodeOperations + + if n2 != n3 { + t.Error("got n2 != n3, want same") + } + + testSource.update = true + + table := []testTable{ + // Read past the end. + {100, 4, []byte{}, io.EOF}, + {110, 4, []byte{}, io.EOF}, + {200, 4, []byte{}, io.EOF}, + // Read a truncated first line. + {0, 4, testSource.actual[0].Buf[:4], nil}, + // Read the whole first line. + {0, 10, testSource.actual[0].Buf, nil}, + // Read the whole first line + 5 bytes of second line. + {0, 15, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:5]), nil}, + // First 4 bytes of the second line. + {10, 4, testSource.actual[1].Buf[:4], nil}, + // Read the two first lines. + {0, 20, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf), nil}, + // Read three lines. + {0, 30, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf), nil}, + // Read everything, but use a bigger buffer than necessary. + {0, 150, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf, testSource.actual[3].Buf, testSource.actual[4].Buf, testSource.actual[5].Buf, testSource.actual[6].Buf, testSource.actual[7].Buf, testSource.actual[8].Buf, testSource.actual[9].Buf), nil}, + // Read the last 3 bytes. 
+ {97, 10, testSource.actual[9].Buf[7:], nil}, + } + if err := runTableTests(ctx, table, n2); err != nil { + t.Errorf("runTableTest failed with testSource.update = %v : %v", testSource.update, err) + } + + // Disable updates and do it again. + testSource.update = false + if err := runTableTests(ctx, table, n2); err != nil { + t.Errorf("runTableTest failed with testSource.update = %v: %v", testSource.update, err) + } +} + +// Test that we behave correctly when the file is updated. +func TestSeqFileFileUpdated(t *testing.T) { + testSource := &seqTest{} + testSource.Init() + testSource.update = true + + // Create a file that can be R/W. + m := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}) + ctx := contexttest.Context(t) + contents := map[string]*fs.Inode{ + "foo": NewSeqFileInode(ctx, testSource, m), + } + root := ramfstest.NewDir(ctx, contents, fs.FilePermsFromMode(0777)) + + // How about opening it? + inode := fs.NewInode(root, m, fs.StableAttr{Type: fs.Directory}) + dirent2, err := root.Lookup(ctx, inode, "foo") + if err != nil { + t.Fatalf("failed to walk to foo for n2: %v", err) + } + n2 := dirent2.Inode.InodeOperations + + table := []testTable{ + {0, 16, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:6]), nil}, + } + if err := runTableTests(ctx, table, n2); err != nil { + t.Errorf("runTableTest failed: %v", err) + } + // Delete the first entry. + cut := testSource.actual[0].Buf + testSource.actual = testSource.actual[1:] + + table = []testTable{ + // Try reading buffer 0 with an offset. This will not delete the old data. + {1, 5, cut[1:6], nil}, + // Reset our file by reading at offset 0. + {0, 10, testSource.actual[0].Buf, nil}, + {16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil}, + // Read the same data a second time. + {16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil}, + // Read the following two lines. 
+ {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil}, + } + if err := runTableTests(ctx, table, n2); err != nil { + t.Errorf("runTableTest failed after removing first entry: %v", err) + } + + // Add a new duplicate line in the middle (6666...) + after := testSource.actual[5:] + testSource.actual = testSource.actual[:4] + // Note the list must be sorted. + testSource.actual = append(testSource.actual, after[0]) + testSource.actual = append(testSource.actual, after...) + + table = []testTable{ + {50, 20, flatten(testSource.actual[4].Buf, testSource.actual[5].Buf), nil}, + } + if err := runTableTests(ctx, table, n2); err != nil { + t.Errorf("runTableTest failed after adding middle entry: %v", err) + } + // This will be used in a later test. + oldTestData := testSource.actual + + // Delete everything. + testSource.actual = testSource.actual[:0] + table = []testTable{ + {20, 20, []byte{}, io.EOF}, + } + if err := runTableTests(ctx, table, n2); err != nil { + t.Errorf("runTableTest failed after removing all entries: %v", err) + } + // Restore some of the data. + testSource.actual = oldTestData[:1] + table = []testTable{ + {6, 20, testSource.actual[0].Buf[6:], nil}, + } + if err := runTableTests(ctx, table, n2); err != nil { + t.Errorf("runTableTest failed after adding first entry back: %v", err) + } + + // Re-extend the data + testSource.actual = oldTestData + table = []testTable{ + {30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil}, + } + if err := runTableTests(ctx, table, n2); err != nil { + t.Errorf("runTableTest failed after extending testSource: %v", err) + } +} diff --git a/pkg/sentry/fs/proc/stat.go b/pkg/sentry/fs/proc/stat.go new file mode 100644 index 000000000..dee836a05 --- /dev/null +++ b/pkg/sentry/fs/proc/stat.go @@ -0,0 +1,139 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" +) + +// statData backs /proc/stat. +type statData struct { + // k is the owning Kernel. + k *kernel.Kernel +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*statData) NeedsUpdate(generation int64) bool { + return true +} + +// cpuStats contains the breakdown of CPU time for /proc/stat. +type cpuStats struct { + // user is time spent in userspace tasks with non-positive niceness. + user uint64 + + // nice is time spent in userspace tasks with positive niceness. + nice uint64 + + // system is time spent in non-interrupt kernel context. + system uint64 + + // idle is time spent idle. + idle uint64 + + // ioWait is time spent waiting for IO. + ioWait uint64 + + // irq is time spent in interrupt context. + irq uint64 + + // softirq is time spent in software interrupt context. + softirq uint64 + + // steal is involuntary wait time. + steal uint64 + + // guest is time spent in guests with non-positive niceness. + guest uint64 + + // guestNice is time spent in guests with positive niceness. + guestNice uint64 +} + +// String implements fmt.Stringer. +func (c cpuStats) String() string { + return fmt.Sprintf("%d %d %d %d %d %d %d %d %d %d", c.user, c.nice, c.system, c.idle, c.ioWait, c.irq, c.softirq, c.steal, c.guest, c.guestNice) +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
+func (s *statData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + var buf bytes.Buffer + + // TODO: We currently export only zero CPU stats. We could + // at least provide some aggregate stats. + var cpu cpuStats + fmt.Fprintf(&buf, "cpu %s\n", cpu) + + for c, max := uint(0), s.k.ApplicationCores(); c < max; c++ { + fmt.Fprintf(&buf, "cpu%d %s\n", c, cpu) + } + + // The total number of interrupts is dependent on the CPUs and PCI + // devices on the system. See arch_probe_nr_irqs. + // + // Since we don't report real interrupt stats, just choose an arbitrary + // value from a representative VM. + const numInterrupts = 256 + + // The Kernel doesn't handle real interrupts, so report all zeroes. + // TODO: We could count page faults as #PF. + fmt.Fprintf(&buf, "intr 0") // total + for i := 0; i < numInterrupts; i++ { + fmt.Fprintf(&buf, " 0") + } + fmt.Fprintf(&buf, "\n") + + // Total number of context switches. + // TODO: Count this. + fmt.Fprintf(&buf, "ctxt 0\n") + + // CLOCK_REALTIME timestamp from boot, in seconds. + fmt.Fprintf(&buf, "btime %d\n", s.k.Timekeeper().BootTime().Seconds()) + + // Total number of clones. + // TODO: Count this. + fmt.Fprintf(&buf, "processes 0\n") + + // Number of runnable tasks. + // TODO: Count this. + fmt.Fprintf(&buf, "procs_running 0\n") + + // Number of tasks waiting on IO. + // TODO: Count this. + fmt.Fprintf(&buf, "procs_blocked 0\n") + + // Number of each softirq handled. + fmt.Fprintf(&buf, "softirq 0") // total + for i := 0; i < linux.NumSoftIRQ; i++ { + fmt.Fprintf(&buf, " 0") + } + fmt.Fprintf(&buf, "\n") + + return []seqfile.SeqData{ + { + Buf: buf.Bytes(), + Handle: (*statData)(nil), + }, + }, 0 +} diff --git a/pkg/sentry/fs/proc/sys.go b/pkg/sentry/fs/proc/sys.go new file mode 100644 index 000000000..4323f3650 --- /dev/null +++ b/pkg/sentry/fs/proc/sys.go @@ -0,0 +1,117 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + "io" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// hostname is a file containing the system hostname. +type hostname struct { + ramfs.Entry +} + +// DeprecatedPreadv implements fs.InodeOperations.DeprecatedPreadv. +func (hostname) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + utsns := kernel.UTSNamespaceFromContext(ctx) + contents := []byte(utsns.HostName() + "\n") + + if offset >= int64(len(contents)) { + return 0, io.EOF + } + + n, err := dst.CopyOut(ctx, contents[offset:]) + return int64(n), err +} + +func (p *proc) newHostname(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + h := &hostname{} + h.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0444)) + return newFile(h, msrc, fs.SpecialFile, nil) +} + +// mmapMinAddrData backs /proc/sys/vm/mmap_min_addr. +type mmapMinAddrData struct { + k *kernel.Kernel +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*mmapMinAddrData) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
+func (d *mmapMinAddrData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + return []seqfile.SeqData{ + { + Buf: []byte(fmt.Sprintf("%d\n", d.k.Platform.MinUserAddress())), + Handle: (*mmapMinAddrData)(nil), + }, + }, 0 +} + +type overcommitMemory struct{} + +func (*overcommitMemory) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource. +func (*overcommitMemory) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + return []seqfile.SeqData{ + { + Buf: []byte("0\n"), + Handle: (*overcommitMemory)(nil), + }, + }, 0 +} + +func (p *proc) newKernelDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + d := &ramfs.Dir{} + d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555)) + d.AddChild(ctx, "hostname", p.newHostname(ctx, msrc)) + return newFile(d, msrc, fs.SpecialDirectory, nil) +} + +func (p *proc) newVMDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + d := &ramfs.Dir{} + d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555)) + d.AddChild(ctx, "mmap_min_addr", seqfile.NewSeqFileInode(ctx, &mmapMinAddrData{p.k}, msrc)) + d.AddChild(ctx, "overcommit_memory", seqfile.NewSeqFileInode(ctx, &overcommitMemory{}, msrc)) + return newFile(d, msrc, fs.SpecialDirectory, nil) +} + +func (p *proc) newSysDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + d := &ramfs.Dir{} + d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555)) + d.AddChild(ctx, "kernel", p.newKernelDir(ctx, msrc)) + d.AddChild(ctx, "vm", p.newVMDir(ctx, msrc)) + d.AddChild(ctx, "net", p.newSysNetDir(ctx, msrc)) + return newFile(d, msrc, fs.SpecialDirectory, nil) +} diff --git a/pkg/sentry/fs/proc/sys_net.go b/pkg/sentry/fs/proc/sys_net.go new file mode 100644 index 000000000..db44c95cb --- /dev/null +++ b/pkg/sentry/fs/proc/sys_net.go @@ -0,0 +1,188 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + "io" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type tcpMemDir int + +const ( + tcpRMem tcpMemDir = iota + tcpWMem +) + +type tcpMem struct { + ramfs.Entry + s inet.Stack + size inet.TCPBufferSize + dir tcpMemDir +} + +func newTCPMem(s inet.Stack, size inet.TCPBufferSize, dir tcpMemDir) *tcpMem { + return &tcpMem{s: s, size: size, dir: dir} +} + +func newTCPMemInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack, size inet.TCPBufferSize, dir tcpMemDir) *fs.Inode { + tm := newTCPMem(s, size, dir) + tm.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0644)) + sattr := fs.StableAttr{ + DeviceID: device.ProcDevice.DeviceID(), + InodeID: device.ProcDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.SpecialFile, + } + return fs.NewInode(tm, msrc, sattr) +} + +// DeprecatedPreadv implements fs.InodeOperations.DeprecatedPreadv. 
+func (m *tcpMem) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + if offset != 0 { + return 0, io.EOF + } + s := fmt.Sprintf("%d\t%d\t%d\n", m.size.Min, m.size.Default, m.size.Max) + n, err := dst.CopyOut(ctx, []byte(s)) + return int64(n), err +} + +// Truncate implements fs.InodeOperations.Truncate. +func (*tcpMem) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +// DeprecatedPwritev implements fs.InodeOperations.DeprecatedPwritev. +func (m *tcpMem) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + if src.NumBytes() == 0 { + return 0, nil + } + src = src.TakeFirst(usermem.PageSize - 1) + + buf := []int32{int32(m.size.Min), int32(m.size.Default), int32(m.size.Max)} + n, cperr := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, buf, src.Opts) + size := inet.TCPBufferSize{ + Min: int(buf[0]), + Default: int(buf[1]), + Max: int(buf[2]), + } + var err error + switch m.dir { + case tcpRMem: + err = m.s.SetTCPReceiveBufferSize(size) + case tcpWMem: + err = m.s.SetTCPSendBufferSize(size) + default: + panic(fmt.Sprintf("unknown tcpMem.dir: %v", m.dir)) + } + if err != nil { + return n, err + } + return n, cperr +} + +type tcpSack struct { + ramfs.Entry + s inet.Stack `state:"nosave"` // S/R-FIXME +} + +func newTCPSackInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode { + ts := &tcpSack{s: s} + ts.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0644)) + sattr := fs.StableAttr{ + DeviceID: device.ProcDevice.DeviceID(), + InodeID: device.ProcDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.SpecialFile, + } + return fs.NewInode(ts, msrc, sattr) +} + +func (s *tcpSack) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + if offset != 0 { + return 0, io.EOF + } + + sack, err := s.s.TCPSACKEnabled() + if err != nil { + return 0, err + } + + val := "0\n" + if sack { + // Technically, 
this is not quite compatible with Linux. Linux + // stores these as an integer, so if you write "2" into + // tcp_sack, you should get 2 back. Tough luck. + val = "1\n" + } + n, err := dst.CopyOut(ctx, []byte(val)) + return int64(n), err +} + +// Truncate implements fs.InodeOperations.Truncate. +func (*tcpSack) Truncate(context.Context, *fs.Inode, int64) error { + return nil +} + +// DeprecatedPwritev implements fs.InodeOperations.DeprecatedPwritev. +func (s *tcpSack) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + if src.NumBytes() == 0 { + return 0, nil + } + src = src.TakeFirst(usermem.PageSize - 1) + + var v int32 + n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts) + if err != nil { + return n, err + } + return n, s.s.SetTCPSACKEnabled(v != 0) +} + +func newSysNetIPv4Dir(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode { + d := &ramfs.Dir{} + d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555)) + + // Add tcp_rmem. + if rs, err := s.TCPReceiveBufferSize(); err == nil { + d.AddChild(ctx, "tcp_rmem", newTCPMemInode(ctx, msrc, s, rs, tcpRMem)) + } + + // Add tcp_wmem. + if ss, err := s.TCPSendBufferSize(); err == nil { + d.AddChild(ctx, "tcp_wmem", newTCPMemInode(ctx, msrc, s, ss, tcpWMem)) + } + + // Add tcp_sack. 
+ d.AddChild(ctx, "tcp_sack", newTCPSackInode(ctx, msrc, s)) + + return newFile(d, msrc, fs.SpecialDirectory, nil) +} + +func (p *proc) newSysNetDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + d := &ramfs.Dir{} + d.InitDir(ctx, nil, fs.RootOwner, fs.FilePermsFromMode(0555)) + if s := p.k.NetworkStack(); s != nil { + d.AddChild(ctx, "ipv4", newSysNetIPv4Dir(ctx, msrc, s)) + } + return newFile(d, msrc, fs.SpecialDirectory, nil) +} diff --git a/pkg/sentry/fs/proc/sys_net_test.go b/pkg/sentry/fs/proc/sys_net_test.go new file mode 100644 index 000000000..7ba392346 --- /dev/null +++ b/pkg/sentry/fs/proc/sys_net_test.go @@ -0,0 +1,121 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +func TestQuerySendBufferSize(t *testing.T) { + ctx := context.Background() + s := inet.NewTestStack() + s.TCPSendBufSize = inet.TCPBufferSize{100, 200, 300} + tm := newTCPMem(s, s.TCPSendBufSize, tcpWMem) + + buf := make([]byte, 100) + dst := usermem.BytesIOSequence(buf) + n, err := tm.DeprecatedPreadv(ctx, dst, 0) + if err != nil { + t.Fatalf("DeprecatedPreadv failed: %v", err) + } + + if got, want := string(buf[:n]), "100\t200\t300\n"; got != want { + t.Fatalf("Bad string: got %v, want %v", got, want) + } +} + +func TestQueryRecvBufferSize(t *testing.T) { + ctx := context.Background() + s := inet.NewTestStack() + s.TCPRecvBufSize = inet.TCPBufferSize{100, 200, 300} + tm := newTCPMem(s, s.TCPRecvBufSize, tcpRMem) + + buf := make([]byte, 100) + dst := usermem.BytesIOSequence(buf) + n, err := tm.DeprecatedPreadv(ctx, dst, 0) + if err != nil { + t.Fatalf("DeprecatedPreadv failed: %v", err) + } + + if got, want := string(buf[:n]), "100\t200\t300\n"; got != want { + t.Fatalf("Bad string: got %v, want %v", got, want) + } +} + +var cases = []struct { + str string + initial inet.TCPBufferSize + final inet.TCPBufferSize +}{ + { + str: "", + initial: inet.TCPBufferSize{1, 2, 3}, + final: inet.TCPBufferSize{1, 2, 3}, + }, + { + str: "100\n", + initial: inet.TCPBufferSize{1, 100, 200}, + final: inet.TCPBufferSize{100, 100, 200}, + }, + { + str: "100 200 300\n", + initial: inet.TCPBufferSize{1, 2, 3}, + final: inet.TCPBufferSize{100, 200, 300}, + }, +} + +func TestConfigureSendBufferSize(t *testing.T) { + ctx := context.Background() + s := inet.NewTestStack() + for _, c := range cases { + s.TCPSendBufSize = c.initial + tm := newTCPMem(s, c.initial, tcpWMem) + + // Write the values. 
+ src := usermem.BytesIOSequence([]byte(c.str)) + if n, err := tm.DeprecatedPwritev(ctx, src, 0); n != int64(len(c.str)) || err != nil { + t.Errorf("DeprecatedPwritev, case = %q: got (%d, %v), wanted (%d, nil)", c.str, n, err, len(c.str)) + } + + // Read the values from the stack and check them. + if s.TCPSendBufSize != c.final { + t.Errorf("TCPSendBufferSize, case = %q: got %v, wanted %v", c.str, s.TCPSendBufSize, c.final) + } + } +} + +func TestConfigureRecvBufferSize(t *testing.T) { + ctx := context.Background() + s := inet.NewTestStack() + for _, c := range cases { + s.TCPRecvBufSize = c.initial + tm := newTCPMem(s, c.initial, tcpRMem) + + // Write the values. + src := usermem.BytesIOSequence([]byte(c.str)) + if n, err := tm.DeprecatedPwritev(ctx, src, 0); n != int64(len(c.str)) || err != nil { + t.Errorf("DeprecatedPwritev, case = %q: got (%d, %v), wanted (%d, nil)", c.str, n, err, len(c.str)) + } + + // Read the values from the stack and check them. + if s.TCPRecvBufSize != c.final { + t.Errorf("TCPRecvBufferSize, case = %q: got %v, wanted %v", c.str, s.TCPRecvBufSize, c.final) + } + } +} diff --git a/pkg/sentry/fs/proc/task.go b/pkg/sentry/fs/proc/task.go new file mode 100644 index 000000000..3e9a1e50e --- /dev/null +++ b/pkg/sentry/fs/proc/task.go @@ -0,0 +1,567 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "bytes" + "fmt" + "io" + "sort" + "strconv" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// getTaskMM returns t's MemoryManager. If getTaskMM succeeds, the MemoryManager's +// users count is incremented, and must be decremented by the caller when it is +// no longer in use. +func getTaskMM(t *kernel.Task) (*mm.MemoryManager, error) { + if t.ExitState() == kernel.TaskExitDead { + return nil, syserror.ESRCH + } + var m *mm.MemoryManager + t.WithMuLocked(func(t *kernel.Task) { + m = t.MemoryManager() + }) + if m == nil || !m.IncUsers() { + return nil, io.EOF + } + return m, nil +} + +// taskDir represents a task-level directory. +type taskDir struct { + ramfs.Dir + + // t is the associated kernel task that owns this file. + t *kernel.Task +} + +// newTaskDir creates a new proc task entry. +func newTaskDir(t *kernel.Task, msrc *fs.MountSource, pidns *kernel.PIDNamespace, showSubtasks bool) *fs.Inode { + d := &taskDir{t: t} + // TODO: Set EUID/EGID based on dumpability. + d.InitDir(t, map[string]*fs.Inode{ + "auxv": newAuxvec(t, msrc), + "cmdline": newExecArgFile(t, msrc, cmdlineExecArg), + "comm": newComm(t, msrc), + "environ": newExecArgFile(t, msrc, environExecArg), + "exe": newExe(t, msrc), + "fd": newFdDir(t, msrc), + "fdinfo": newFdInfoDir(t, msrc), + "gid_map": newGIDMap(t, msrc), + // TODO: This is incorrect for /proc/[pid]/task/[tid]/io, i.e. 
if + // showSubtasks is false: + // http://lxr.free-electrons.com/source/fs/proc/base.c?v=3.11#L2980 + "io": newIO(t, msrc), + "maps": newMaps(t, msrc), + "mountinfo": seqfile.NewSeqFileInode(t, &mountInfoFile{t: t}, msrc), + "mounts": seqfile.NewSeqFileInode(t, &mountsFile{t: t}, msrc), + "ns": newNamespaceDir(t, msrc), + "stat": newTaskStat(t, msrc, showSubtasks, pidns), + "status": newStatus(t, msrc, pidns), + "uid_map": newUIDMap(t, msrc), + }, fs.RootOwner, fs.FilePermsFromMode(0555)) + if showSubtasks { + d.AddChild(t, "task", newSubtasks(t, msrc, pidns)) + } + return newFile(d, msrc, fs.SpecialDirectory, t) +} + +// subtasks represents a /proc/TID/task directory. +type subtasks struct { + ramfs.Dir + + t *kernel.Task + + pidns *kernel.PIDNamespace +} + +func newSubtasks(t *kernel.Task, msrc *fs.MountSource, pidns *kernel.PIDNamespace) *fs.Inode { + s := &subtasks{t: t, pidns: pidns} + s.InitDir(t, nil, fs.RootOwner, fs.FilePermsFromMode(0555)) + return newFile(s, msrc, fs.SpecialDirectory, t) +} + +// UnstableAttr returns unstable attributes of the subtasks. +func (s *subtasks) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + uattr, err := s.Dir.UnstableAttr(ctx, inode) + if err != nil { + return fs.UnstableAttr{}, err + } + // We can't rely on ramfs' implementation because the task directories are + // generated dynamically. + uattr.Links = uint64(2 + s.t.ThreadGroup().Count()) + return uattr, nil +} + +// Lookup loads an Inode in a task's subtask directory into a Dirent. 
+func (s *subtasks) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) { + tid, err := strconv.ParseUint(p, 10, 32) + if err != nil { + return nil, syserror.ENOENT + } + + task := s.pidns.TaskWithID(kernel.ThreadID(tid)) + if task == nil { + return nil, syserror.ENOENT + } + if task.ThreadGroup() != s.t.ThreadGroup() { + return nil, syserror.ENOENT + } + + td := newTaskDir(task, dir.MountSource, s.pidns, false) + return fs.NewDirent(td, p), nil +} + +// DeprecatedReaddir lists a task's subtask directory. +func (s *subtasks) DeprecatedReaddir(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) { + tasks := s.t.ThreadGroup().MemberIDs(s.pidns) + taskInts := make([]int, 0, len(tasks)) + for _, tid := range tasks { + taskInts = append(taskInts, int(tid)) + } + + // Find the task to start at. + idx := sort.SearchInts(taskInts, offset) + if idx == len(taskInts) { + return offset, nil + } + taskInts = taskInts[idx:] + + var tid int + for _, tid = range taskInts { + name := strconv.FormatUint(uint64(tid), 10) + attr := fs.GenericDentAttr(fs.SpecialDirectory, device.ProcDevice) + if err := dirCtx.DirEmit(name, attr); err != nil { + // Returned offset is next tid to serialize. + return tid, err + } + } + // We serialized them all. Next offset should be higher than last + // serialized tid. + return tid + 1, nil +} + +// exe is an fs.InodeOperations symlink for the /proc/PID/exe file. +type exe struct { + ramfs.Symlink + + t *kernel.Task +} + +func newExe(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + exeSymlink := &exe{t: t} + exeSymlink.InitSymlink(t, fs.RootOwner, "") + return newFile(exeSymlink, msrc, fs.Symlink, t) +} + +func (e *exe) executable() (d *fs.Dirent, err error) { + e.t.WithMuLocked(func(t *kernel.Task) { + mm := t.MemoryManager() + if mm == nil { + // TODO: Check shouldn't allow Readlink once the + // Task is zombied. 
+ err = syserror.EACCES + return + } + + // The MemoryManager may be destroyed, in which case + // MemoryManager.destroy will simply set the executable to nil + // (with locks held). + d = mm.Executable() + if d == nil { + err = syserror.ENOENT + } + }) + return +} + +// Readlink implements fs.InodeOperations. +func (e *exe) Readlink(ctx context.Context, inode *fs.Inode) (string, error) { + if !kernel.ContextCanTrace(ctx, e.t, false) { + return "", syserror.EACCES + } + + // Pull out the executable for /proc/TID/exe. + exec, err := e.executable() + if err != nil { + return "", err + } + defer exec.DecRef() + + root := fs.RootFromContext(ctx) + if root == nil { + // This doesn't correspond to anything in Linux because the vfs is + // global there. + return "", syserror.EINVAL + } + defer root.DecRef() + n, _ := exec.FullName(root) + return n, nil +} + +// namespaceFile represents a file in the namespacefs, such as the files in +// /proc/<pid>/ns. +type namespaceFile struct { + ramfs.Symlink + + t *kernel.Task +} + +func newNamespaceFile(t *kernel.Task, msrc *fs.MountSource, name string) *fs.Inode { + n := &namespaceFile{t: t} + n.InitSymlink(t, fs.RootOwner, "") + + // TODO: Namespace symlinks should contain the namespace name and the + // inode number for the namespace instance, so for example user:[123456]. We + // currently fake the inode number by sticking the symlink inode in its + // place. + n.Target = fmt.Sprintf("%s:[%d]", name, device.ProcDevice.NextIno()) + + return newFile(n, msrc, fs.Symlink, t) +} + +// Getlink implements fs.InodeOperations.Getlink. +func (n *namespaceFile) Getlink(ctx context.Context, inode *fs.Inode) (*fs.Dirent, error) { + if !kernel.ContextCanTrace(ctx, n.t, false) { + return nil, syserror.EACCES + } + + // Create a new regular file to fake the namespace file. 
+ node := &ramfs.Entry{} + node.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0777)) + sattr := fs.StableAttr{ + DeviceID: device.ProcDevice.DeviceID(), + InodeID: device.ProcDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.RegularFile, + } + return fs.NewDirent(fs.NewInode(node, inode.MountSource, sattr), n.Symlink.Target), nil +} + +func newNamespaceDir(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + d := &ramfs.Dir{} + d.InitDir(t, map[string]*fs.Inode{ + "net": newNamespaceFile(t, msrc, "net"), + "pid": newNamespaceFile(t, msrc, "pid"), + "user": newNamespaceFile(t, msrc, "user"), + }, fs.RootOwner, fs.FilePermsFromMode(0511)) + return newFile(d, msrc, fs.SpecialDirectory, t) +} + +// mapsData implements seqfile.SeqSource for /proc/[pid]/maps. +type mapsData struct { + t *kernel.Task +} + +func newMaps(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + return newFile(seqfile.NewSeqFile(t, &mapsData{t}), msrc, fs.SpecialFile, t) +} + +func (md *mapsData) mm() *mm.MemoryManager { + var tmm *mm.MemoryManager + md.t.WithMuLocked(func(t *kernel.Task) { + if mm := t.MemoryManager(); mm != nil { + // No additional reference is taken on mm here. This is safe + // because MemoryManager.destroy is required to leave the + // MemoryManager in a state where it's still usable as a SeqSource. + tmm = mm + } + }) + return tmm +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (md *mapsData) NeedsUpdate(generation int64) bool { + if mm := md.mm(); mm != nil { + return mm.NeedsUpdate(generation) + } + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
+func (md *mapsData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if mm := md.mm(); mm != nil { + return mm.ReadSeqFileData(md.t.AsyncContext(), h) + } + return []seqfile.SeqData{}, 0 +} + +type taskStatData struct { + t *kernel.Task + + // If tgstats is true, accumulate fault stats (not implemented) and CPU + // time across all tasks in t's thread group. + tgstats bool + + // pidns is the PID namespace associated with the proc filesystem that + // includes the file using this statData. + pidns *kernel.PIDNamespace +} + +func newTaskStat(t *kernel.Task, msrc *fs.MountSource, showSubtasks bool, pidns *kernel.PIDNamespace) *fs.Inode { + return newFile(seqfile.NewSeqFile(t, &taskStatData{t, showSubtasks /* tgstats */, pidns}), msrc, fs.SpecialFile, t) +} + +// NeedsUpdate returns whether the generation is old or not. +func (s *taskStatData) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData returns data for the SeqFile reader. +// SeqData, the current generation and where in the file the handle corresponds to. 
+func (s *taskStatData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d ", s.pidns.IDOfTask(s.t)) + fmt.Fprintf(&buf, "(%s) ", s.t.Name()) + fmt.Fprintf(&buf, "%c ", s.t.StateStatus()[0]) + ppid := kernel.ThreadID(0) + if parent := s.t.Parent(); parent != nil { + ppid = s.pidns.IDOfThreadGroup(parent.ThreadGroup()) + } + fmt.Fprintf(&buf, "%d ", ppid) + fmt.Fprintf(&buf, "%d ", s.pidns.IDOfProcessGroup(s.t.ThreadGroup().ProcessGroup())) + fmt.Fprintf(&buf, "%d ", s.pidns.IDOfSession(s.t.ThreadGroup().Session())) + fmt.Fprintf(&buf, "0 0 " /* tty_nr tpgid */) + fmt.Fprintf(&buf, "0 " /* flags */) + fmt.Fprintf(&buf, "0 0 0 0 " /* minflt cminflt majflt cmajflt */) + var cputime usage.CPUStats + if s.tgstats { + cputime = s.t.ThreadGroup().CPUStats() + } else { + cputime = s.t.CPUStats() + } + fmt.Fprintf(&buf, "%d %d ", linux.ClockTFromDuration(cputime.UserTime), linux.ClockTFromDuration(cputime.SysTime)) + cputime = s.t.ThreadGroup().JoinedChildCPUStats() + fmt.Fprintf(&buf, "%d %d ", linux.ClockTFromDuration(cputime.UserTime), linux.ClockTFromDuration(cputime.SysTime)) + fmt.Fprintf(&buf, "%d %d ", s.t.Priority(), s.t.Niceness()) + fmt.Fprintf(&buf, "%d ", s.t.ThreadGroup().Count()) + fmt.Fprintf(&buf, "0 0 " /* itrealvalue starttime */) + var vss, rss uint64 + s.t.WithMuLocked(func(t *kernel.Task) { + if mm := t.MemoryManager(); mm != nil { + vss = mm.VirtualMemorySize() + rss = mm.ResidentSetSize() + } + }) + fmt.Fprintf(&buf, "%d %d ", vss, rss/usermem.PageSize) + fmt.Fprintf(&buf, "0 0 0 0 0 0 " /* rsslim startcode endcode startstack kstkesp kstkeip */) + fmt.Fprintf(&buf, "0 0 0 0 0 " /* signal blocked sigignore sigcatch wchan */) + fmt.Fprintf(&buf, "0 0 " /* nswap cnswap */) + terminationSignal := linux.Signal(0) + if s.t == s.t.ThreadGroup().Leader() { + terminationSignal = s.t.ThreadGroup().TerminationSignal() + } + fmt.Fprintf(&buf, "%d ", 
terminationSignal) + fmt.Fprintf(&buf, "0 0 0 " /* processor rt_priority policy */) + fmt.Fprintf(&buf, "0 0 0 " /* delayacct_blkio_ticks guest_time cguest_time */) + fmt.Fprintf(&buf, "0 0 0 0 0 0 0 " /* start_data end_data start_brk arg_start arg_end env_start env_end */) + fmt.Fprintf(&buf, "0\n" /* exit_code */) + + return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*taskStatData)(nil)}}, 0 +} + +// statusData implements seqfile.SeqSource for /proc/[pid]/status. +type statusData struct { + t *kernel.Task + pidns *kernel.PIDNamespace +} + +func newStatus(t *kernel.Task, msrc *fs.MountSource, pidns *kernel.PIDNamespace) *fs.Inode { + return newFile(seqfile.NewSeqFile(t, &statusData{t, pidns}), msrc, fs.SpecialFile, t) +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (s *statusData) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. +func (s *statusData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + var buf bytes.Buffer + fmt.Fprintf(&buf, "Name:\t%s\n", s.t.Name()) + fmt.Fprintf(&buf, "State:\t%s\n", s.t.StateStatus()) + fmt.Fprintf(&buf, "Tgid:\t%d\n", s.pidns.IDOfThreadGroup(s.t.ThreadGroup())) + fmt.Fprintf(&buf, "Pid:\t%d\n", s.pidns.IDOfTask(s.t)) + ppid := kernel.ThreadID(0) + if parent := s.t.Parent(); parent != nil { + ppid = s.pidns.IDOfThreadGroup(parent.ThreadGroup()) + } + fmt.Fprintf(&buf, "PPid:\t%d\n", ppid) + tpid := kernel.ThreadID(0) + if tracer := s.t.Tracer(); tracer != nil { + tpid = s.pidns.IDOfTask(tracer) + } + fmt.Fprintf(&buf, "TracerPid:\t%d\n", tpid) + var fds int + var vss, rss uint64 + s.t.WithMuLocked(func(t *kernel.Task) { + if fdm := t.FDMap(); fdm != nil { + fds = fdm.Size() + } + if mm := t.MemoryManager(); mm != nil { + vss = mm.VirtualMemorySize() + rss = mm.ResidentSetSize() + } + }) + fmt.Fprintf(&buf, "FDSize:\t%d\n", fds) + fmt.Fprintf(&buf, "VmSize:\t%d kB\n", vss>>10) + 
fmt.Fprintf(&buf, "VmRSS:\t%d kB\n", rss>>10) + fmt.Fprintf(&buf, "Threads:\t%d\n", s.t.ThreadGroup().Count()) + creds := s.t.Credentials() + fmt.Fprintf(&buf, "CapInh:\t%016x\n", creds.InheritableCaps) + fmt.Fprintf(&buf, "CapPrm:\t%016x\n", creds.PermittedCaps) + fmt.Fprintf(&buf, "CapEff:\t%016x\n", creds.EffectiveCaps) + fmt.Fprintf(&buf, "CapBnd:\t%016x\n", creds.BoundingCaps) + fmt.Fprintf(&buf, "Seccomp:\t%d\n", s.t.SeccompMode()) + return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*statusData)(nil)}}, 0 +} + +// ioUsage is the /proc/<pid>/io and /proc/<pid>/task/<tid>/io data provider. +type ioUsage interface { + // IOUsage returns the io usage data. + IOUsage() *usage.IO +} + +type ioData struct { + ioUsage +} + +func newIO(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + return newFile(seqfile.NewSeqFile(t, &ioData{t.ThreadGroup()}), msrc, fs.SpecialFile, t) +} + +// NeedsUpdate returns whether the generation is old or not. +func (i *ioData) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData returns data for the SeqFile reader. +// SeqData, the current generation and where in the file the handle corresponds to. +func (i *ioData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + io := usage.IO{} + io.Accumulate(i.IOUsage()) + + var buf bytes.Buffer + fmt.Fprintf(&buf, "char: %d\n", io.CharsRead) + fmt.Fprintf(&buf, "wchar: %d\n", io.CharsWritten) + fmt.Fprintf(&buf, "syscr: %d\n", io.ReadSyscalls) + fmt.Fprintf(&buf, "syscw: %d\n", io.WriteSyscalls) + fmt.Fprintf(&buf, "read_bytes: %d\n", io.BytesRead) + fmt.Fprintf(&buf, "write_bytes: %d\n", io.BytesWritten) + fmt.Fprintf(&buf, "cancelled_write_bytes: %d\n", io.BytesWriteCancelled) + + return []seqfile.SeqData{{Buf: buf.Bytes(), Handle: (*ioData)(nil)}}, 0 +} + +// comm is a file containing the command name for a task. 
+// +// On Linux, /proc/[pid]/comm is writable, and writing to the comm file changes +// the thread name. We don't implement this yet as there are no known users of +// this feature. +type comm struct { + ramfs.Entry + + t *kernel.Task +} + +// newComm returns a new comm file. +func newComm(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + c := &comm{t: t} + c.InitEntry(t, fs.RootOwner, fs.FilePermsFromMode(0444)) + return newFile(c, msrc, fs.SpecialFile, t) +} + +// DeprecatedPreadv reads the current command name. +func (c *comm) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + if offset < 0 { + return 0, syserror.EINVAL + } + + buf := []byte(c.t.Name() + "\n") + if offset >= int64(len(buf)) { + return 0, io.EOF + } + + n, err := dst.CopyOut(ctx, buf[offset:]) + return int64(n), err +} + +// auxvec is a file containing the auxiliary vector for a task. +type auxvec struct { + ramfs.Entry + + t *kernel.Task +} + +// newAuxvec returns a new auxvec file. +func newAuxvec(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + a := &auxvec{t: t} + a.InitEntry(t, fs.RootOwner, fs.FilePermsFromMode(0400)) + return newFile(a, msrc, fs.SpecialFile, t) +} + +// DeprecatedPreadv reads the current auxiliary vector. +func (a *auxvec) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + if offset < 0 { + return 0, syserror.EINVAL + } + + m, err := getTaskMM(a.t) + if err != nil { + return 0, err + } + defer m.DecUsers(ctx) + auxv := m.Auxv() + + // Space for buffer with AT_NULL (0) terminator at the end. 
+ size := (len(auxv) + 1) * 16 + if offset >= int64(size) { + return 0, io.EOF + } + + buf := make([]byte, size) + for i, e := range auxv { + usermem.ByteOrder.PutUint64(buf[16*i:], e.Key) + usermem.ByteOrder.PutUint64(buf[16*i+8:], uint64(e.Value)) + } + + n, err := dst.CopyOut(ctx, buf[offset:]) + return int64(n), err +} diff --git a/pkg/sentry/fs/proc/uid_gid_map.go b/pkg/sentry/fs/proc/uid_gid_map.go new file mode 100644 index 000000000..a2a070bdd --- /dev/null +++ b/pkg/sentry/fs/proc/uid_gid_map.go @@ -0,0 +1,152 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "bytes" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// An idMapSeqSource is a seqfile.SeqSource that returns UID or GID mappings +// from a task's user namespace. +type idMapSeqSource struct { + t *kernel.Task + gids bool +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (imss *idMapSeqSource) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. 
+func (imss *idMapSeqSource) ReadSeqFileData(handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + var start int + if handle != nil { + start = handle.(*idMapSeqHandle).value + } + var entries []auth.IDMapEntry + if imss.gids { + entries = imss.t.UserNamespace().GIDMap() + } else { + entries = imss.t.UserNamespace().UIDMap() + } + var data []seqfile.SeqData + i := 1 + for _, e := range entries { + if i > start { + data = append(data, seqfile.SeqData{ + Buf: idMapLineFromEntry(e), + Handle: &idMapSeqHandle{i}, + }) + } + i++ + } + return data, 0 +} + +// TODO: Fix issue requiring idMapSeqHandle wrapping an int. +type idMapSeqHandle struct { + value int +} + +type idMapSeqFile struct { + seqfile.SeqFile +} + +// newUIDMap returns a new uid_map file. +func newUIDMap(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + return newIDMap(t, msrc, false /* gids */) +} + +// newGIDMap returns a new gid_map file. +func newGIDMap(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { + return newIDMap(t, msrc, true /* gids */) +} + +func newIDMap(t *kernel.Task, msrc *fs.MountSource, gids bool) *fs.Inode { + imsf := &idMapSeqFile{seqfile.SeqFile{SeqSource: &idMapSeqSource{ + t: t, + gids: gids, + }}} + imsf.InitEntry(t, fs.RootOwner, fs.FilePermsFromMode(0644)) + return newFile(imsf, msrc, fs.SpecialFile, t) +} + +func (imsf *idMapSeqFile) source() *idMapSeqSource { + return imsf.SeqFile.SeqSource.(*idMapSeqSource) +} + +// "There is an (arbitrary) limit on the number of lines in the file. As at +// Linux 3.18, the limit is five lines." - user_namespaces(7) +const maxIDMapLines = 5 + +// DeprecatedPwritev implements fs.InodeOperations.DeprecatedPwritev. +func (imsf *idMapSeqFile) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + // "In addition, the number of bytes written to the file must be less than + // the system page size, and the write must be performed at the start of + // the file ..." 
- user_namespaces(7) + srclen := src.NumBytes() + if srclen >= usermem.PageSize || offset != 0 { + return 0, syserror.EINVAL + } + b := make([]byte, srclen) + if _, err := src.CopyIn(ctx, b); err != nil { + return 0, err + } + lines := bytes.SplitN(bytes.TrimSpace(b), []byte("\n"), maxIDMapLines+1) + if len(lines) > maxIDMapLines { + return 0, syserror.EINVAL + } + entries := make([]auth.IDMapEntry, len(lines)) + for i, l := range lines { + e, err := idMapEntryFromLine(string(l)) + if err != nil { + return 0, syserror.EINVAL + } + entries[i] = e + } + t := imsf.source().t + var err error + if imsf.source().gids { + err = t.UserNamespace().SetGIDMap(ctx, entries) + } else { + err = t.UserNamespace().SetUIDMap(ctx, entries) + } + if err != nil { + return 0, err + } + return int64(len(b)), nil +} + +func idMapLineFromEntry(e auth.IDMapEntry) []byte { + var b bytes.Buffer + fmt.Fprintf(&b, "%10d %10d %10d\n", e.FirstID, e.FirstParentID, e.Length) + return b.Bytes() +} + +func idMapEntryFromLine(line string) (auth.IDMapEntry, error) { + var e auth.IDMapEntry + _, err := fmt.Sscan(line, &e.FirstID, &e.FirstParentID, &e.Length) + return e, err +} diff --git a/pkg/sentry/fs/proc/uptime.go b/pkg/sentry/fs/proc/uptime.go new file mode 100644 index 000000000..4679d5821 --- /dev/null +++ b/pkg/sentry/fs/proc/uptime.go @@ -0,0 +1,61 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proc + +import ( + "fmt" + "io" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// uptime is a file containing the system uptime. +type uptime struct { + ramfs.Entry + + // The "start time" of the sandbox. + startTime ktime.Time +} + +// newUptime returns a new uptime file. +func (p *proc) newUptime(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + u := &uptime{ + startTime: ktime.NowFromContext(ctx), + } + u.InitEntry(ctx, fs.RootOwner, fs.FilePermsFromMode(0444)) + return newFile(u, msrc, fs.SpecialFile, nil) +} + +// DeprecatedPreadv reads the current uptime. +func (u *uptime) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + if offset < 0 { + return 0, syserror.EINVAL + } + + now := ktime.NowFromContext(ctx) + // Pretend that we've spent zero time sleeping (second number). + s := []byte(fmt.Sprintf("%.2f 0.00\n", now.Sub(u.startTime).Seconds())) + if offset >= int64(len(s)) { + return 0, io.EOF + } + + n, err := dst.CopyOut(ctx, s[offset:]) + return int64(n), err +} diff --git a/pkg/sentry/fs/proc/version.go b/pkg/sentry/fs/proc/version.go new file mode 100644 index 000000000..df3040d37 --- /dev/null +++ b/pkg/sentry/fs/proc/version.go @@ -0,0 +1,75 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proc + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" +) + +// versionData backs /proc/version. +type versionData struct { + // k is the owning Kernel. + k *kernel.Kernel +} + +// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate. +func (*versionData) NeedsUpdate(generation int64) bool { + return true +} + +// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. +func (v *versionData) ReadSeqFileData(h seqfile.SeqHandle) ([]seqfile.SeqData, int64) { + if h != nil { + return nil, 0 + } + + init := v.k.GlobalInit() + if init == nil { + // Attempted to read before the init Task is created. This can + // only occur during startup, which should never need to read + // this file. + panic("Attempted to read version before initial Task is available") + } + + // /proc/version takes the form: + // + // "SYSNAME version RELEASE (COMPILE_USER@COMPILE_HOST) + // (COMPILER_VERSION) VERSION" + // + // where: + // - SYSNAME, RELEASE, and VERSION are the same as returned by + // sys_utsname + // - COMPILE_USER is the user that build the kernel + // - COMPILE_HOST is the hostname of the machine on which the kernel + // was built + // - COMPILER_VERSION is the version reported by the building compiler + // + // Since we don't really want to expose build information to + // applications, those fields are omitted. 
+ // + // FIXME: Using Version from the init task SyscallTable + // disregards the different version a task may have (e.g., in a uts + // namespace). + ver := init.Leader().SyscallTable().Version + return []seqfile.SeqData{ + { + Buf: []byte(fmt.Sprintf("%s version %s %s\n", ver.Sysname, ver.Release, ver.Version)), + Handle: (*versionData)(nil), + }, + }, 0 +} diff --git a/pkg/sentry/fs/ramfs/BUILD b/pkg/sentry/fs/ramfs/BUILD new file mode 100644 index 000000000..663a1aeb9 --- /dev/null +++ b/pkg/sentry/fs/ramfs/BUILD @@ -0,0 +1,62 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "ramfs_state", + srcs = [ + "dir.go", + "file.go", + "ramfs.go", + "socket.go", + "symlink.go", + ], + out = "ramfs_state.go", + package = "ramfs", +) + +go_library( + name = "ramfs", + srcs = [ + "dir.go", + "file.go", + "ramfs.go", + "ramfs_state.go", + "socket.go", + "symlink.go", + "tree.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/amutex", + "//pkg/log", + "//pkg/refs", + "//pkg/secio", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/anon", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel/time", + "//pkg/sentry/memmap", + "//pkg/sentry/safemem", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) + +go_test( + name = "ramfs_test", + size = "small", + srcs = ["tree_test.go"], + embed = [":ramfs"], + deps = [ + "//pkg/sentry/context/contexttest", + "//pkg/sentry/fs", + ], +) diff --git a/pkg/sentry/fs/ramfs/dir.go b/pkg/sentry/fs/ramfs/dir.go new file mode 100644 index 000000000..bf4cd8dfd --- /dev/null +++ b/pkg/sentry/fs/ramfs/dir.go @@ -0,0 +1,364 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ramfs + +import ( + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +// CreateOps represents operations to create different file types. +type CreateOps struct { + // NewDir creates a new directory. + NewDir func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) + + // NewFile creates a new file. + NewFile func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) + + // NewSymlink creates a new symlink with permissions 0777. + NewSymlink func(ctx context.Context, dir *fs.Inode, target string) (*fs.Inode, error) + + // NewBoundEndpoint creates a new socket. + NewBoundEndpoint func(ctx context.Context, dir *fs.Inode, ep unix.BoundEndpoint, perms fs.FilePermissions) (*fs.Inode, error) + + // NewFifo creates a new fifo. + NewFifo func(ctx context.Context, dir *fs.Inode, perm fs.FilePermissions) (*fs.Inode, error) +} + +// Dir represents a single directory in the filesystem. +type Dir struct { + Entry + + // CreateOps may be provided. + // + // These may only be modified during initialization (while the application + // is not running). 
No synchronization is performed when accessing these
+	// operations during syscalls.
+	*CreateOps `state:"nosave"`
+
+	// mu protects the fields below.
+	mu sync.Mutex `state:"nosave"`
+
+	// children are inodes that are in this directory. A reference is held
+	// on each inode while it is in the map.
+	children map[string]*fs.Inode
+
+	// dentryMap is a sortedDentryMap containing entries for all children.
+	// Its entries are kept up-to-date with d.children.
+	dentryMap *fs.SortedDentryMap
+}
+
+// InitDir initializes a directory.
+func (d *Dir) InitDir(ctx context.Context, contents map[string]*fs.Inode, owner fs.FileOwner, perms fs.FilePermissions) {
+	d.InitEntry(ctx, owner, perms)
+	if contents == nil {
+		contents = make(map[string]*fs.Inode)
+	}
+	d.children = contents
+	// Build the entries map ourselves, rather than calling addChildLocked,
+	// because it will be faster.
+	entries := make(map[string]fs.DentAttr, len(contents))
+	for name, inode := range contents {
+		entries[name] = fs.DentAttr{
+			Type:    inode.StableAttr.Type,
+			InodeID: inode.StableAttr.InodeID,
+		}
+	}
+	d.dentryMap = fs.NewSortedDentryMap(entries)
+
+	// Directories have an extra link, corresponding to '.'.
+	d.AddLink()
+}
+
+// addChildLocked adds the child inode, inheriting its reference.
+//
+// Preconditions: d.mu must be held (all callers lock it; InitDir bypasses
+// this helper entirely).
+func (d *Dir) addChildLocked(name string, inode *fs.Inode) {
+	d.children[name] = inode
+	d.dentryMap.Add(name, fs.DentAttr{
+		Type:    inode.StableAttr.Type,
+		InodeID: inode.StableAttr.InodeID,
+	})
+
+	// If the child is a directory, increment this dir's link count,
+	// corresponding to '..' from the subdirectory.
+	if fs.IsDir(inode.StableAttr) {
+		d.AddLink()
+	}
+
+	// Given we're now adding this inode to the directory we must also
+	// increase its link count. Similarly we decremented it in removeChildLocked.
+	inode.AddLink()
+}
+
+// AddChild adds a child to this dir.
+func (d *Dir) AddChild(ctx context.Context, name string, inode *fs.Inode) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	d.addChildLocked(name, inode)
+}
+
+// FindChild returns (child, true) if the directory contains name.
+func (d *Dir) FindChild(name string) (*fs.Inode, bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	child, ok := d.children[name]
+	return child, ok
+}
+
+// removeChildLocked attempts to remove an entry from this directory.
+// d.mu must be held. It returns the removed Inode.
+func (d *Dir) removeChildLocked(ctx context.Context, name string) (*fs.Inode, error) {
+	inode, ok := d.children[name]
+	if !ok {
+		return nil, ErrNotFound
+	}
+
+	delete(d.children, name)
+	d.dentryMap.Remove(name)
+	d.Entry.NotifyModification(ctx)
+
+	// If the child was a subdirectory, then we must decrement this dir's
+	// link count which was the child's ".." directory entry.
+	if fs.IsDir(inode.StableAttr) {
+		d.DropLink()
+	}
+
+	// Update ctime.
+	inode.NotifyStatusChange(ctx)
+
+	// Given we're now removing this inode from the directory we must also
+	// decrease its link count. Similarly it is increased in addChildLocked.
+	inode.DropLink()
+
+	return inode, nil
+}
+
+// RemoveEntry attempts to remove an entry from this directory.
+func (d *Dir) RemoveEntry(ctx context.Context, name string) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	inode, err := d.removeChildLocked(ctx, name)
+	if err != nil {
+		return err
+	}
+
+	// Remove our reference on the inode.
+	inode.DecRef()
+	return nil
+}
+
+// Remove removes the named non-directory.
+func (d *Dir) Remove(ctx context.Context, dir *fs.Inode, name string) error {
+	return d.RemoveEntry(ctx, name)
+}
+
+// RemoveDirectory removes the named directory.
+func (d *Dir) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	n, err := d.walkLocked(ctx, name)
+	if err != nil {
+		return err
+	}
+	// A directory must be empty (no visible entries) before it may be
+	// removed.
+	dirCtx := &fs.DirCtx{}
+	if _, err := n.HandleOps().DeprecatedReaddir(ctx, dirCtx, 0); err != nil {
+		return err
+	}
+	if len(dirCtx.DentAttrs()) > 0 {
+		return ErrNotEmpty
+	}
+	inode, err := d.removeChildLocked(ctx, name)
+	if err != nil {
+		return err
+	}
+
+	// Remove our reference on the inode.
+	inode.DecRef()
+
+	// err is necessarily nil at this point; return nil explicitly.
+	return nil
+}
+
+// Lookup loads an inode at p into a Dirent.
+func (d *Dir) Lookup(ctx context.Context, dir *fs.Inode, p string) (*fs.Dirent, error) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	inode, err := d.walkLocked(ctx, p)
+	if err != nil {
+		return nil, err
+	}
+
+	// Take a reference on the inode before returning it. This reference
+	// is owned by the dirent we are about to create.
+	inode.IncRef()
+	return fs.NewDirent(inode, p), nil
+}
+
+// walkLocked resolves p to a child inode. d.mu must be held.
+func (d *Dir) walkLocked(ctx context.Context, p string) (*fs.Inode, error) {
+	d.Entry.NotifyAccess(ctx)
+
+	// Lookup a child node.
+	if inode, ok := d.children[p]; ok {
+		return inode, nil
+	}
+
+	// fs.InodeOperations.Lookup returns syserror.ENOENT if p
+	// does not exist.
+	return nil, syserror.ENOENT
+}
+
+// createInodeOperationsCommon creates a new child node at this dir by calling
+// makeInodeOperations. It is the common logic for creating a new child.
+func (d *Dir) createInodeOperationsCommon(ctx context.Context, name string, makeInodeOperations func() (*fs.Inode, error)) (*fs.Inode, error) { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.children[name]; ok { + return nil, syscall.EEXIST + } + + inode, err := makeInodeOperations() + if err != nil { + return nil, err + } + + d.addChildLocked(name, inode) + d.Entry.NotifyModification(ctx) + + return inode, nil +} + +// Create creates a new Inode with the given name and returns its File. +func (d *Dir) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perms fs.FilePermissions) (*fs.File, error) { + if d.CreateOps == nil || d.CreateOps.NewFile == nil { + return nil, ErrDenied + } + + inode, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) { + return d.NewFile(ctx, dir, perms) + }) + if err != nil { + return nil, err + } + + // Take an extra ref on inode, which will be owned by the dirent. + inode.IncRef() + + // Create the Dirent and corresponding file. + created := fs.NewDirent(inode, name) + defer created.DecRef() + return created.Inode.GetFile(ctx, created, flags) +} + +// CreateLink returns a new link. +func (d *Dir) CreateLink(ctx context.Context, dir *fs.Inode, oldname, newname string) error { + if d.CreateOps == nil || d.CreateOps.NewSymlink == nil { + return ErrDenied + } + _, err := d.createInodeOperationsCommon(ctx, newname, func() (*fs.Inode, error) { + return d.NewSymlink(ctx, dir, oldname) + }) + return err +} + +// CreateHardLink creates a new hard link. +func (d *Dir) CreateHardLink(ctx context.Context, dir *fs.Inode, target *fs.Inode, name string) error { + d.mu.Lock() + defer d.mu.Unlock() + + // Take an extra reference on the inode and add it to our children. + target.IncRef() + + // The link count will be incremented in addChildLocked. + d.addChildLocked(name, target) + d.Entry.NotifyModification(ctx) + + // Update ctime. 
+ target.NotifyStatusChange(ctx) + + return nil +} + +// CreateDirectory returns a new subdirectory. +func (d *Dir) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perms fs.FilePermissions) error { + if d.CreateOps == nil || d.CreateOps.NewDir == nil { + return ErrDenied + } + _, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) { + return d.NewDir(ctx, dir, perms) + }) + // TODO: Support updating status times, as those should be + // updated by links. + return err +} + +// Bind implements fs.InodeOperations.Bind. +func (d *Dir) Bind(ctx context.Context, dir *fs.Inode, name string, ep unix.BoundEndpoint, perms fs.FilePermissions) error { + if d.CreateOps == nil || d.CreateOps.NewBoundEndpoint == nil { + return ErrDenied + } + _, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) { + return d.NewBoundEndpoint(ctx, dir, ep, perms) + }) + if err == syscall.EEXIST { + return syscall.EADDRINUSE + } + return err +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. +func (d *Dir) CreateFifo(ctx context.Context, dir *fs.Inode, name string, perms fs.FilePermissions) error { + if d.CreateOps == nil || d.CreateOps.NewFifo == nil { + return ErrDenied + } + _, err := d.createInodeOperationsCommon(ctx, name, func() (*fs.Inode, error) { + return d.NewFifo(ctx, dir, perms) + }) + return err +} + +func (d *Dir) readdirLocked(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) { + // Serialize the entries in dentryMap. + n, err := fs.GenericReaddir(dirCtx, d.dentryMap) + + // Touch the access time. + d.Entry.NotifyAccess(ctx) + + return offset + n, err +} + +// DeprecatedReaddir emits the entries contained in this directory. 
+func (d *Dir) DeprecatedReaddir(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) { + d.mu.Lock() + defer d.mu.Unlock() + return d.readdirLocked(ctx, dirCtx, offset) +} + +// DeprecatedPreadv always returns ErrIsDirectory +func (*Dir) DeprecatedPreadv(context.Context, usermem.IOSequence, int64) (int64, error) { + return 0, ErrIsDirectory +} + +// DeprecatedPwritev always returns ErrIsDirectory +func (*Dir) DeprecatedPwritev(context.Context, usermem.IOSequence, int64) (int64, error) { + return 0, ErrIsDirectory +} diff --git a/pkg/sentry/fs/ramfs/file.go b/pkg/sentry/fs/ramfs/file.go new file mode 100644 index 000000000..e8363c3e2 --- /dev/null +++ b/pkg/sentry/fs/ramfs/file.go @@ -0,0 +1,148 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ramfs + +import ( + "io" + "sync" + + "gvisor.googlesource.com/gvisor/pkg/secio" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// File represents a unique file. It uses a simple byte slice as storage, and +// thus should only be used for small files. +// +// A File is not mappable. +type File struct { + Entry + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // data tracks backing data for the file. 
+ data []byte +} + +// InitFile initializes a file. +func (f *File) InitFile(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions) { + f.InitEntry(ctx, owner, perms) +} + +// UnstableAttr returns unstable attributes of this ramfs file. +func (f *File) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + f.mu.Lock() + defer f.mu.Unlock() + + uattr, _ := f.Entry.UnstableAttr(ctx, inode) + uattr.Size = int64(len(f.data)) + uattr.Usage = f.usageLocked() + + return uattr, nil +} + +// usageLocked returns the disk usage. Caller must hold f.mu. +func (f *File) usageLocked() int64 { + return int64(len(f.data)) +} + +// Append appends the given data. This is for internal use. +func (f *File) Append(data []byte) { + f.mu.Lock() + defer f.mu.Unlock() + f.data = append(f.data, data...) +} + +// Truncate truncates this node. +func (f *File) Truncate(ctx context.Context, inode *fs.Inode, l int64) error { + f.mu.Lock() + defer f.mu.Unlock() + if l < int64(len(f.data)) { + // Remove excess bytes. + f.data = f.data[:l] + return nil + } else if l > int64(len(f.data)) { + // Create a new slice with size l, and copy f.data into it. + d := make([]byte, l) + copy(d, f.data) + f.data = d + } + f.Entry.NotifyModification(ctx) + return nil +} + +// ReadAt implements io.ReaderAt. +func (f *File) ReadAt(data []byte, offset int64) (int, error) { + if offset < 0 { + return 0, ErrInvalidOp + } + if offset >= int64(len(f.data)) { + return 0, io.EOF + } + n := copy(data, f.data[offset:]) + // Did we read past the end? + if offset+int64(len(data)) >= int64(len(f.data)) { + return n, io.EOF + } + return n, nil +} + +// DeprecatedPreadv reads into a collection of slices from a given offset. 
+func (f *File) DeprecatedPreadv(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) { + f.mu.Lock() + defer f.mu.Unlock() + if offset >= int64(len(f.data)) { + return 0, io.EOF + } + n, err := dst.CopyOut(ctx, f.data[offset:]) + if n > 0 { + f.Entry.NotifyAccess(ctx) + } + return int64(n), err +} + +// WriteAt implements io.WriterAt. +func (f *File) WriteAt(data []byte, offset int64) (int, error) { + if offset < 0 { + return 0, ErrInvalidOp + } + newLen := offset + int64(len(data)) + if newLen < 0 { + // Overflow. + return 0, syserror.EINVAL + } + if newLen > int64(len(f.data)) { + // Copy f.data into new slice with expanded length. + d := make([]byte, newLen) + copy(d, f.data) + f.data = d + } + return copy(f.data[offset:], data), nil +} + +// DeprecatedPwritev writes from a collection of slices at a given offset. +func (f *File) DeprecatedPwritev(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) { + f.mu.Lock() + defer f.mu.Unlock() + n, err := src.CopyInTo(ctx, safemem.FromIOWriter{secio.NewOffsetWriter(f, offset)}) + if n > 0 { + f.Entry.NotifyModification(ctx) + } + return n, err +} diff --git a/pkg/sentry/fs/ramfs/ramfs.go b/pkg/sentry/fs/ramfs/ramfs.go new file mode 100644 index 000000000..04f2d38de --- /dev/null +++ b/pkg/sentry/fs/ramfs/ramfs.go @@ -0,0 +1,433 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package ramfs implements an in-memory file system that can be associated with +// any device. +package ramfs + +import ( + "errors" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +var ( + // ErrInvalidOp indicates the operation is not valid. + ErrInvalidOp = errors.New("invalid operation") + + // ErrDenied indicates the operation was denied. + ErrDenied = errors.New("operation denied") + + // ErrNotFound indicates that a node was not found on a walk. + ErrNotFound = errors.New("node not found") + + // ErrCrossDevice indicates a cross-device link or rename. + ErrCrossDevice = errors.New("can't link across filesystems") + + // ErrIsDirectory indicates that the operation failed because + // the node is a directory. + ErrIsDirectory = errors.New("is a directory") + + // ErrNotDirectory indicates that the operation failed because + // the node is not a directory. + ErrNotDirectory = errors.New("not a directory") + + // ErrNotEmpty indicates that the operation failed because the + // directory is not empty. + ErrNotEmpty = errors.New("directory not empty") +) + +// Entry represents common internal state for file and directory nodes. +// This may be used by other packages to easily create ramfs files. +type Entry struct { + waiter.AlwaysReady `state:"nosave"` + fsutil.NoMappable `state:"nosave"` + fsutil.NoopWriteOut `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // unstable is unstable attributes.
+ unstable fs.UnstableAttr + + // xattrs are the extended attributes of the Entry. + xattrs map[string][]byte +} + +// InitEntry initializes an entry. +func (e *Entry) InitEntry(ctx context.Context, owner fs.FileOwner, p fs.FilePermissions) { + e.InitEntryWithAttr(ctx, fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: owner, + Perms: p, + // Always start unlinked. + Links: 0, + })) +} + +// InitEntryWithAttr initializes an entry with a complete set of attributes. +func (e *Entry) InitEntryWithAttr(ctx context.Context, uattr fs.UnstableAttr) { + e.unstable = uattr + e.xattrs = make(map[string][]byte) +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (e *Entry) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + e.mu.Lock() + defer e.mu.Unlock() + return e.unstable, nil +} + +// Check implements fs.InodeOperations.Check. +func (*Entry) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +// Getxattr implements fs.InodeOperations.Getxattr. +func (e *Entry) Getxattr(inode *fs.Inode, name string) ([]byte, error) { + e.mu.Lock() + defer e.mu.Unlock() + if value, ok := e.xattrs[name]; ok { + return value, nil + } + return nil, syserror.ENOATTR +} + +// Setxattr implements fs.InodeOperations.Setxattr. +func (e *Entry) Setxattr(inode *fs.Inode, name string, value []byte) error { + e.mu.Lock() + defer e.mu.Unlock() + e.xattrs[name] = value + return nil +} + +// Listxattr implements fs.InodeOperations.Listxattr. +func (e *Entry) Listxattr(inode *fs.Inode) (map[string]struct{}, error) { + e.mu.Lock() + defer e.mu.Unlock() + names := make(map[string]struct{}, len(e.xattrs)) + for name := range e.xattrs { + names[name] = struct{}{} + } + return names, nil +} + +// GetFile returns a fs.File backed by the dirent argument and flags. 
+func (*Entry) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fsutil.NewHandle(ctx, d, flags, d.Inode.HandleOps()), nil +} + +// SetPermissions always sets the permissions. +func (e *Entry) SetPermissions(ctx context.Context, inode *fs.Inode, p fs.FilePermissions) bool { + e.mu.Lock() + defer e.mu.Unlock() + e.unstable.Perms = p + e.unstable.StatusChangeTime = ktime.NowFromContext(ctx) + return true +} + +// SetOwner always sets ownership. +func (e *Entry) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error { + e.mu.Lock() + defer e.mu.Unlock() + if owner.UID.Ok() { + e.unstable.Owner.UID = owner.UID + } + if owner.GID.Ok() { + e.unstable.Owner.GID = owner.GID + } + return nil +} + +// SetTimestamps sets the timestamps. +func (e *Entry) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error { + if ts.ATimeOmit && ts.MTimeOmit { + return nil + } + + e.mu.Lock() + defer e.mu.Unlock() + + now := ktime.NowFromContext(ctx) + if !ts.ATimeOmit { + if ts.ATimeSetSystemTime { + e.unstable.AccessTime = now + } else { + e.unstable.AccessTime = ts.ATime + } + } + if !ts.MTimeOmit { + if ts.MTimeSetSystemTime { + e.unstable.ModificationTime = now + } else { + e.unstable.ModificationTime = ts.MTime + } + } + e.unstable.StatusChangeTime = now + return nil +} + +// NotifyStatusChange updates the status change time (ctime). +func (e *Entry) NotifyStatusChange(ctx context.Context) { + e.mu.Lock() + defer e.mu.Unlock() + e.unstable.StatusChangeTime = ktime.NowFromContext(ctx) +} + +// StatusChangeTime returns the last status change time for this node. +func (e *Entry) StatusChangeTime() ktime.Time { + e.mu.Lock() + defer e.mu.Unlock() + return e.unstable.StatusChangeTime +} + +// NotifyModification updates the modification time and the status change time. 
+func (e *Entry) NotifyModification(ctx context.Context) { + e.mu.Lock() + defer e.mu.Unlock() + now := ktime.NowFromContext(ctx) + e.unstable.ModificationTime = now + e.unstable.StatusChangeTime = now +} + +// ModificationTime returns the last modification time for this node. +func (e *Entry) ModificationTime() ktime.Time { + e.mu.Lock() + defer e.mu.Unlock() + return e.unstable.ModificationTime +} + +// NotifyAccess updates the access time. +func (e *Entry) NotifyAccess(ctx context.Context) { + e.mu.Lock() + defer e.mu.Unlock() + now := ktime.NowFromContext(ctx) + e.unstable.AccessTime = now +} + +// AccessTime returns the last access time for this node. +func (e *Entry) AccessTime() ktime.Time { + e.mu.Lock() + defer e.mu.Unlock() + return e.unstable.AccessTime +} + +// Permissions returns permissions on this entry. +func (e *Entry) Permissions() fs.FilePermissions { + e.mu.Lock() + defer e.mu.Unlock() + return e.unstable.Perms +} + +// Lookup is not supported by default. +func (*Entry) Lookup(context.Context, *fs.Inode, string) (*fs.Dirent, error) { + return nil, ErrInvalidOp +} + +// Create is not supported by default. +func (*Entry) Create(context.Context, *fs.Inode, string, fs.FileFlags, fs.FilePermissions) (*fs.File, error) { + return nil, ErrInvalidOp +} + +// CreateLink is not supported by default. +func (*Entry) CreateLink(context.Context, *fs.Inode, string, string) error { + return ErrInvalidOp +} + +// CreateHardLink is not supported by default. +func (*Entry) CreateHardLink(context.Context, *fs.Inode, *fs.Inode, string) error { + return ErrInvalidOp +} + +// IsVirtual returns true. +func (*Entry) IsVirtual() bool { + return true +} + +// CreateDirectory is not supported by default. +func (*Entry) CreateDirectory(context.Context, *fs.Inode, string, fs.FilePermissions) error { + return ErrInvalidOp +} + +// Bind is not supported by default. 
+func (*Entry) Bind(context.Context, *fs.Inode, string, unix.BoundEndpoint, fs.FilePermissions) error { + return ErrInvalidOp +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. CreateFifo is not supported by +// default. +func (*Entry) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error { + return ErrInvalidOp +} + +// Remove is not supported by default. +func (*Entry) Remove(context.Context, *fs.Inode, string) error { + return ErrInvalidOp +} + +// RemoveDirectory is not supported by default. +func (*Entry) RemoveDirectory(context.Context, *fs.Inode, string) error { + return ErrInvalidOp +} + +// StatFS always returns ENOSYS. +func (*Entry) StatFS(context.Context) (fs.Info, error) { + return fs.Info{}, syscall.ENOSYS +} + +// Rename implements fs.InodeOperations.Rename. +func (e *Entry) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error { + return Rename(ctx, oldParent.InodeOperations, oldName, newParent.InodeOperations, newName) +} + +// Rename renames from a *ramfs.Dir to another *ramfs.Dir. +func Rename(ctx context.Context, oldParent fs.InodeOperations, oldName string, newParent fs.InodeOperations, newName string) error { + op, ok := oldParent.(*Dir) + if !ok { + return ErrCrossDevice + } + np, ok := newParent.(*Dir) + if !ok { + return ErrCrossDevice + } + + np.mu.Lock() + defer np.mu.Unlock() + + // Check whether the ramfs entry to be replaced is a non-empty directory. + if replaced, ok := np.children[newName]; ok { + if fs.IsDir(replaced.StableAttr) { + // FIXME: simplify by pinning children of ramfs-backed directories + // in the Dirent tree: this allows us to generalize ramfs operations without + // relying on an implementation of Readdir (which may do anything, like require + // that the file be open ... which would be reasonable). 
+ dirCtx := &fs.DirCtx{} + _, err := replaced.HandleOps().DeprecatedReaddir(ctx, dirCtx, 0) + if err != nil { + return err + } + attrs := dirCtx.DentAttrs() + + // ramfs-backed directories should not contain "." and "..", but we do this + // just in case. + delete(attrs, ".") + delete(attrs, "..") + + // If the directory to be replaced is not empty, reject the rename. + if len(attrs) != 0 { + return ErrNotEmpty + } + } + } + + // Be careful, we may have already grabbed this mutex above. + if op != np { + op.mu.Lock() + defer op.mu.Unlock() + } + + // Do the swap. + n := op.children[oldName] + op.removeChildLocked(ctx, oldName) + np.addChildLocked(newName, n) + + // Update ctime. + n.NotifyStatusChange(ctx) + + return nil +} + +// Truncate is not supported by default. +func (*Entry) Truncate(context.Context, *fs.Inode, int64) error { + return ErrInvalidOp +} + +// Readlink always returns ENOLINK. +func (*Entry) Readlink(context.Context, *fs.Inode) (string, error) { + return "", syscall.ENOLINK +} + +// Getlink always returns ENOLINK. +func (*Entry) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) { + return nil, syscall.ENOLINK +} + +// Release is a no-op. +func (e *Entry) Release(context.Context) {} + +// AddLink implements InodeOperations.AddLink. +func (e *Entry) AddLink() { + e.mu.Lock() + defer e.mu.Unlock() + e.unstable.Links++ +} + +// DropLink implements InodeOperations.DropLink. +func (e *Entry) DropLink() { + e.mu.Lock() + defer e.mu.Unlock() + e.unstable.Links-- +} + +// DeprecatedReaddir is not supported by default. +func (*Entry) DeprecatedReaddir(context.Context, *fs.DirCtx, int) (int, error) { + return 0, ErrNotDirectory +} + +// DeprecatedPreadv always returns ErrInvalidOp. +func (*Entry) DeprecatedPreadv(context.Context, usermem.IOSequence, int64) (int64, error) { + return 0, ErrInvalidOp +} + +// DeprecatedPwritev always returns ErrInvalidOp.
+func (*Entry) DeprecatedPwritev(context.Context, usermem.IOSequence, int64) (int64, error) { + return 0, ErrInvalidOp +} + +// DeprecatedFsync is a noop. +func (*Entry) DeprecatedFsync() error { + // Ignore, this is in memory. + return nil +} + +// DeprecatedFlush always returns nil. +func (*Entry) DeprecatedFlush() error { + return nil +} + +// DeprecatedMappable implements fs.InodeOperations.DeprecatedMappable. +func (*Entry) DeprecatedMappable(context.Context, *fs.Inode) (memmap.Mappable, bool) { + return nil, false +} + +func init() { + // Register ramfs errors. + syserror.AddErrorTranslation(ErrInvalidOp, syscall.EINVAL) + syserror.AddErrorTranslation(ErrDenied, syscall.EACCES) + syserror.AddErrorTranslation(ErrNotFound, syscall.ENOENT) + syserror.AddErrorTranslation(ErrCrossDevice, syscall.EXDEV) + syserror.AddErrorTranslation(ErrIsDirectory, syscall.EISDIR) + syserror.AddErrorTranslation(ErrNotDirectory, syscall.ENOTDIR) + syserror.AddErrorTranslation(ErrNotEmpty, syscall.ENOTEMPTY) +} diff --git a/pkg/sentry/fs/ramfs/socket.go b/pkg/sentry/fs/ramfs/socket.go new file mode 100644 index 000000000..b0c79325f --- /dev/null +++ b/pkg/sentry/fs/ramfs/socket.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ramfs + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +// Socket represents a socket. +type Socket struct { + Entry + + // ep is the bound endpoint. + ep unix.BoundEndpoint +} + +// InitSocket initializes a socket. +func (s *Socket) InitSocket(ctx context.Context, ep unix.BoundEndpoint, owner fs.FileOwner, perms fs.FilePermissions) { + s.InitEntry(ctx, owner, perms) + s.ep = ep +} + +// BoundEndpoint returns the socket data. +func (s *Socket) BoundEndpoint(*fs.Inode, string) unix.BoundEndpoint { + // ramfs only supports stored sentry internal sockets. Only gofer sockets + // care about the path argument. + return s.ep +} diff --git a/pkg/sentry/fs/ramfs/symlink.go b/pkg/sentry/fs/ramfs/symlink.go new file mode 100644 index 000000000..9bbf78619 --- /dev/null +++ b/pkg/sentry/fs/ramfs/symlink.go @@ -0,0 +1,72 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ramfs + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// Symlink represents a symlink. +type Symlink struct { + Entry + + mu sync.Mutex `state:"nosave"` + + // Target is the symlink target. + Target string +} + +// InitSymlink initializes a symlink, pointing to the given target. +// A symlink is assumed to always have permissions 0777. 
+func (s *Symlink) InitSymlink(ctx context.Context, owner fs.FileOwner, target string) { + s.InitEntry(ctx, owner, fs.FilePermsFromMode(0777)) + s.Target = target +} + +// UnstableAttr returns all attributes of this ramfs symlink. +func (s *Symlink) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + uattr, _ := s.Entry.UnstableAttr(ctx, inode) + uattr.Size = int64(len(s.Target)) + uattr.Usage = uattr.Size + return uattr, nil +} + +// Check implements InodeOperations.Check. +func (s *Symlink) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +// SetPermissions on a symlink is always rejected. +func (s *Symlink) SetPermissions(context.Context, *fs.Inode, fs.FilePermissions) bool { + return false +} + +// Readlink reads the symlink value. +func (s *Symlink) Readlink(ctx context.Context, _ *fs.Inode) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + s.Entry.NotifyAccess(ctx) + return s.Target, nil +} + +// Getlink returns ErrResolveViaReadlink, falling back to walking to the result +// of Readlink(). 
+func (*Symlink) Getlink(context.Context, *fs.Inode) (*fs.Dirent, error) { + return nil, fs.ErrResolveViaReadlink +} diff --git a/pkg/sentry/fs/ramfs/test/BUILD b/pkg/sentry/fs/ramfs/test/BUILD new file mode 100644 index 000000000..074b0f5ad --- /dev/null +++ b/pkg/sentry/fs/ramfs/test/BUILD @@ -0,0 +1,31 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "test_state", + srcs = [ + "test.go", + ], + out = "test_state.go", + package = "test", +) + +go_library( + name = "test", + testonly = 1, + srcs = [ + "test.go", + "test_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs/test", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/ramfs", + "//pkg/state", + ], +) diff --git a/pkg/sentry/fs/ramfs/test/test.go b/pkg/sentry/fs/ramfs/test/test.go new file mode 100644 index 000000000..fb669558f --- /dev/null +++ b/pkg/sentry/fs/ramfs/test/test.go @@ -0,0 +1,46 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package test provides a simple ramfs-based filesystem for use in testing. 
+package test + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" +) + +// Dir is a simple ramfs.Dir that supports save/restore as-is. +type Dir struct { + ramfs.Dir +} + +// NewDir returns a simple ramfs directory with the passed contents. +func NewDir(ctx context.Context, contents map[string]*fs.Inode, perms fs.FilePermissions) *Dir { + d := &Dir{} + d.InitDir(ctx, contents, fs.RootOwner, perms) + return d +} + +// File is a simple ramfs.File that supports save/restore as-is. +type File struct { + ramfs.File +} + +// NewFile returns a simple ramfs File. +func NewFile(ctx context.Context, perms fs.FilePermissions) *File { + f := &File{} + f.InitFile(ctx, fs.RootOwner, perms) + return f +} diff --git a/pkg/sentry/fs/ramfs/tree.go b/pkg/sentry/fs/ramfs/tree.go new file mode 100644 index 000000000..1fb335f74 --- /dev/null +++ b/pkg/sentry/fs/ramfs/tree.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ramfs + +import ( + "fmt" + "path" + "strings" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/anon" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// MakeDirectoryTree constructs a ramfs tree of all directories containing +// subdirs. 
Each element of subdir must be a clean path, and cannot be empty or +// "/". +func MakeDirectoryTree(ctx context.Context, msrc *fs.MountSource, subdirs []string) (*fs.Inode, error) { + root := emptyDir(ctx, msrc) + for _, subdir := range subdirs { + if path.Clean(subdir) != subdir { + return nil, fmt.Errorf("cannot add subdir at an unclean path: %q", subdir) + } + if subdir == "" || subdir == "/" { + return nil, fmt.Errorf("cannot add subdir at %q", subdir) + } + makeSubdir(ctx, msrc, root.InodeOperations.(*Dir), subdir) + } + return root, nil +} + +// makeSubdir installs into root each component of subdir. The final component is +// a *ramfs.Dir. +func makeSubdir(ctx context.Context, msrc *fs.MountSource, root *Dir, subdir string) { + for _, c := range strings.Split(subdir, "/") { + if len(c) == 0 { + continue + } + child, ok := root.FindChild(c) + if !ok { + child = emptyDir(ctx, msrc) + root.AddChild(ctx, c, child) + } + root = child.InodeOperations.(*Dir) + } +} + +// emptyDir returns an empty *ramfs.Dir that is traversable but not writable. +func emptyDir(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + dir := &Dir{} + dir.InitDir(ctx, make(map[string]*fs.Inode), fs.RootOwner, fs.FilePermsFromMode(0555)) + return fs.NewInode(dir, msrc, fs.StableAttr{ + DeviceID: anon.PseudoDevice.DeviceID(), + InodeID: anon.PseudoDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} diff --git a/pkg/sentry/fs/ramfs/tree_test.go b/pkg/sentry/fs/ramfs/tree_test.go new file mode 100644 index 000000000..68e2929d5 --- /dev/null +++ b/pkg/sentry/fs/ramfs/tree_test.go @@ -0,0 +1,79 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ramfs + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +func TestMakeDirectoryTree(t *testing.T) { + mount := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}) + + for _, test := range []struct { + name string + subdirs []string + }{ + { + name: "abs paths", + subdirs: []string{ + "/tmp", + "/tmp/a/b", + "/tmp/a/c/d", + "/tmp/c", + "/proc", + "/dev/a/b", + "/tmp", + }, + }, + { + name: "rel paths", + subdirs: []string{ + "tmp", + "tmp/a/b", + "tmp/a/c/d", + "tmp/c", + "proc", + "dev/a/b", + "tmp", + }, + }, + } { + ctx := contexttest.Context(t) + tree, err := MakeDirectoryTree(ctx, mount, test.subdirs) + if err != nil { + t.Errorf("%s: failed to make ramfs tree, got error %v, want nil", test.name, err) + continue + } + + // Expect to be able to find each of the paths. + mm, err := fs.NewMountNamespace(ctx, tree) + if err != nil { + t.Errorf("%s: failed to create mount manager: %v", test.name, err) + continue + } + root := mm.Root() + defer mm.DecRef() + + for _, p := range test.subdirs { + if _, err := mm.FindInode(ctx, root, nil, p, 0); err != nil { + t.Errorf("%s: failed to find node %s: %v", test.name, p, err) + break + } + } + } +} diff --git a/pkg/sentry/fs/restore.go b/pkg/sentry/fs/restore.go new file mode 100644 index 000000000..b4ac85a27 --- /dev/null +++ b/pkg/sentry/fs/restore.go @@ -0,0 +1,75 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "sync" +) + +// RestoreEnvironment is the restore environment for file systems. It consists +// of things that change across save and restore and therefore cannot be saved +// in the object graph. +type RestoreEnvironment struct { + // MountSources maps Filesystem.Name() to mount arguments. + MountSources map[string][]MountArgs + + // ValidateFileSize indicates file size should not change across S/R. + ValidateFileSize bool + + // ValidateFileTimestamp indicates file modification timestamp should + // not change across S/R. + ValidateFileTimestamp bool +} + +// MountArgs holds arguments to Mount. +type MountArgs struct { + // Dev corresponds to the devname argument of Mount. + Dev string + + // Flags corresponds to the flags argument of Mount. + Flags MountSourceFlags + + // Data corresponds to the data argument of Mount. + Data string +} + +// restoreEnv holds the fs package global RestoreEnvironment. +var restoreEnv = struct { + mu sync.Mutex + env RestoreEnvironment + set bool +}{} + +// SetRestoreEnvironment sets the RestoreEnvironment. Must be called before +// state.Load and only once.
+func SetRestoreEnvironment(r RestoreEnvironment) { + restoreEnv.mu.Lock() + defer restoreEnv.mu.Unlock() + if restoreEnv.set { + panic("RestoreEnvironment may only be set once") + } + restoreEnv.env = r + restoreEnv.set = true +} + +// CurrentRestoreEnvironment returns the current, read-only RestoreEnvironment. +// If no RestoreEnvironment was ever set, returns (_, false). +func CurrentRestoreEnvironment() (RestoreEnvironment, bool) { + restoreEnv.mu.Lock() + defer restoreEnv.mu.Unlock() + e := restoreEnv.env + set := restoreEnv.set + return e, set +} diff --git a/pkg/sentry/fs/save.go b/pkg/sentry/fs/save.go new file mode 100644 index 000000000..bf2a85143 --- /dev/null +++ b/pkg/sentry/fs/save.go @@ -0,0 +1,77 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +import ( + "fmt" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/log" +) + +// SaveInodeMappings saves a mapping of path -> inode ID for every +// user-reachable Dirent. +// +// The entire kernel must be frozen to call this, and filesystem state must not +// change between SaveInodeMappings and state.Save, otherwise the saved state +// of any MountSource may be incoherent. 
+func SaveInodeMappings() { + mountsSeen := make(map[*MountSource]struct{}) + for dirent := range allDirents.dirents { + if _, ok := mountsSeen[dirent.Inode.MountSource]; !ok { + dirent.Inode.MountSource.ResetInodeMappings() + mountsSeen[dirent.Inode.MountSource] = struct{}{} + } + } + + for dirent := range allDirents.dirents { + if dirent.Inode != nil { + // We cannot trust the root provided in the mount due + // to the overlay. We can trust the overlay to delegate + // SaveInodeMappings to the right underlying + // filesystems, though. + root := dirent + for !root.mounted && root.parent != nil { + root = root.parent + } + + // Add the mapping. + n, reachable := dirent.FullName(root) + if !reachable { + // Something has gone seriously wrong if we can't reach our root. + panic(fmt.Sprintf("Unreachable root on dirent file %s", n)) + } + dirent.Inode.MountSource.SaveInodeMapping(dirent.Inode, n) + } + } +} + +// SaveFileFsyncError converts an fs.File.Fsync error to an error that +// indicates that the fs.File was not synced sufficiently to be saved. +func SaveFileFsyncError(err error) error { + switch err { + case nil: + // We succeeded, everything is great. + return nil + case syscall.EBADF, syscall.EINVAL, syscall.EROFS, syscall.ENOSYS, syscall.EPERM: + // These errors mean that the underlying node might not be syncable, + // which we expect to be reported as such even from the gofer. + log.Infof("failed to sync during save: %v", err) + return nil + default: + // We failed in some way that indicates potential data loss. + return fmt.Errorf("failed to sync: %v, data loss may occur", err) + } +} diff --git a/pkg/sentry/fs/seek.go b/pkg/sentry/fs/seek.go new file mode 100644 index 000000000..1268726c2 --- /dev/null +++ b/pkg/sentry/fs/seek.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +// SeekWhence determines seek direction. +type SeekWhence int + +const ( + // SeekSet sets the absolute offset. + SeekSet SeekWhence = iota + + // SeekCurrent sets relative to the current position. + SeekCurrent + + // SeekEnd sets relative to the end of the file. + SeekEnd +) + +// String returns a human readable string for whence. +func (s SeekWhence) String() string { + switch s { + case SeekSet: + return "Set" + case SeekCurrent: + return "Current" + case SeekEnd: + return "End" + default: + return "Unknown" + } +} diff --git a/pkg/sentry/fs/sync.go b/pkg/sentry/fs/sync.go new file mode 100644 index 000000000..9738a8f22 --- /dev/null +++ b/pkg/sentry/fs/sync.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fs + +// SyncType enumerates ways in which a File can be synced. +type SyncType int + +const ( + // SyncAll indicates that modified in-memory metadata and data should + // be written to backing storage. SyncAll implies SyncBackingStorage. 
+ SyncAll SyncType = iota + + // SyncData indicates that along with modified in-memory data, only + // metadata needed to access that data needs to be written. + // + // For example, changes to access time or modification time do not + // need to be written because they are not necessary for a data read + // to be handled correctly, unlike the file size. + // + // The aim of SyncData is to reduce disk activity for applications + // that do not require all metadata to be synchronized with the disk, + // see fdatasync(2). File systems that implement SyncData as SyncAll + // do not support this optimization. + // + // SyncData implies SyncBackingStorage. + SyncData + + // SyncBackingStorage indicates that in-flight write operations to + // backing storage should be flushed. + SyncBackingStorage +) diff --git a/pkg/sentry/fs/sys/BUILD b/pkg/sentry/fs/sys/BUILD new file mode 100644 index 000000000..0ae2cbac8 --- /dev/null +++ b/pkg/sentry/fs/sys/BUILD @@ -0,0 +1,34 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "sys_state", + srcs = [ + "fs.go", + "sys.go", + ], + out = "sys_state.go", + package = "sys", +) + +go_library( + name = "sys", + srcs = [ + "device.go", + "fs.go", + "sys.go", + "sys_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/sys", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/usermem", + "//pkg/state", + ], +) diff --git a/pkg/sentry/fs/sys/device.go b/pkg/sentry/fs/sys/device.go new file mode 100644 index 000000000..54e414d1b --- /dev/null +++ b/pkg/sentry/fs/sys/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sys + +import "gvisor.googlesource.com/gvisor/pkg/sentry/device" + +// sysfsDevice is the sysfs virtual device. +var sysfsDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/sys/fs.go b/pkg/sentry/fs/sys/fs.go new file mode 100644 index 000000000..f25f648c3 --- /dev/null +++ b/pkg/sentry/fs/sys/fs.go @@ -0,0 +1,56 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sys + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// filesystem is a sysfs. +type filesystem struct{} + +func init() { + fs.RegisterFilesystem(&filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +// Name matches fs/sysfs/mount.c:sysfs_fs_type.name. +const FilesystemName = "sysfs" + +// Name is the name of the file system. +func (*filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount allows users to mount(2) this file system. 
+func (*filesystem) AllowUserMount() bool { + return true +} + +// Flags returns that there is nothing special about this file system. +// +// In Linux, sysfs returns FS_USERNS_VISIBLE | FS_USERNS_MOUNT, see fs/sysfs/mount.c. +func (*filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns a sysfs root which can be positioned in the vfs. +func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string) (*fs.Inode, error) { + // device is always ignored. + // sysfs ignores data, see fs/sysfs/mount.c:sysfs_mount. + + return New(ctx, fs.NewNonCachingMountSource(f, flags)), nil +} diff --git a/pkg/sentry/fs/sys/sys.go b/pkg/sentry/fs/sys/sys.go new file mode 100644 index 000000000..ccf56f644 --- /dev/null +++ b/pkg/sentry/fs/sys/sys.go @@ -0,0 +1,57 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sys implements a sysfs filesystem. 
+package sys + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type Dir struct { + ramfs.Dir +} + +func newDir(ctx context.Context, msrc *fs.MountSource, contents map[string]*fs.Inode) *fs.Inode { + d := &Dir{} + d.InitDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555)) + return fs.NewInode(d, msrc, fs.StableAttr{ + DeviceID: sysfsDevice.DeviceID(), + InodeID: sysfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.SpecialDirectory, + }) +} + +// New returns the root node of a partial simple sysfs. +func New(ctx context.Context, msrc *fs.MountSource) *fs.Inode { + return newDir(ctx, msrc, map[string]*fs.Inode{ + // Add a basic set of top-level directories. In Linux, these + // are dynamically added depending on the KConfig. Here we just + // add the most common ones. + "block": newDir(ctx, msrc, nil), + "bus": newDir(ctx, msrc, nil), + "class": newDir(ctx, msrc, nil), + "dev": newDir(ctx, msrc, nil), + "devices": newDir(ctx, msrc, nil), + "firmware": newDir(ctx, msrc, nil), + "fs": newDir(ctx, msrc, nil), + "kernel": newDir(ctx, msrc, nil), + "module": newDir(ctx, msrc, nil), + "power": newDir(ctx, msrc, nil), + }) +} diff --git a/pkg/sentry/fs/timerfd/BUILD b/pkg/sentry/fs/timerfd/BUILD new file mode 100644 index 000000000..7fddc29f4 --- /dev/null +++ b/pkg/sentry/fs/timerfd/BUILD @@ -0,0 +1,35 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "timerfd_state", + srcs = [ + "timerfd.go", + ], + out = "timerfd_state.go", + package = "timerfd", +) + +go_library( + name = "timerfd", + srcs = [ + "timerfd.go", + "timerfd_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/timerfd", + visibility = 
["//pkg/sentry:internal"], + deps = [ + "//pkg/refs", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/anon", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel/time", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/fs/timerfd/timerfd.go b/pkg/sentry/fs/timerfd/timerfd.go new file mode 100644 index 000000000..ae58f6fd7 --- /dev/null +++ b/pkg/sentry/fs/timerfd/timerfd.go @@ -0,0 +1,144 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package timerfd implements the semantics of Linux timerfd objects as +// described by timerfd_create(2). +package timerfd + +import ( + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/anon" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// TimerOperations implements fs.FileOperations for timerfds. 
+type TimerOperations struct { + fsutil.ZeroSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + fsutil.NoIoctl `state:"nosave"` + + events waiter.Queue `state:"nosave"` + timer *ktime.Timer + + // val is the number of timer expirations since the last successful call to + // Readv, Preadv, or SetTime. val is accessed using atomic memory + // operations. + val uint64 +} + +// NewFile returns a timerfd File that receives time from c. +func NewFile(ctx context.Context, c ktime.Clock) *fs.File { + dirent := fs.NewDirent(anon.NewInode(ctx), "anon_inode:[timerfd]") + tops := &TimerOperations{} + tops.timer = ktime.NewTimer(c, tops) + // Timerfds reject writes, but the Write flag must be set in order to + // ensure that our Writev/Pwritev methods actually get called to return + // the correct errors. + return fs.NewFile(ctx, dirent, fs.FileFlags{Read: true, Write: true}, tops) +} + +// Release implements fs.FileOperations.Release. +func (t *TimerOperations) Release() { + t.timer.Destroy() +} + +// PauseTimer pauses the associated Timer. +func (t *TimerOperations) PauseTimer() { + t.timer.Pause() +} + +// ResumeTimer resumes the associated Timer. +func (t *TimerOperations) ResumeTimer() { + t.timer.Resume() +} + +// Clock returns the associated Timer's Clock. +func (t *TimerOperations) Clock() ktime.Clock { + return t.timer.Clock() +} + +// GetTime returns the associated Timer's setting and the time at which it was +// observed. +func (t *TimerOperations) GetTime() (ktime.Time, ktime.Setting) { + return t.timer.Get() +} + +// SetTime atomically changes the associated Timer's setting, resets the number +// of expirations to 0, and returns the previous setting and the time at which +// it was observed. 
+func (t *TimerOperations) SetTime(s ktime.Setting) (ktime.Time, ktime.Setting) { + return t.timer.SwapAnd(s, func() { atomic.StoreUint64(&t.val, 0) }) +} + +// Readiness implements waiter.Waitable.Readiness. +func (t *TimerOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + var ready waiter.EventMask + if atomic.LoadUint64(&t.val) != 0 { + ready |= waiter.EventIn + } + return ready +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (t *TimerOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + t.events.EventRegister(e, mask) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (t *TimerOperations) EventUnregister(e *waiter.Entry) { + t.events.EventUnregister(e) +} + +// Read implements fs.FileOperations.Read. +func (t *TimerOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + const sizeofUint64 = 8 + if dst.NumBytes() < sizeofUint64 { + return 0, syserror.EINVAL + } + if val := atomic.SwapUint64(&t.val, 0); val != 0 { + var buf [sizeofUint64]byte + usermem.ByteOrder.PutUint64(buf[:], val) + if _, err := dst.CopyOut(ctx, buf[:]); err != nil { + // Linux does not undo consuming the number of expirations even if + // writing to userspace fails. + return 0, err + } + return sizeofUint64, nil + } + return 0, syserror.ErrWouldBlock +} + +// Write implements fs.FileOperations.Write. +func (t *TimerOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EINVAL +} + +// Notify implements ktime.TimerListener.Notify. +func (t *TimerOperations) Notify(exp uint64) { + atomic.AddUint64(&t.val, exp) + t.events.Notify(waiter.EventIn) +} + +// Destroy implements ktime.TimerListener.Destroy. 
+func (t *TimerOperations) Destroy() {} diff --git a/pkg/sentry/fs/tmpfs/BUILD b/pkg/sentry/fs/tmpfs/BUILD new file mode 100644 index 000000000..be4e695d3 --- /dev/null +++ b/pkg/sentry/fs/tmpfs/BUILD @@ -0,0 +1,64 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "tmpfs_state", + srcs = [ + "file_regular.go", + "fs.go", + "inode_file.go", + "tmpfs.go", + ], + out = "tmpfs_state.go", + package = "tmpfs", +) + +go_library( + name = "tmpfs", + srcs = [ + "device.go", + "file_regular.go", + "fs.go", + "inode_file.go", + "tmpfs.go", + "tmpfs_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/tmpfs", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/log", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/ramfs", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/pipe", + "//pkg/sentry/memmap", + "//pkg/sentry/platform", + "//pkg/sentry/safemem", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) + +go_test( + name = "tmpfs_test", + size = "small", + srcs = ["file_test.go"], + embed = [":tmpfs"], + deps = [ + "//pkg/sentry/context", + "//pkg/sentry/context/contexttest", + "//pkg/sentry/fs", + "//pkg/sentry/platform", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + ], +) diff --git a/pkg/sentry/fs/tmpfs/device.go b/pkg/sentry/fs/tmpfs/device.go new file mode 100644 index 000000000..e588b3440 --- /dev/null +++ b/pkg/sentry/fs/tmpfs/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tmpfs + +import "gvisor.googlesource.com/gvisor/pkg/sentry/device" + +// tmpfsDevice is the kernel tmpfs device. +var tmpfsDevice = device.NewAnonDevice() diff --git a/pkg/sentry/fs/tmpfs/file_regular.go b/pkg/sentry/fs/tmpfs/file_regular.go new file mode 100644 index 000000000..9811d90bc --- /dev/null +++ b/pkg/sentry/fs/tmpfs/file_regular.go @@ -0,0 +1,56 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tmpfs + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// regularFileOperations implements fs.FileOperations for a regular +// tmpfs file. 
+type regularFileOperations struct { + waiter.AlwaysReady `state:"nosave"` + fsutil.NoopRelease `state:"nosave"` + fsutil.GenericSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoopFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoIoctl `state:"nosave"` + + // iops is the InodeOperations of a regular tmpfs file. It is + // guaranteed to be the same as file.Dirent.Inode.InodeOperations, + // see operations that take fs.File below. + iops *fileInodeOperations +} + +// Read implements fs.FileOperations.Read. +func (r *regularFileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + return r.iops.read(ctx, dst, offset) +} + +// Write implements fs.FileOperations.Write. +func (r *regularFileOperations) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + return r.iops.write(ctx, src, offset) +} + +// ConfigureMMap implements fs.FileOperations.ConfigureMMap. +func (r *regularFileOperations) ConfigureMMap(ctx context.Context, file *fs.File, opts *memmap.MMapOpts) error { + return fsutil.GenericConfigureMMap(file, r.iops, opts) +} diff --git a/pkg/sentry/fs/tmpfs/file_test.go b/pkg/sentry/fs/tmpfs/file_test.go new file mode 100644 index 000000000..f064eb1ac --- /dev/null +++ b/pkg/sentry/fs/tmpfs/file_test.go @@ -0,0 +1,73 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tmpfs + +import ( + "bytes" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +func newFileInode(ctx context.Context) *fs.Inode { + m := fs.NewCachingMountSource(&Filesystem{}, fs.MountSourceFlags{}) + iops := NewInMemoryFile(ctx, usage.Tmpfs, fs.WithCurrentTime(ctx, fs.UnstableAttr{}), platform.FromContext(ctx)) + return fs.NewInode(iops, m, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.RegularFile, + }) +} + +func newFile(ctx context.Context) *fs.File { + inode := newFileInode(ctx) + f, _ := inode.GetFile(ctx, fs.NewDirent(inode, "stub"), fs.FileFlags{Read: true, Write: true}) + return f +} + +// Allocate once, write twice. 
+func TestGrow(t *testing.T) { + ctx := contexttest.Context(t) + f := newFile(ctx) + defer f.DecRef() + + abuf := bytes.Repeat([]byte{'a'}, 68) + n, err := f.Pwritev(ctx, usermem.BytesIOSequence(abuf), 0) + if n != int64(len(abuf)) || err != nil { + t.Fatalf("DeprecatedPwritev got (%d, %v) want (%d, nil)", n, err, len(abuf)) + } + + bbuf := bytes.Repeat([]byte{'b'}, 856) + n, err = f.Pwritev(ctx, usermem.BytesIOSequence(bbuf), 68) + if n != int64(len(bbuf)) || err != nil { + t.Fatalf("DeprecatedPwritev got (%d, %v) want (%d, nil)", n, err, len(bbuf)) + } + + rbuf := make([]byte, len(abuf)+len(bbuf)) + n, err = f.Preadv(ctx, usermem.BytesIOSequence(rbuf), 0) + if n != int64(len(rbuf)) || err != nil { + t.Fatalf("DeprecatedPreadv got (%d, %v) want (%d, nil)", n, err, len(rbuf)) + } + + if want := append(abuf, bbuf...); !bytes.Equal(rbuf, want) { + t.Fatalf("Read %v, want %v", rbuf, want) + } +} diff --git a/pkg/sentry/fs/tmpfs/fs.go b/pkg/sentry/fs/tmpfs/fs.go new file mode 100644 index 000000000..639a19b0d --- /dev/null +++ b/pkg/sentry/fs/tmpfs/fs.go @@ -0,0 +1,131 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tmpfs + +import ( + "fmt" + "regexp" + "strconv" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" +) + +const ( + // Set initial permissions for the root directory. + modeKey = "mode" + + // UID for the root directory. + rootUIDKey = "uid" + + // GID for the root directory. + rootGIDKey = "gid" + + // TODO: support a tmpfs size limit. + // size = "size" + + // default permissions are read/write/execute. + defaultMode = 0777 +) + +// modeRegexp is the expected format of the mode option. +var modeRegexp = regexp.MustCompile("0[0-7][0-7][0-7]") + +// Filesystem is a tmpfs. +type Filesystem struct{} + +func init() { + fs.RegisterFilesystem(&Filesystem{}) +} + +// FilesystemName is the name under which the filesystem is registered. +// Name matches mm/shmem.c:shmem_fs_type.name. +const FilesystemName = "tmpfs" + +// Name is the name of the file system. +func (*Filesystem) Name() string { + return FilesystemName +} + +// AllowUserMount allows users to mount(2) this file system. +func (*Filesystem) AllowUserMount() bool { + return true +} + +// Flags returns that there is nothing special about this file system. +// +// In Linux, tmpfs returns FS_USERNS_MOUNT, see mm/shmem.c. +func (*Filesystem) Flags() fs.FilesystemFlags { + return 0 +} + +// Mount returns a tmpfs root that can be positioned in the vfs. +func (f *Filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string) (*fs.Inode, error) { + // device is always ignored. + + // Parse generic comma-separated key=value options, this file system expects them. + options := fs.GenericMountSourceOptions(data) + + // Parse the root directory permissions. 
+ perms := fs.FilePermsFromMode(defaultMode) + if m, ok := options[modeKey]; ok { + if !modeRegexp.MatchString(m) { + return nil, fmt.Errorf("unsupported mode value: 'mode=%s'", m) + } + // It's basically impossible that we error out at this point, + // maybe we should panic. + i, err := strconv.ParseUint(m, 8, 32) + if err != nil { + return nil, fmt.Errorf("mode value not parsable 'mode=%s': %v", m, err) + } + perms = fs.FilePermsFromMode(linux.FileMode(i)) + delete(options, modeKey) + } + + creds := auth.CredentialsFromContext(ctx) + owner := fs.FileOwnerFromContext(ctx) + if uidstr, ok := options[rootUIDKey]; ok { + uid, err := strconv.ParseInt(uidstr, 10, 32) + if err != nil { + return nil, fmt.Errorf("uid value not parsable 'uid=%d': %v", uid, err) + } + owner.UID = creds.UserNamespace.MapToKUID(auth.UID(uid)) + delete(options, rootUIDKey) + } + + if gidstr, ok := options[rootGIDKey]; ok { + gid, err := strconv.ParseInt(gidstr, 10, 32) + if err != nil { + return nil, fmt.Errorf("gid value not parsable 'gid=%d': %v", gid, err) + } + owner.GID = creds.UserNamespace.MapToKGID(auth.GID(gid)) + delete(options, rootGIDKey) + } + + // Fail if the caller passed us more options than we can parse. They may be + // expecting us to set something we can't set. + if len(options) > 0 { + return nil, fmt.Errorf("unsupported mount options: %v", options) + } + + // Construct a mount which will cache dirents. + msrc := fs.NewCachingMountSource(f, flags) + + // Construct the tmpfs root. + return NewDir(ctx, nil, owner, perms, msrc, platform.FromContext(ctx)), nil +} diff --git a/pkg/sentry/fs/tmpfs/inode_file.go b/pkg/sentry/fs/tmpfs/inode_file.go new file mode 100644 index 000000000..66bc934ae --- /dev/null +++ b/pkg/sentry/fs/tmpfs/inode_file.go @@ -0,0 +1,492 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tmpfs + +import ( + "io" + "sync" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// fileInodeOperations implements fs.InodeOperations for a regular tmpfs file. +// These files are backed by FrameRegions allocated from a platform.Memory, +// and may be directly mapped. +// +// The tmpfs file memory is backed by FrameRegions, each of which is reference +// counted. frames maintains a single reference on each of the FrameRegions. +// Since these contain the contents of the file, the reference may only be +// decremented once this file is both deleted and all handles to the file have +// been closed. +// +// Mappable users may also call IncRefOn/DecRefOn, generally to indicate that +// they plan to use MapInto to map the file into an AddressSpace. These calls +// include an InvalidatorRegion associated with that reference. When the +// referenced portion of the file is removed (with Truncate), the associated +// InvalidatorRegion is invalidated. 
+type fileInodeOperations struct { + fsutil.DeprecatedFileOperations `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.NoopWriteOut `state:"nosave"` + + // platform is used to allocate memory that stores the file's contents. + platform platform.Platform + + // memUsage is the default memory usage that will be reported by this file. + memUsage usage.MemoryKind + + attrMu sync.Mutex `state:"nosave"` + + // attr contains the unstable metadata for the file. + // + // attr is protected by attrMu. attr.Unstable.Size is protected by both + // attrMu and dataMu; reading it requires locking either mutex, while + // mutating it requires locking both. + attr fsutil.InMemoryAttributes + + mapsMu sync.Mutex `state:"nosave"` + + // mappings tracks mappings of the file into memmap.MappingSpaces. + // + // mappings is protected by mapsMu. + mappings memmap.MappingSet + + dataMu sync.RWMutex `state:"nosave"` + + // data maps offsets into the file to offsets into platform.Memory() that + // store the file's data. + // + // data is protected by dataMu. + data fsutil.FileRangeSet +} + +// NewInMemoryFile returns a new file backed by p.Memory(). +func NewInMemoryFile(ctx context.Context, usage usage.MemoryKind, uattr fs.UnstableAttr, p platform.Platform) fs.InodeOperations { + return &fileInodeOperations{ + attr: fsutil.InMemoryAttributes{ + Unstable: uattr, + }, + platform: p, + memUsage: usage, + } +} + +// Release implements fs.InodeOperations.Release. +func (f *fileInodeOperations) Release(context.Context) { + f.dataMu.Lock() + defer f.dataMu.Unlock() + f.data.DropAll(f.platform.Memory()) +} + +// Mappable implements fs.InodeOperations.Mappable. +func (f *fileInodeOperations) Mappable(*fs.Inode) memmap.Mappable { + return f +} + +// Rename implements fs.InodeOperations.Rename. 
+func (*fileInodeOperations) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error { + return rename(ctx, oldParent, oldName, newParent, newName) +} + +// GetFile implements fs.InodeOperations.GetFile. +func (f *fileInodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + flags.Pread = true + flags.Pwrite = true + return fs.NewFile(ctx, d, flags, ®ularFileOperations{iops: f}), nil +} + +// UnstableAttr returns unstable attributes of this tmpfs file. +func (f *fileInodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + f.attrMu.Lock() + defer f.attrMu.Unlock() + f.dataMu.RLock() + defer f.dataMu.RUnlock() + attr := f.attr.Unstable + attr.Usage = int64(f.data.Span()) + return attr, nil +} + +// Getxattr implements fs.InodeOperations.Getxattr. +func (f *fileInodeOperations) Getxattr(inode *fs.Inode, name string) ([]byte, error) { + f.attrMu.Lock() + defer f.attrMu.Unlock() + return f.attr.Getxattr(name) +} + +// Setxattr implements fs.InodeOperations.Setxattr. +func (f *fileInodeOperations) Setxattr(inode *fs.Inode, name string, value []byte) error { + f.attrMu.Lock() + defer f.attrMu.Unlock() + return f.attr.Setxattr(name, value) +} + +// Listxattr implements fs.InodeOperations.Listxattr. +func (f *fileInodeOperations) Listxattr(inode *fs.Inode) (map[string]struct{}, error) { + f.attrMu.Lock() + defer f.attrMu.Unlock() + return f.attr.Listxattr() +} + +// Check implements fs.InodeOperations.Check. +func (f *fileInodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. 
func (f *fileInodeOperations) SetPermissions(ctx context.Context, inode *fs.Inode, p fs.FilePermissions) bool {
	f.attrMu.Lock()
	defer f.attrMu.Unlock()
	return f.attr.SetPermissions(ctx, p)
}

// SetTimestamps implements fs.InodeOperations.SetTimestamps.
func (f *fileInodeOperations) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error {
	f.attrMu.Lock()
	defer f.attrMu.Unlock()
	return f.attr.SetTimestamps(ctx, ts)
}

// SetOwner implements fs.InodeOperations.SetOwner.
func (f *fileInodeOperations) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error {
	f.attrMu.Lock()
	defer f.attrMu.Unlock()
	return f.attr.SetOwner(ctx, owner)
}

// Truncate implements fs.InodeOperations.Truncate.
//
// Growing the file only updates Size; shrinking additionally invalidates
// memory-map translations of the truncated pages and frees their backing
// memory.
func (f *fileInodeOperations) Truncate(ctx context.Context, inode *fs.Inode, size int64) error {
	f.attrMu.Lock()
	defer f.attrMu.Unlock()

	// Mutating Size requires both attrMu (held above) and dataMu (see the
	// attr field documentation).
	f.dataMu.Lock()
	oldSize := f.attr.Unstable.Size
	if oldSize != size {
		f.attr.Unstable.Size = size
		f.attr.TouchModificationTime(ctx)
	}
	f.dataMu.Unlock()

	// Nothing left to do unless shrinking the file.
	if oldSize <= size {
		return nil
	}

	oldpgend := fs.OffsetPageEnd(oldSize)
	newpgend := fs.OffsetPageEnd(size)

	// Invalidate past translations of truncated pages.
	if newpgend != oldpgend {
		f.mapsMu.Lock()
		f.mappings.Invalidate(memmap.MappableRange{newpgend, oldpgend}, memmap.InvalidateOpts{
			// Compare Linux's mm/shmem.c:shmem_setattr() =>
			// mm/memory.c:unmap_mapping_range(evencows=1).
			InvalidatePrivate: true,
		})
		f.mapsMu.Unlock()
	}

	// We are now guaranteed that there are no translations of truncated pages,
	// and can remove them.
	f.dataMu.Lock()
	defer f.dataMu.Unlock()
	f.data.Truncate(uint64(size), f.platform.Memory())

	return nil
}

// AddLink implements fs.InodeOperations.AddLink.
func (f *fileInodeOperations) AddLink() {
	f.attrMu.Lock()
	f.attr.Unstable.Links++
	f.attrMu.Unlock()
}

// DropLink implements fs.InodeOperations.DropLink.
func (f *fileInodeOperations) DropLink() {
	f.attrMu.Lock()
	f.attr.Unstable.Links--
	f.attrMu.Unlock()
}

// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange.
func (f *fileInodeOperations) NotifyStatusChange(ctx context.Context) {
	f.attrMu.Lock()
	f.attr.TouchStatusChangeTime(ctx)
	f.attrMu.Unlock()
}

// IsVirtual implements fs.InodeOperations.IsVirtual.
func (*fileInodeOperations) IsVirtual() bool {
	return true
}

// StatFS implements fs.InodeOperations.StatFS.
func (*fileInodeOperations) StatFS(context.Context) (fs.Info, error) {
	return fsInfo, nil
}

// read copies file data starting at offset into dst, updating the access
// time on success. It returns io.EOF when offset is at or past the file size.
func (f *fileInodeOperations) read(ctx context.Context, dst usermem.IOSequence, offset int64) (int64, error) {
	// Zero length reads for tmpfs are no-ops.
	if dst.NumBytes() == 0 {
		return 0, nil
	}

	// Have we reached EOF? We check for this again in
	// fileReadWriter.ReadToBlocks to avoid holding f.attrMu (which would
	// serialize reads) or f.dataMu (which would violate lock ordering), but
	// check here first (before calling into MM) since reading at EOF is
	// common: getting a return value of 0 from a read syscall is the only way
	// to detect EOF.
	//
	// TODO: Separate out f.attr.Size and use atomics instead of
	// f.dataMu.
	f.dataMu.RLock()
	size := f.attr.Unstable.Size
	f.dataMu.RUnlock()
	if offset >= size {
		return 0, io.EOF
	}

	n, err := dst.CopyOutFrom(ctx, &fileReadWriter{f, offset})
	// Compare Linux's mm/filemap.c:do_generic_file_read() => file_accessed().
	f.attrMu.Lock()
	f.attr.TouchAccessTime(ctx)
	f.attrMu.Unlock()
	return n, err
}

// write copies data from src into the file starting at offset, updating the
// modification time. Growth of the file size happens in WriteFromBlocks.
func (f *fileInodeOperations) write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
	// Zero length writes for tmpfs are no-ops.
	if src.NumBytes() == 0 {
		return 0, nil
	}

	f.attrMu.Lock()
	defer f.attrMu.Unlock()
	// Compare Linux's mm/filemap.c:__generic_file_write_iter() => file_update_time().
	f.attr.TouchModificationTime(ctx)
	return src.CopyInTo(ctx, &fileReadWriter{f, offset})
}

// fileReadWriter adapts a fileInodeOperations plus a starting offset to the
// safemem.Reader and safemem.Writer interfaces. offset advances as data is
// copied.
type fileReadWriter struct {
	f      *fileInodeOperations
	offset int64
}

// ReadToBlocks implements safemem.Reader.ReadToBlocks.
func (rw *fileReadWriter) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) {
	rw.f.dataMu.RLock()
	defer rw.f.dataMu.RUnlock()

	// Compute the range to read.
	if rw.offset >= rw.f.attr.Unstable.Size {
		return 0, io.EOF
	}
	end := fs.ReadEndOffset(rw.offset, int64(dsts.NumBytes()), rw.f.attr.Unstable.Size)
	if end == rw.offset { // dsts.NumBytes() == 0?
		return 0, nil
	}

	mem := rw.f.platform.Memory()
	var done uint64
	seg, gap := rw.f.data.Find(uint64(rw.offset))
	for rw.offset < end {
		mr := memmap.MappableRange{uint64(rw.offset), uint64(end)}
		switch {
		case seg.Ok():
			// Get internal mappings.
			ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Read)
			if err != nil {
				return done, err
			}

			// Copy from internal mappings.
			n, err := safemem.CopySeq(dsts, ims)
			done += n
			rw.offset += int64(n)
			dsts = dsts.DropFirst64(n)
			if err != nil {
				return done, err
			}

			// Continue.
			seg, gap = seg.NextNonEmpty()

		case gap.Ok():
			// Tmpfs holes are zero-filled.
			gapmr := gap.Range().Intersect(mr)
			dst := dsts.TakeFirst64(gapmr.Length())
			n, err := safemem.ZeroSeq(dst)
			done += n
			rw.offset += int64(n)
			dsts = dsts.DropFirst64(n)
			if err != nil {
				return done, err
			}

			// Continue.
			seg, gap = gap.NextSegment(), fsutil.FileRangeGapIterator{}

		default:
			// NOTE(review): this break exits only the switch, not the for
			// loop; progress relies on seg/gap always being Ok() while
			// rw.offset < end, otherwise this would spin. Confirm this case
			// is unreachable (or break out of the loop explicitly).
			break
		}
	}
	return done, nil
}

// WriteFromBlocks implements safemem.Writer.WriteFromBlocks.
// WriteFromBlocks copies srcs into the file at rw.offset, allocating backing
// memory for holes as needed and growing the recorded file size if the write
// extends past it.
func (rw *fileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) {
	rw.f.dataMu.Lock()
	defer rw.f.dataMu.Unlock()

	// Compute the range to write.
	end := fs.WriteEndOffset(rw.offset, int64(srcs.NumBytes()))
	if end == rw.offset { // srcs.NumBytes() == 0?
		return 0, nil
	}

	defer func() {
		// If the write ends beyond the file's previous size, it causes the
		// file to grow.
		if rw.offset > rw.f.attr.Unstable.Size {
			rw.f.attr.Unstable.Size = rw.offset
		}
	}()

	mem := rw.f.platform.Memory()
	// Page-aligned mr for when we need to allocate memory. RoundUp can't
	// overflow since end is an int64.
	pgstartaddr := usermem.Addr(rw.offset).RoundDown()
	pgendaddr, _ := usermem.Addr(end).RoundUp()
	pgMR := memmap.MappableRange{uint64(pgstartaddr), uint64(pgendaddr)}

	var done uint64
	seg, gap := rw.f.data.Find(uint64(rw.offset))
	for rw.offset < end {
		mr := memmap.MappableRange{uint64(rw.offset), uint64(end)}
		switch {
		case seg.Ok():
			// Get internal mappings.
			ims, err := mem.MapInternal(seg.FileRangeOf(seg.Range().Intersect(mr)), usermem.Write)
			if err != nil {
				return done, err
			}

			// Copy to internal mappings.
			n, err := safemem.CopySeq(ims, srcs)
			done += n
			rw.offset += int64(n)
			srcs = srcs.DropFirst64(n)
			if err != nil {
				return done, err
			}

			// Continue.
			seg, gap = seg.NextNonEmpty()

		case gap.Ok():
			// Allocate memory for the write.
			gapMR := gap.Range().Intersect(pgMR)
			fr, err := mem.Allocate(gapMR.Length(), rw.f.memUsage)
			if err != nil {
				return done, err
			}

			// Write to that memory as usual.
			seg, gap = rw.f.data.Insert(gap, gapMR, fr.Start), fsutil.FileRangeGapIterator{}

		default:
			// NOTE(review): this break exits only the switch, not the for
			// loop; as in ReadToBlocks, progress relies on seg/gap always
			// being Ok() while rw.offset < end — confirm this case is
			// unreachable.
			break
		}
	}
	return done, nil
}

// AddMapping implements memmap.Mappable.AddMapping.
+func (f *fileInodeOperations) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error { + f.mapsMu.Lock() + defer f.mapsMu.Unlock() + f.mappings.AddMapping(ms, ar, offset) + return nil +} + +// RemoveMapping implements memmap.Mappable.RemoveMapping. +func (f *fileInodeOperations) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) { + f.mapsMu.Lock() + defer f.mapsMu.Unlock() + f.mappings.RemoveMapping(ms, ar, offset) +} + +// CopyMapping implements memmap.Mappable.CopyMapping. +func (f *fileInodeOperations) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error { + return f.AddMapping(ctx, ms, dstAR, offset) +} + +// Translate implements memmap.Mappable.Translate. +func (f *fileInodeOperations) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { + f.dataMu.Lock() + defer f.dataMu.Unlock() + + // Constrain translations to f.attr.Unstable.Size (rounded up) to prevent + // translation to pages that may be concurrently truncated. + pgend := fs.OffsetPageEnd(f.attr.Unstable.Size) + var buserr error + if required.End > pgend { + buserr = &memmap.BusError{io.EOF} + required.End = pgend + } + if optional.End > pgend { + optional.End = pgend + } + + mem := f.platform.Memory() + cerr := f.data.Fill(ctx, required, optional, mem, f.memUsage, func(_ context.Context, dsts safemem.BlockSeq, _ uint64) (uint64, error) { + // Newly-allocated pages are zeroed, so we don't need to do anything. 
+ return dsts.NumBytes(), nil + }) + + var ts []memmap.Translation + var translatedEnd uint64 + for seg := f.data.FindSegment(required.Start); seg.Ok() && seg.Start() < required.End; seg, _ = seg.NextNonEmpty() { + segMR := seg.Range().Intersect(optional) + ts = append(ts, memmap.Translation{ + Source: segMR, + File: mem, + Offset: seg.FileRangeOf(segMR).Start, + }) + translatedEnd = segMR.End + } + + // Don't return the error returned by f.data.Fill if it occurred outside of + // required. + if translatedEnd < required.End && cerr != nil { + return ts, cerr + } + return ts, buserr +} + +// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable. +func (f *fileInodeOperations) InvalidateUnsavable(ctx context.Context) error { + return nil +} diff --git a/pkg/sentry/fs/tmpfs/tmpfs.go b/pkg/sentry/fs/tmpfs/tmpfs.go new file mode 100644 index 000000000..1cc7ae491 --- /dev/null +++ b/pkg/sentry/fs/tmpfs/tmpfs.go @@ -0,0 +1,204 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tmpfs is a filesystem implementation backed by memory. 
+package tmpfs + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/ramfs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/pipe" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +var fsInfo = fs.Info{ + Type: linux.TMPFS_MAGIC, + + // TODO: allow configuring a tmpfs size and enforce it. + TotalBlocks: 0, + FreeBlocks: 0, +} + +// rename implements fs.InodeOperations.Rename for tmpfs nodes. +func rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error { + op, ok := oldParent.InodeOperations.(*Dir) + if !ok { + return ramfs.ErrCrossDevice + } + np, ok := newParent.InodeOperations.(*Dir) + if !ok { + return ramfs.ErrCrossDevice + } + return ramfs.Rename(ctx, &op.Dir, oldName, &np.Dir, newName) +} + +// Dir is a directory. +type Dir struct { + ramfs.Dir + + // platform is used to allocate storage for tmpfs Files. + platform platform.Platform +} + +// NewDir returns a new directory. +func NewDir(ctx context.Context, contents map[string]*fs.Inode, owner fs.FileOwner, perms fs.FilePermissions, msrc *fs.MountSource, platform platform.Platform) *fs.Inode { + d := &Dir{platform: platform} + d.InitDir(ctx, contents, owner, perms) + + // Manually set the CreateOps. + d.CreateOps = d.newCreateOps() + + return fs.NewInode(d, msrc, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} + +// afterLoad is invoked by stateify. +func (d *Dir) afterLoad() { + // Per NewDir, manually set the CreateOps. + d.Dir.CreateOps = d.newCreateOps() +} + +// newCreateOps builds the custom CreateOps for this Dir. 
// newCreateOps builds the custom CreateOps for this Dir. Each constructor
// allocates a new tmpfs node of the corresponding type on d's platform.
func (d *Dir) newCreateOps() *ramfs.CreateOps {
	return &ramfs.CreateOps{
		NewDir: func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) {
			return NewDir(ctx, nil, fs.FileOwnerFromContext(ctx), perms, dir.MountSource, d.platform), nil
		},
		NewFile: func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) {
			uattr := fs.WithCurrentTime(ctx, fs.UnstableAttr{
				Owner: fs.FileOwnerFromContext(ctx),
				Perms: perms,
				// Always start unlinked.
				Links: 0,
			})
			iops := NewInMemoryFile(ctx, usage.Tmpfs, uattr, d.platform)
			return fs.NewInode(iops, dir.MountSource, fs.StableAttr{
				DeviceID:  tmpfsDevice.DeviceID(),
				InodeID:   tmpfsDevice.NextIno(),
				BlockSize: usermem.PageSize,
				Type:      fs.RegularFile,
			}), nil
		},
		NewSymlink: func(ctx context.Context, dir *fs.Inode, target string) (*fs.Inode, error) {
			return NewSymlink(ctx, target, fs.FileOwnerFromContext(ctx), dir.MountSource), nil
		},
		NewBoundEndpoint: func(ctx context.Context, dir *fs.Inode, socket unix.BoundEndpoint, perms fs.FilePermissions) (*fs.Inode, error) {
			return NewSocket(ctx, socket, fs.FileOwnerFromContext(ctx), perms, dir.MountSource), nil
		},
		NewFifo: func(ctx context.Context, dir *fs.Inode, perms fs.FilePermissions) (*fs.Inode, error) {
			return NewFifo(ctx, fs.FileOwnerFromContext(ctx), perms, dir.MountSource), nil
		},
	}
}

// Rename implements fs.InodeOperations.Rename.
func (d *Dir) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error {
	return rename(ctx, oldParent, oldName, newParent, newName)
}

// StatFS implements fs.InodeOperations.StatFS.
func (*Dir) StatFS(context.Context) (fs.Info, error) {
	return fsInfo, nil
}

// Symlink is a symlink.
type Symlink struct {
	ramfs.Symlink
}

// NewSymlink returns a new symlink with the provided permissions.
+func NewSymlink(ctx context.Context, target string, owner fs.FileOwner, msrc *fs.MountSource) *fs.Inode { + s := &Symlink{} + s.InitSymlink(ctx, owner, target) + return fs.NewInode(s, msrc, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Symlink, + }) +} + +// Rename implements fs.InodeOperations.Rename. +func (s *Symlink) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error { + return rename(ctx, oldParent, oldName, newParent, newName) +} + +// StatFS returns the tmpfs info. +func (s *Symlink) StatFS(context.Context) (fs.Info, error) { + return fsInfo, nil +} + +// Socket is a socket. +type Socket struct { + ramfs.Socket +} + +// NewSocket returns a new socket with the provided permissions. +func NewSocket(ctx context.Context, socket unix.BoundEndpoint, owner fs.FileOwner, perms fs.FilePermissions, msrc *fs.MountSource) *fs.Inode { + s := &Socket{} + s.InitSocket(ctx, socket, owner, perms) + return fs.NewInode(s, msrc, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Socket, + }) +} + +// Rename implements fs.InodeOperations.Rename. +func (s *Socket) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error { + return rename(ctx, oldParent, oldName, newParent, newName) +} + +// StatFS returns the tmpfs info. +func (s *Socket) StatFS(context.Context) (fs.Info, error) { + return fsInfo, nil +} + +// Fifo is a tmpfs named pipe. +type Fifo struct { + ramfs.Entry +} + +// NewFifo creates a new named pipe. 
+func NewFifo(ctx context.Context, owner fs.FileOwner, perms fs.FilePermissions, msrc *fs.MountSource) *fs.Inode { + f := &Fifo{} + f.InitEntry(ctx, owner, perms) + iops := pipe.NewInodeOperations(f, pipe.NewPipe(ctx, true /* isNamed */, pipe.DefaultPipeSize, usermem.PageSize)) + return fs.NewInode(iops, msrc, fs.StableAttr{ + DeviceID: tmpfsDevice.DeviceID(), + InodeID: tmpfsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Pipe, + }) +} + +// Rename implements fs.InodeOperations.Rename. +func (f *Fifo) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error { + return rename(ctx, oldParent, oldName, newParent, newName) +} + +// StatFS returns the tmpfs info. +func (*Fifo) StatFS(context.Context) (fs.Info, error) { + return fsInfo, nil +} diff --git a/pkg/sentry/fs/tty/BUILD b/pkg/sentry/fs/tty/BUILD new file mode 100644 index 000000000..90b350410 --- /dev/null +++ b/pkg/sentry/fs/tty/BUILD @@ -0,0 +1,63 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "tty_state", + srcs = [ + "dir.go", + "fs.go", + "inode.go", + "line_discipline.go", + "master.go", + "slave.go", + "terminal.go", + ], + out = "tty_state.go", + package = "tty", +) + +go_library( + name = "tty", + srcs = [ + "dir.go", + "fs.go", + "inode.go", + "line_discipline.go", + "master.go", + "slave.go", + "terminal.go", + "tty_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/fs/tty", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/refs", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/time", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) + 
go_test(
    name = "tty_test",
    size = "small",
    srcs = ["tty_test.go"],
    embed = [":tty"],
    deps = [
        "//pkg/abi/linux",
        "//pkg/sentry/context/contexttest",
        "//pkg/sentry/usermem",
    ],
)
diff --git a/pkg/sentry/fs/tty/dir.go b/pkg/sentry/fs/tty/dir.go
new file mode 100644
index 000000000..2c5b2aed6
--- /dev/null
+++ b/pkg/sentry/fs/tty/dir.go
@@ -0,0 +1,398 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package tty provides pseudoterminals via a devpts filesystem.
package tty

import (
	"fmt"
	"math"
	"strconv"
	"sync"

	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix"
	"gvisor.googlesource.com/gvisor/pkg/waiter"
)

// dirInodeOperations is the root of a devpts mount.
//
// This indirectly manages all terminals within the mount.
//
// New Terminals are created by masterInodeOperations.GetFile, which registers
// the slave Inode in this directory for discovery via Lookup/Readdir. The
// slave inode is unregistered when the master file is Released, as the slave
// is no longer discoverable at that point.
+// +// References on the underlying Terminal are held by masterFileOperations and +// slaveInodeOperations. +// +// masterInodeOperations and slaveInodeOperations hold a pointer to +// dirInodeOperations, which is reference counted by the refcount their +// corresponding Dirents hold on their parent (this directory). +// +// dirInodeOperations implements fs.InodeOperations. +type dirInodeOperations struct { + fsutil.DeprecatedFileOperations + fsutil.InodeNotSocket + fsutil.InodeNotRenameable + fsutil.InodeNotSymlink + fsutil.InodeNoExtendedAttributes + fsutil.NoMappable + fsutil.NoopWriteOut + + // msrc is the super block this directory is on. + // + // TODO: Plumb this through instead of storing it here. + msrc *fs.MountSource + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // attr contains the UnstableAttrs. + attr fsutil.InMemoryAttributes + + // master is the master PTY inode. + master *fs.Inode + + // slaves contains the slave inodes reachable from the directory. + // + // A new slave is added by allocateTerminal and is removed by + // masterFileOperations.Release. + // + // A reference is held on every slave in the map. + slaves map[uint32]*fs.Inode + + // dentryMap is a SortedDentryMap used to implement Readdir containing + // the master and all entries in slaves. + dentryMap *fs.SortedDentryMap + + // next is the next pty index to use. + // + // TODO: reuse indices when ptys are closed. + next uint32 +} + +var _ fs.InodeOperations = (*dirInodeOperations)(nil) + +// newDir creates a new dir with a ptmx file and no terminals. 
+func newDir(ctx context.Context, m *fs.MountSource) *fs.Inode { + d := &dirInodeOperations{ + attr: fsutil.InMemoryAttributes{ + Unstable: fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: fs.RootOwner, + Perms: fs.FilePermsFromMode(0555), + }), + }, + msrc: m, + slaves: make(map[uint32]*fs.Inode), + dentryMap: fs.NewSortedDentryMap(nil), + } + // Linux devpts uses a default mode of 0000 for ptmx which can be + // changed with the ptmxmode mount option. However, that default is not + // useful here (since we'd *always* need the mount option, so it is + // accessible by default). + d.master = newMasterInode(ctx, d, fs.RootOwner, fs.FilePermsFromMode(0666)) + d.dentryMap.Add("ptmx", fs.DentAttr{ + Type: d.master.StableAttr.Type, + InodeID: d.master.StableAttr.InodeID, + }) + + return fs.NewInode(d, m, fs.StableAttr{ + DeviceID: ptsDevice.DeviceID(), + // N.B. Linux always uses inode id 1 for the directory. See + // fs/devpts/inode.c:devpts_fill_super. + // + // TODO: Since ptsDevice must be shared between + // different mounts, we must not assign fixed numbers. + InodeID: ptsDevice.NextIno(), + BlockSize: usermem.PageSize, + Type: fs.Directory, + }) +} + +// Release implements fs.InodeOperations.Release. +func (d *dirInodeOperations) Release(ctx context.Context) { + d.master.DecRef() + if len(d.slaves) != 0 { + panic(fmt.Sprintf("devpts directory still contains active terminals: %+v", d)) + } +} + +// Lookup implements fs.InodeOperations.Lookup. +func (d *dirInodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) { + d.mu.Lock() + defer d.mu.Unlock() + + // Master? + if name == "ptmx" { + d.master.IncRef() + return fs.NewDirent(d.master, name), nil + } + + // Slave number? + n, err := strconv.ParseUint(name, 10, 32) + if err != nil { + // Not found. 
+ return nil, syserror.ENOENT + } + + s, ok := d.slaves[uint32(n)] + if !ok { + return nil, syserror.ENOENT + } + + s.IncRef() + return fs.NewDirent(s, name), nil +} + +// Create implements fs.InodeOperations.Create. +// +// Creation is never allowed. +func (d *dirInodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) { + return nil, syserror.EACCES +} + +// CreateDirectory implements fs.InodeOperations.CreateDirectory. +// +// Creation is never allowed. +func (d *dirInodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error { + return syserror.EACCES +} + +// CreateLink implements fs.InodeOperations.CreateLink. +// +// Creation is never allowed. +func (d *dirInodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname, newname string) error { + return syserror.EACCES +} + +// CreateHardLink implements fs.InodeOperations.CreateHardLink. +// +// Creation is never allowed. +func (d *dirInodeOperations) CreateHardLink(ctx context.Context, dir *fs.Inode, target *fs.Inode, name string) error { + return syserror.EACCES +} + +// CreateFifo implements fs.InodeOperations.CreateFifo. +// +// Creation is never allowed. +func (d *dirInodeOperations) CreateFifo(ctx context.Context, dir *fs.Inode, name string, perm fs.FilePermissions) error { + return syserror.EACCES +} + +// Remove implements fs.InodeOperations.Remove. +// +// Removal is never allowed. +func (d *dirInodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error { + return syserror.EPERM +} + +// RemoveDirectory implements fs.InodeOperations.RemoveDirectory. +// +// Removal is never allowed. +func (d *dirInodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error { + return syserror.EPERM +} + +// Bind implements fs.InodeOperations.Bind. 
+func (d *dirInodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, data unix.BoundEndpoint, perm fs.FilePermissions) error { + return syserror.EPERM +} + +// GetFile implements fs.InodeOperations.GetFile. +func (d *dirInodeOperations) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, dirent, flags, &dirFileOperations{di: d}), nil +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (d *dirInodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + d.mu.Lock() + defer d.mu.Unlock() + return d.attr.Unstable, nil +} + +// Check implements fs.InodeOperations.Check. +func (d *dirInodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +// SetPermissions implements fs.InodeOperations.SetPermissions. +func (d *dirInodeOperations) SetPermissions(ctx context.Context, inode *fs.Inode, p fs.FilePermissions) bool { + d.mu.Lock() + defer d.mu.Unlock() + return d.attr.SetPermissions(ctx, p) +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (d *dirInodeOperations) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error { + d.mu.Lock() + defer d.mu.Unlock() + return d.attr.SetOwner(ctx, owner) +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (d *dirInodeOperations) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error { + d.mu.Lock() + defer d.mu.Unlock() + return d.attr.SetTimestamps(ctx, ts) +} + +// Truncate implements fs.InodeOperations.Truncate. +func (d *dirInodeOperations) Truncate(ctx context.Context, inode *fs.Inode, size int64) error { + return syserror.EINVAL +} + +// AddLink implements fs.InodeOperations.AddLink. +func (d *dirInodeOperations) AddLink() {} + +// DropLink implements fs.InodeOperations.DropLink. 
+func (d *dirInodeOperations) DropLink() {} + +// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange. +func (d *dirInodeOperations) NotifyStatusChange(ctx context.Context) { + d.mu.Lock() + defer d.mu.Unlock() + + d.attr.TouchStatusChangeTime(ctx) +} + +// IsVirtual implements fs.InodeOperations.IsVirtual. +func (d *dirInodeOperations) IsVirtual() bool { + return true +} + +// StatFS implements fs.InodeOperations.StatFS. +func (d *dirInodeOperations) StatFS(ctx context.Context) (fs.Info, error) { + return fs.Info{ + Type: linux.DEVPTS_SUPER_MAGIC, + }, nil +} + +// allocateTerminal creates a new Terminal and installs a pts node for it. +// +// The caller must call DecRef when done with the returned Terminal. +func (d *dirInodeOperations) allocateTerminal(ctx context.Context) (*Terminal, error) { + d.mu.Lock() + defer d.mu.Unlock() + + n := d.next + if n == math.MaxUint32 { + return nil, syserror.ENOMEM + } + + if _, ok := d.slaves[n]; ok { + panic(fmt.Sprintf("pty index collision; index %d already exists", n)) + } + + t := newTerminal(ctx, d, n) + d.next++ + + // The reference returned by newTerminal is returned to the caller. + // Take another for the slave inode. + t.IncRef() + + // Create a pts node. The owner is based on the context that opens + // ptmx. + creds := auth.CredentialsFromContext(ctx) + uid, gid := creds.EffectiveKUID, creds.EffectiveKGID + slave := newSlaveInode(ctx, d, t, fs.FileOwner{uid, gid}, fs.FilePermsFromMode(0666)) + + d.slaves[n] = slave + d.dentryMap.Add(strconv.FormatUint(uint64(n), 10), fs.DentAttr{ + Type: slave.StableAttr.Type, + InodeID: slave.StableAttr.InodeID, + }) + + return t, nil +} + +// masterClose is called when the master end of t is closed. +func (d *dirInodeOperations) masterClose(t *Terminal) { + d.mu.Lock() + defer d.mu.Unlock() + + // The slave end disappears from the directory when the master end is + // closed, even if the slave end is open elsewhere. + // + // N.B. 
since we're using a backdoor method to remove a directory entry + // we won't properly fire inotify events like Linux would. + s, ok := d.slaves[t.n] + if !ok { + panic(fmt.Sprintf("Terminal %+v doesn't exist in %+v?", t, d)) + } + + s.DecRef() + delete(d.slaves, t.n) + d.dentryMap.Remove(strconv.FormatUint(uint64(t.n), 10)) +} + +// dirFileOperations are the fs.FileOperations for the directory. +// +// This is nearly identical to fsutil.DirFileOperations, except that it takes +// df.di.mu in IterateDir. +type dirFileOperations struct { + waiter.AlwaysReady `state:"nosave"` + fsutil.NoopRelease `state:"nosave"` + fsutil.GenericSeek `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + fsutil.NoIoctl `state:"nosave"` + + // di is the inode operations. + di *dirInodeOperations + + // dirCursor contains the name of the last directory entry that was + // serialized. + dirCursor string +} + +var _ fs.FileOperations = (*dirFileOperations)(nil) + +// IterateDir implements DirIterator.IterateDir. +func (df *dirFileOperations) IterateDir(ctx context.Context, dirCtx *fs.DirCtx, offset int) (int, error) { + df.di.mu.Lock() + defer df.di.mu.Unlock() + + n, err := fs.GenericReaddir(dirCtx, df.di.dentryMap) + return offset + n, err +} + +// Readdir implements FileOperations.Readdir. +func (df *dirFileOperations) Readdir(ctx context.Context, file *fs.File, serializer fs.DentrySerializer) (int64, error) { + root := fs.RootFromContext(ctx) + defer root.DecRef() + dirCtx := &fs.DirCtx{ + Serializer: serializer, + DirCursor: &df.dirCursor, + } + return fs.DirentReaddir(ctx, file.Dirent, df, root, dirCtx, file.Offset()) +} + +// Read implements FileOperations.Read +func (df *dirFileOperations) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syserror.EISDIR +} + +// Write implements FileOperations.Write. 
// Write implements FileOperations.Write. Directories are never writable.
func (df *dirFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
	return 0, syserror.EISDIR
}
diff --git a/pkg/sentry/fs/tty/fs.go b/pkg/sentry/fs/tty/fs.go
new file mode 100644
index 000000000..f5e7a3162
--- /dev/null
+++ b/pkg/sentry/fs/tty/fs.go
@@ -0,0 +1,95 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tty

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/device"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// ptsDevice is the pseudo-filesystem device.
var ptsDevice = device.NewAnonDevice()

// filesystem is a devpts filesystem.
//
// This devpts is always in the new "multi-instance" mode. i.e., it contains a
// ptmx device tied to this mount.
type filesystem struct{}

func init() {
	fs.RegisterFilesystem(&filesystem{})
}

// Name matches drivers/devpts/inode.c:devpts_fs_type.name.
func (*filesystem) Name() string {
	return "devpts"
}

// AllowUserMount allows users to mount(2) this file system.
func (*filesystem) AllowUserMount() bool {
	// TODO: Users may mount this once the terminals are in a
	// usable state.
	return false
}

// Flags returns that there is nothing special about this file system.
+func (*filesystem) Flags() fs.FilesystemFlags {
+	return 0
+}
+
+// Mount returns a devpts root that can be positioned in the vfs.
+func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string) (*fs.Inode, error) {
+	// device is always ignored.
+
+	// No options are supported.
+	if data != "" {
+		return nil, syserror.EINVAL
+	}
+
+	return newDir(ctx, fs.NewMountSource(&superOperations{}, f, flags)), nil
+}
+
+// superOperations implements fs.MountSourceOperations, preventing caching.
+type superOperations struct{}
+
+// Revalidate implements fs.DirentOperations.Revalidate.
+//
+// It always returns true, forcing a Lookup for all entries.
+//
+// Slave entries are dropped from dir when their master is closed, so an
+// existing slave Dirent in the tree is not sufficient to guarantee that it
+// still exists on the filesystem.
+func (superOperations) Revalidate(*fs.Dirent) bool {
+	return true
+}
+
+// Keep implements fs.DirentOperations.Keep.
+//
+// Keep returns false because Revalidate would force a lookup on cached entries
+// anyway.
+func (superOperations) Keep(*fs.Dirent) bool {
+	return false
+}
+
+// ResetInodeMappings implements MountSourceOperations.ResetInodeMappings.
+func (superOperations) ResetInodeMappings() {}
+
+// SaveInodeMapping implements MountSourceOperations.SaveInodeMapping.
+func (superOperations) SaveInodeMapping(*fs.Inode, string) {}
+
+// Destroy implements MountSourceOperations.Destroy.
+func (superOperations) Destroy() {}
diff --git a/pkg/sentry/fs/tty/inode.go b/pkg/sentry/fs/tty/inode.go
new file mode 100644
index 000000000..04b9a7727
--- /dev/null
+++ b/pkg/sentry/fs/tty/inode.go
@@ -0,0 +1,143 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// inodeOperations are the base fs.InodeOperations for master and slave Inodes. +// +// inodeOperations does not implement: +// +// * fs.InodeOperations.Release +// * fs.InodeOperations.GetFile +type inodeOperations struct { + fsutil.DeprecatedFileOperations `state:"nosave"` + fsutil.InodeNoExtendedAttributes `state:"nosave"` + fsutil.InodeNotDirectory `state:"nosave"` + fsutil.InodeNotRenameable `state:"nosave"` + fsutil.InodeNotSocket `state:"nosave"` + fsutil.InodeNotSymlink `state:"nosave"` + fsutil.NoMappable `state:"nosave"` + fsutil.NoopWriteOut `state:"nosave"` + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // uattr is the inode's UnstableAttr. + uattr fs.UnstableAttr +} + +// UnstableAttr implements fs.InodeOperations.UnstableAttr. +func (i *inodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { + i.mu.Lock() + defer i.mu.Unlock() + return i.uattr, nil +} + +// Check implements fs.InodeOperations.Check. 
+func (i *inodeOperations) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + return fs.ContextCanAccessFile(ctx, inode, p) +} + +// SetPermissions implements fs.InodeOperations.SetPermissions +func (i *inodeOperations) SetPermissions(ctx context.Context, inode *fs.Inode, p fs.FilePermissions) bool { + i.mu.Lock() + defer i.mu.Unlock() + i.uattr.Perms = p + i.uattr.StatusChangeTime = ktime.NowFromContext(ctx) + return true +} + +// SetOwner implements fs.InodeOperations.SetOwner. +func (i *inodeOperations) SetOwner(ctx context.Context, inode *fs.Inode, owner fs.FileOwner) error { + i.mu.Lock() + defer i.mu.Unlock() + if owner.UID.Ok() { + i.uattr.Owner.UID = owner.UID + } + if owner.GID.Ok() { + i.uattr.Owner.GID = owner.GID + } + return nil +} + +// SetTimestamps implements fs.InodeOperations.SetTimestamps. +func (i *inodeOperations) SetTimestamps(ctx context.Context, inode *fs.Inode, ts fs.TimeSpec) error { + if ts.ATimeOmit && ts.MTimeOmit { + return nil + } + + i.mu.Lock() + defer i.mu.Unlock() + + now := ktime.NowFromContext(ctx) + if !ts.ATimeOmit { + if ts.ATime.IsZero() { + i.uattr.AccessTime = now + } else { + i.uattr.AccessTime = ts.ATime + } + } + if !ts.MTimeOmit { + if ts.MTime.IsZero() { + i.uattr.ModificationTime = now + } else { + i.uattr.ModificationTime = ts.MTime + } + } + i.uattr.StatusChangeTime = now + return nil +} + +// Truncate implements fs.InodeOperations.Truncate. +func (i *inodeOperations) Truncate(ctx context.Context, inode *fs.Inode, size int64) error { + return syserror.EINVAL +} + +// AddLink implements fs.InodeOperations.AddLink. +func (i *inodeOperations) AddLink() { +} + +// DropLink implements fs.InodeOperations.DropLink. +func (i *inodeOperations) DropLink() { +} + +// NotifyStatusChange implements fs.InodeOperations.NotifyStatusChange. 
+func (i *inodeOperations) NotifyStatusChange(ctx context.Context) { + i.mu.Lock() + defer i.mu.Unlock() + i.uattr.StatusChangeTime = ktime.NowFromContext(ctx) +} + +// IsVirtual implements fs.InodeOperations.IsVirtual. +func (i *inodeOperations) IsVirtual() bool { + return true +} + +// StatFS implements fs.InodeOperations.StatFS. +func (i *inodeOperations) StatFS(ctx context.Context) (fs.Info, error) { + return fs.Info{ + Type: linux.DEVPTS_SUPER_MAGIC, + }, nil +} diff --git a/pkg/sentry/fs/tty/line_discipline.go b/pkg/sentry/fs/tty/line_discipline.go new file mode 100644 index 000000000..fde4e7941 --- /dev/null +++ b/pkg/sentry/fs/tty/line_discipline.go @@ -0,0 +1,342 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "bytes" + "sync" + "unicode/utf8" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + spacesPerTab = 8 +) + +// lineDiscipline dictates how input and output are handled between the +// pseudoterminal (pty) master and slave. It can be configured to alter I/O, +// modify control characters (e.g. Ctrl-C for SIGINT), etc. 
The following man +// pages are good resources for how to affect the line discipline: +// +// * termios(3) +// * tty_ioctl(4) +// +// This file corresponds most closely to drivers/tty/n_tty.c. +// +// lineDiscipline has a simple structure but supports a multitude of options +// (see the above man pages). It consists of two queues of bytes: one from the +// terminal master to slave (the input queue) and one from slave to master (the +// output queue). When bytes are written to one end of the pty, the line +// discipline reads the bytes, modifies them or takes special action if +// required, and enqueues them to be read by the other end of the pty: +// +// input from terminal +-------------+ input to process (e.g. bash) +// +------------------------>| input queue |---------------------------+ +// | +-------------+ | +// | | +// | v +// masterFD slaveFD +// ^ | +// | | +// | output to terminal +--------------+ output from process | +// +------------------------| output queue |<--------------------------+ +// +--------------+ +// +// Lock order: +// inMu +// outMu +// termiosMu +type lineDiscipline struct { + // inMu protects inQueue. + inMu sync.Mutex `state:"nosave"` + + // inQueue is the input queue of the terminal. + inQueue queue + + // outMu protects outQueue. + outMu sync.Mutex `state:"nosave"` + + // outQueue is the output queue of the terminal. + outQueue queue + + // termiosMu protects termios. + termiosMu sync.Mutex `state:"nosave"` + + // termios is the terminal configuration used by the lineDiscipline. + termios linux.KernelTermios + + // column is the location in a row of the cursor. This is important for + // handling certain special characters like backspace. + column int +} + +// getTermios gets the linux.Termios for the tty. +func (l *lineDiscipline) getTermios(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + l.termiosMu.Lock() + defer l.termiosMu.Unlock() + // We must copy a Termios struct, not KernelTermios. 
+ t := l.termios.ToTermios() + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), t, usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err +} + +// setTermios sets a linux.Termios for the tty. +func (l *lineDiscipline) setTermios(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + l.termiosMu.Lock() + defer l.termiosMu.Unlock() + // We must copy a Termios struct, not KernelTermios. + var t linux.Termios + _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &t, usermem.IOOpts{ + AddressSpaceActive: true, + }) + l.termios.FromTermios(t) + return 0, err +} + +func (l *lineDiscipline) masterReadiness() waiter.EventMask { + l.inMu.Lock() + defer l.inMu.Unlock() + l.outMu.Lock() + defer l.outMu.Unlock() + return l.inQueue.writeReadiness() | l.outQueue.readReadiness() +} + +func (l *lineDiscipline) slaveReadiness() waiter.EventMask { + l.inMu.Lock() + defer l.inMu.Unlock() + l.outMu.Lock() + defer l.outMu.Unlock() + return l.outQueue.writeReadiness() | l.inQueue.readReadiness() +} + +// queue represents one of the input or output queues between a pty master and +// slave. +type queue struct { + waiter.Queue `state:"nosave"` + buf bytes.Buffer `state:".([]byte)"` +} + +// saveBuf is invoked by stateify. +func (q *queue) saveBuf() []byte { + return append([]byte(nil), q.buf.Bytes()...) +} + +// loadBuf is invoked by stateify. +func (q *queue) loadBuf(b []byte) { + q.buf.Write(b) +} + +// readReadiness returns whether q is ready to be read from. +// +// Preconditions: q's mutex must be held. +func (q *queue) readReadiness() waiter.EventMask { + ready := waiter.EventMask(0) + if q.buf.Len() > 0 { + ready |= waiter.EventIn + } + return ready +} + +// writeReadiness returns whether q is ready to be written to. 
+func (q *queue) writeReadiness() waiter.EventMask { + return waiter.EventOut +} + +func (l *lineDiscipline) inputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) { + l.inMu.Lock() + defer l.inMu.Unlock() + return l.queueRead(ctx, dst, &l.inQueue) +} + +func (l *lineDiscipline) inputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) { + l.inMu.Lock() + defer l.inMu.Unlock() + return l.queueWrite(ctx, src, &l.inQueue, false) +} + +func (l *lineDiscipline) outputQueueRead(ctx context.Context, dst usermem.IOSequence) (int64, error) { + l.outMu.Lock() + defer l.outMu.Unlock() + return l.queueRead(ctx, dst, &l.outQueue) +} + +func (l *lineDiscipline) outputQueueWrite(ctx context.Context, src usermem.IOSequence) (int64, error) { + l.outMu.Lock() + defer l.outMu.Unlock() + return l.queueWrite(ctx, src, &l.outQueue, true) +} + +// queueRead reads from q to userspace. +// +// Preconditions: q's lock must be held. +func (l *lineDiscipline) queueRead(ctx context.Context, dst usermem.IOSequence, q *queue) (int64, error) { + // Copy bytes out to user-space. queueRead doesn't have to do any + // processing or other extra work -- that's all taken care of when + // writing to a queue. + n, err := q.buf.WriteTo(dst.Writer(ctx)) + + // If state changed, notify any waiters. If nothing was available to + // read, let the caller know we could block. + if n > 0 { + q.Notify(waiter.EventOut) + } else if err == nil { + return 0, syserror.ErrWouldBlock + } + return int64(n), err +} + +// queueWrite writes to q from userspace. `output` is whether the queue being +// written to should be subject to output processing (i.e. whether it is the +// output queue). +// +// Precondition: q's lock must be held. +func (l *lineDiscipline) queueWrite(ctx context.Context, src usermem.IOSequence, q *queue, output bool) (int64, error) { + // TODO: Use CopyInTo/safemem to avoid extra copying. + // Get the bytes to write from user-space. 
+ b := make([]byte, src.NumBytes()) + n, err := src.CopyIn(ctx, b) + if err != nil { + return 0, err + } + b = b[:n] + + // If state changed, notify any waiters. If we were unable to write + // anything, let the caller know we could block. + if n > 0 { + q.Notify(waiter.EventIn) + } else { + return 0, syserror.ErrWouldBlock + } + + // Optionally perform line discipline transformations depending on + // whether we're writing to the input queue or output queue. + var buf *bytes.Buffer + l.termiosMu.Lock() + if output { + buf = l.transformOutput(b) + } else { + buf = l.transformInput(b) + } + l.termiosMu.Unlock() + + // Enqueue buf at the end of the queue. + buf.WriteTo(&q.buf) + return int64(n), err +} + +// transformOutput does ouput processing for one end of the pty. See +// drivers/tty/n_tty.c:do_output_char for an analagous kernel function. +// +// Precondition: l.termiosMu must be held. +func (l *lineDiscipline) transformOutput(buf []byte) *bytes.Buffer { + if !l.termios.OEnabled(linux.OPOST) { + return bytes.NewBuffer(buf) + } + + var ret bytes.Buffer + for len(buf) > 0 { + c := l.removeRune(&buf) + switch c { + case '\n': + if l.termios.OEnabled(linux.ONLRET) { + l.column = 0 + } + if l.termios.OEnabled(linux.ONLCR) { + ret.Write([]byte{'\r', '\n'}) + continue + } + case '\r': + if l.termios.OEnabled(linux.ONOCR) && l.column == 0 { + continue + } + if l.termios.OEnabled(linux.OCRNL) { + c = '\n' + if l.termios.OEnabled(linux.ONLRET) { + l.column = 0 + } + break + } + l.column = 0 + case '\t': + spaces := spacesPerTab - l.column%spacesPerTab + if l.termios.OutputFlags&linux.TABDLY == linux.XTABS { + l.column += spaces + ret.Write(bytes.Repeat([]byte{' '}, 8)) + continue + } + l.column += spaces + case '\b': + if l.column > 0 { + l.column-- + } + default: + l.column++ + } + ret.WriteRune(c) + } + return &ret +} + +// transformInput does input processing for one end of the pty. Characters +// read are transformed according to flags set in the termios struct. 
See +// drivers/tty/n_tty.c:n_tty_receive_char_special for an analogous kernel +// function. +// +// Precondition: l.termiosMu must be held. +func (l *lineDiscipline) transformInput(buf []byte) *bytes.Buffer { + var ret bytes.Buffer + for len(buf) > 0 { + c := l.removeRune(&buf) + switch c { + case '\r': + if l.termios.IEnabled(linux.IGNCR) { + continue + } + if l.termios.IEnabled(linux.ICRNL) { + c = '\n' + } + case '\n': + if l.termios.IEnabled(linux.INLCR) { + c = '\r' + } + } + ret.WriteRune(c) + } + return &ret +} + +// removeRune removes and returns the first rune from the byte array. The +// buffer's length is updated accordingly. +func (l *lineDiscipline) removeRune(b *[]byte) rune { + var c rune + var size int + // If UTF-8 support is enabled, runes might be multiple bytes. + if l.termios.IEnabled(linux.IUTF8) { + c, size = utf8.DecodeRune(*b) + } else { + c = rune((*b)[0]) + size = 1 + } + *b = (*b)[size:] + return c +} diff --git a/pkg/sentry/fs/tty/master.go b/pkg/sentry/fs/tty/master.go new file mode 100644 index 000000000..3c47ee517 --- /dev/null +++ b/pkg/sentry/fs/tty/master.go @@ -0,0 +1,173 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tty + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// masterInodeOperations are the fs.InodeOperations for the master end of the +// Terminal (ptmx file). +type masterInodeOperations struct { + inodeOperations + + // d is the containing dir. + d *dirInodeOperations +} + +var _ fs.InodeOperations = (*masterInodeOperations)(nil) + +// newMasterInode creates an Inode for the master end of a terminal. +func newMasterInode(ctx context.Context, d *dirInodeOperations, owner fs.FileOwner, p fs.FilePermissions) *fs.Inode { + iops := &masterInodeOperations{ + inodeOperations: inodeOperations{ + uattr: fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: owner, + Perms: p, + Links: 1, + // Size and Blocks are always 0. + }), + }, + d: d, + } + + return fs.NewInode(iops, d.msrc, fs.StableAttr{ + DeviceID: ptsDevice.DeviceID(), + // N.B. Linux always uses inode id 2 for ptmx. See + // fs/devpts/inode.c:mknod_ptmx. + // + // TODO: Since ptsDevice must be shared between + // different mounts, we must not assign fixed numbers. + InodeID: ptsDevice.NextIno(), + Type: fs.CharacterDevice, + // See fs/devpts/inode.c:devpts_fill_super. + BlockSize: 1024, + // The PTY master effectively has two different major/minor + // device numbers. + // + // This one is returned by stat for both opened and unopened + // instances of this inode. + // + // When the inode is opened (GetFile), a new device number is + // allocated based on major UNIX98_PTY_MASTER_MAJOR and the tty + // index as minor number. However, this device number is only + // accessible via ioctl(TIOCGDEV) and /proc/TID/stat. 
+ DeviceFileMajor: linux.TTYAUX_MAJOR, + DeviceFileMinor: linux.PTMX_MINOR, + }) +} + +// Release implements fs.InodeOperations.Release. +func (mi *masterInodeOperations) Release(ctx context.Context) { +} + +// GetFile implements fs.InodeOperations.GetFile. +// +// It allocates a new terminal. +func (mi *masterInodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + t, err := mi.d.allocateTerminal(ctx) + if err != nil { + return nil, err + } + + return fs.NewFile(ctx, d, flags, &masterFileOperations{ + d: mi.d, + t: t, + }), nil +} + +// masterFileOperations are the fs.FileOperations for the master end of a terminal. +type masterFileOperations struct { + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + + // d is the containing dir. + d *dirInodeOperations + + // t is the connected Terminal. + t *Terminal +} + +var _ fs.FileOperations = (*masterFileOperations)(nil) + +// Release implements fs.FileOperations.Release. +func (mf *masterFileOperations) Release() { + mf.d.masterClose(mf.t) + mf.t.DecRef() +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (mf *masterFileOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + mf.t.ld.inQueue.EventRegister(e, mask) + mf.t.ld.outQueue.EventRegister(e, mask) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (mf *masterFileOperations) EventUnregister(e *waiter.Entry) { + mf.t.ld.inQueue.EventUnregister(e) + mf.t.ld.outQueue.EventUnregister(e) +} + +// Readiness implements waiter.Waitable.Readiness. +func (mf *masterFileOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + return mf.t.ld.masterReadiness() +} + +// Read implements fs.FileOperations.Read. 
+func (mf *masterFileOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + return mf.t.ld.outputQueueRead(ctx, dst) +} + +// Write implements fs.FileOperations.Write. +func (mf *masterFileOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + return mf.t.ld.inputQueueWrite(ctx, src) +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (mf *masterFileOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + switch args[1].Uint() { + case linux.TCGETS: + // N.B. TCGETS on the master actually returns the configuration + // of the slave end. + return mf.t.ld.getTermios(ctx, io, args) + case linux.TCSETS: + // N.B. TCSETS on the master actually affects the configuration + // of the slave end. + return mf.t.ld.setTermios(ctx, io, args) + case linux.TCSETSW: + // TODO: This should drain the output queue first. + return mf.t.ld.setTermios(ctx, io, args) + case linux.TIOCGPTN: + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), uint32(mf.t.n), usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + case linux.TIOCSPTLCK: + // TODO: Implement pty locking. For now just pretend we do. + return 0, nil + default: + return 0, syserror.ENOTTY + } +} diff --git a/pkg/sentry/fs/tty/slave.go b/pkg/sentry/fs/tty/slave.go new file mode 100644 index 000000000..9178071a4 --- /dev/null +++ b/pkg/sentry/fs/tty/slave.go @@ -0,0 +1,151 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// slaveInodeOperations are the fs.InodeOperations for the slave end of the +// Terminal (pts file). +type slaveInodeOperations struct { + inodeOperations + + // d is the containing dir. + d *dirInodeOperations + + // t is the connected Terminal. + t *Terminal +} + +var _ fs.InodeOperations = (*slaveInodeOperations)(nil) + +// newSlaveInode creates an fs.Inode for the slave end of a terminal. +// +// newSlaveInode takes ownership of t. +func newSlaveInode(ctx context.Context, d *dirInodeOperations, t *Terminal, owner fs.FileOwner, p fs.FilePermissions) *fs.Inode { + iops := &slaveInodeOperations{ + inodeOperations: inodeOperations{ + uattr: fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: owner, + Perms: p, + Links: 1, + // Size and Blocks are always 0. + }), + }, + d: d, + t: t, + } + + return fs.NewInode(iops, d.msrc, fs.StableAttr{ + DeviceID: ptsDevice.DeviceID(), + // N.B. Linux always uses inode id = tty index + 3. See + // fs/devpts/inode.c:devpts_pty_new. + // + // TODO: Since ptsDevice must be shared between + // different mounts, we must not assign fixed numbers. + InodeID: ptsDevice.NextIno(), + Type: fs.CharacterDevice, + // See fs/devpts/inode.c:devpts_fill_super. + BlockSize: 1024, + DeviceFileMajor: linux.UNIX98_PTY_SLAVE_MAJOR, + DeviceFileMinor: t.n, + }) +} + +// Release implements fs.InodeOperations.Release. 
+func (si *slaveInodeOperations) Release(ctx context.Context) { + si.t.DecRef() +} + +// GetFile implements fs.InodeOperations.GetFile. +// +// This may race with destruction of the terminal. If the terminal is gone, it +// returns ENOENT. +func (si *slaveInodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + return fs.NewFile(ctx, d, flags, &slaveFileOperations{si: si}), nil +} + +// slaveFileOperations are the fs.FileOperations for the slave end of a terminal. +type slaveFileOperations struct { + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + + // si is the inode operations. + si *slaveInodeOperations +} + +var _ fs.FileOperations = (*slaveFileOperations)(nil) + +// Release implements fs.FileOperations.Release. +func (sf *slaveFileOperations) Release() { +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (sf *slaveFileOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + sf.si.t.ld.outQueue.EventRegister(e, mask) + sf.si.t.ld.inQueue.EventRegister(e, mask) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (sf *slaveFileOperations) EventUnregister(e *waiter.Entry) { + sf.si.t.ld.outQueue.EventUnregister(e) + sf.si.t.ld.inQueue.EventUnregister(e) +} + +// Readiness implements waiter.Waitable.Readiness. +func (sf *slaveFileOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + return sf.si.t.ld.slaveReadiness() +} + +// Read implements fs.FileOperations.Read. +func (sf *slaveFileOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + return sf.si.t.ld.inputQueueRead(ctx, dst) +} + +// Write implements fs.FileOperations.Write. 
+func (sf *slaveFileOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + return sf.si.t.ld.outputQueueWrite(ctx, src) +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (sf *slaveFileOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + switch args[1].Uint() { + case linux.TCGETS: + return sf.si.t.ld.getTermios(ctx, io, args) + case linux.TCSETS: + return sf.si.t.ld.setTermios(ctx, io, args) + case linux.TCSETSW: + // TODO: This should drain the output queue first. + return sf.si.t.ld.setTermios(ctx, io, args) + case linux.TIOCGPTN: + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), uint32(sf.si.t.n), usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + default: + return 0, syserror.ENOTTY + } +} diff --git a/pkg/sentry/fs/tty/terminal.go b/pkg/sentry/fs/tty/terminal.go new file mode 100644 index 000000000..6ae713a32 --- /dev/null +++ b/pkg/sentry/fs/tty/terminal.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" +) + +// Terminal is a pseudoterminal. +type Terminal struct { + refs.AtomicRefCount + + // n is the terminal index. + n uint32 + + // d is the containing directory. 
+ d *dirInodeOperations + + // ld is the line discipline of the terminal. + ld lineDiscipline +} + +func newTerminal(ctx context.Context, d *dirInodeOperations, n uint32) *Terminal { + termios := linux.DefaultSlaveTermios + return &Terminal{ + d: d, + n: n, + ld: lineDiscipline{termios: termios}, + } +} diff --git a/pkg/sentry/fs/tty/tty_test.go b/pkg/sentry/fs/tty/tty_test.go new file mode 100644 index 000000000..0c7560ed7 --- /dev/null +++ b/pkg/sentry/fs/tty/tty_test.go @@ -0,0 +1,56 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tty + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +func TestSimpleMasterToSlave(t *testing.T) { + ld := lineDiscipline{termios: linux.DefaultSlaveTermios} + ctx := contexttest.Context(t) + inBytes := []byte("hello, tty\n") + src := usermem.BytesIOSequence(inBytes) + outBytes := make([]byte, 32) + dst := usermem.BytesIOSequence(outBytes) + + // Write to the input queue. + nw, err := ld.inputQueueWrite(ctx, src) + if err != nil { + t.Fatalf("error writing to input queue: %v", err) + } + if nw != int64(len(inBytes)) { + t.Fatalf("wrote wrong length: got %d, want %d", nw, len(inBytes)) + } + + // Read from the input queue. 
+ nr, err := ld.inputQueueRead(ctx, dst) + if err != nil { + t.Fatalf("error reading from input queue: %v", err) + } + if nr != int64(len(inBytes)) { + t.Fatalf("read wrong length: got %d, want %d", nr, len(inBytes)) + } + + outStr := string(outBytes[:nr]) + inStr := string(inBytes) + if outStr != inStr { + t.Fatalf("written and read strings do not match: got %q, want %q", outStr, inStr) + } +} diff --git a/pkg/sentry/hostcpu/BUILD b/pkg/sentry/hostcpu/BUILD new file mode 100644 index 000000000..9457618d8 --- /dev/null +++ b/pkg/sentry/hostcpu/BUILD @@ -0,0 +1,20 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "hostcpu", + srcs = [ + "getcpu_amd64.s", + "hostcpu.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/hostcpu", + visibility = ["//:sandbox"], +) + +go_test( + name = "hostcpu_test", + size = "small", + srcs = ["hostcpu_test.go"], + embed = [":hostcpu"], +) diff --git a/pkg/sentry/hostcpu/getcpu_amd64.s b/pkg/sentry/hostcpu/getcpu_amd64.s new file mode 100644 index 000000000..7f6247d81 --- /dev/null +++ b/pkg/sentry/hostcpu/getcpu_amd64.s @@ -0,0 +1,24 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "textflag.h" + +// func GetCPU() (cpu uint32) +TEXT ·GetCPU(SB), NOSPLIT, $0-4 + BYTE $0x0f; BYTE $0x01; BYTE $0xf9; // RDTSCP + // On Linux, the bottom 12 bits of IA32_TSC_AUX are CPU and the upper 20 + // are node. See arch/x86/entry/vdso/vma.c:vgetcpu_cpu_init(). + ANDL $0xfff, CX + MOVL CX, cpu+0(FP) + RET diff --git a/pkg/sentry/hostcpu/hostcpu.go b/pkg/sentry/hostcpu/hostcpu.go new file mode 100644 index 000000000..fa46499ad --- /dev/null +++ b/pkg/sentry/hostcpu/hostcpu.go @@ -0,0 +1,67 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package hostcpu provides utilities for working with CPU information provided +// by a host Linux kernel. +package hostcpu + +import ( + "fmt" + "io/ioutil" + "strconv" + "strings" + "unicode" +) + +// GetCPU returns the caller's current CPU number, without using the Linux VDSO +// (which is not available to the sentry) or the getcpu(2) system call (which +// is relatively slow). +func GetCPU() uint32 + +// MaxPossibleCPU returns the highest possible CPU number, which is guaranteed +// not to change for the lifetime of the host kernel. 
// MaxPossibleCPU returns the highest possible CPU number, which is guaranteed
// not to change for the lifetime of the host kernel.
func MaxPossibleCPU() (uint32, error) {
	const possiblePath = "/sys/devices/system/cpu/possible"
	contents, err := ioutil.ReadFile(possiblePath)
	if err != nil {
		return 0, err
	}
	// The file's format is defined by Linux:
	// drivers/base/cpu.c:show_cpus_attr() =>
	// include/linux/cpumask.h:cpumask_print_to_pagebuf() =>
	// lib/bitmap.c:bitmap_print_to_pagebuf()
	text := string(contents)
	max, err := maxValueInLinuxBitmap(text)
	if err != nil {
		return 0, fmt.Errorf("invalid %s (%q): %v", possiblePath, text, err)
	}
	return uint32(max), nil
}

// maxValueInLinuxBitmap returns the maximum value specified in str, which is a
// string emitted by Linux's lib/bitmap.c:bitmap_print_to_pagebuf(list=true),
// e.g. "0-3,8-11". Since such lists are emitted in ascending order, the last
// decimal number in the string is the maximum.
func maxValueInLinuxBitmap(str string) (uint64, error) {
	s := strings.TrimSpace(str)
	// Drop everything up to and including the last non-digit rune, leaving
	// only the final decimal number (if any).
	if i := strings.LastIndexFunc(s, func(r rune) bool { return !unicode.IsDigit(r) }); i >= 0 {
		s = s[i+1:]
	}
	// ParseUint rejects the empty string, so malformed input (e.g. "" or
	// "\n") yields an error here.
	return strconv.ParseUint(s, 10, 64)
}
+ +package hostcpu + +import ( + "fmt" + "testing" +) + +func TestMaxValueInLinuxBitmap(t *testing.T) { + for _, test := range []struct { + str string + max uint64 + }{ + {"0", 0}, + {"0\n", 0}, + {"0,2", 2}, + {"0-63", 63}, + {"0-3,8-11", 11}, + } { + t.Run(fmt.Sprintf("%q", test.str), func(t *testing.T) { + max, err := maxValueInLinuxBitmap(test.str) + if err != nil || max != test.max { + t.Errorf("maxValueInLinuxBitmap: got (%d, %v), wanted (%d, nil)", max, err, test.max) + } + }) + } +} + +func TestMaxValueInLinuxBitmapErrors(t *testing.T) { + for _, str := range []string{"", "\n"} { + t.Run(fmt.Sprintf("%q", str), func(t *testing.T) { + max, err := maxValueInLinuxBitmap(str) + if err == nil { + t.Errorf("maxValueInLinuxBitmap: got (%d, nil), wanted (_, error)", max) + } + t.Log(err) + }) + } +} diff --git a/pkg/sentry/inet/BUILD b/pkg/sentry/inet/BUILD new file mode 100644 index 000000000..207cdb692 --- /dev/null +++ b/pkg/sentry/inet/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//:sandbox"], + licenses = ["notice"], # Apache 2.0 +) + +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "inet_state", + srcs = ["inet.go"], + out = "inet_state.go", + package = "inet", +) + +go_library( + name = "inet", + srcs = [ + "inet.go", + "inet_state.go", + "test_stack.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/inet", + deps = [ + "//pkg/state", + ], +) diff --git a/pkg/sentry/inet/inet.go b/pkg/sentry/inet/inet.go new file mode 100644 index 000000000..e4b326993 --- /dev/null +++ b/pkg/sentry/inet/inet.go @@ -0,0 +1,99 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package inet defines semantics for IP stacks. +package inet + +// Stack represents a TCP/IP stack. +type Stack interface { + // Interfaces returns all network interfaces as a mapping from interface + // indexes to interface properties. Interface indices are strictly positive + // integers. + Interfaces() map[int32]Interface + + // InterfaceAddrs returns all network interface addresses as a mapping from + // interface indexes to a slice of associated interface address properties. + InterfaceAddrs() map[int32][]InterfaceAddr + + // SupportsIPv6 returns true if the stack supports IPv6 connectivity. + SupportsIPv6() bool + + // TCPReceiveBufferSize returns TCP receive buffer size settings. + TCPReceiveBufferSize() (TCPBufferSize, error) + + // SetTCPReceiveBufferSize attempts to change TCP receive buffer size + // settings. + SetTCPReceiveBufferSize(size TCPBufferSize) error + + // TCPSendBufferSize returns TCP send buffer size settings. + TCPSendBufferSize() (TCPBufferSize, error) + + // SetTCPSendBufferSize attempts to change TCP send buffer size settings. + SetTCPSendBufferSize(size TCPBufferSize) error + + // TCPSACKEnabled returns true if RFC 2018 TCP Selective Acknowledgements + // are enabled. + TCPSACKEnabled() (bool, error) + + // SetTCPSACKEnabled attempts to change TCP selective acknowledgement + // settings. + SetTCPSACKEnabled(enabled bool) error +} + +// Interface contains information about a network interface. +type Interface struct { + // Keep these fields sorted in the order they appear in rtnetlink(7). 
+ + // DeviceType is the device type, a Linux ARPHRD_* constant. + DeviceType uint16 + + // Flags is the device flags; see netdevice(7), under "Ioctls", + // "SIOCGIFFLAGS, SIOCSIFFLAGS". + Flags uint32 + + // Name is the device name. + Name string + + // Addr is the hardware device address. + Addr []byte +} + +// InterfaceAddr contains information about a network interface address. +type InterfaceAddr struct { + // Keep these fields sorted in the order they appear in rtnetlink(7). + + // Family is the address family, a Linux AF_* constant. + Family uint8 + + // PrefixLen is the address prefix length. + PrefixLen uint8 + + // Flags is the address flags. + Flags uint8 + + // Addr is the actual address. + Addr []byte +} + +// TCPBufferSize contains settings controlling TCP buffer sizing. +type TCPBufferSize struct { + // Min is the minimum size. + Min int + + // Default is the default size. + Default int + + // Max is the maximum size. + Max int +} diff --git a/pkg/sentry/inet/test_stack.go b/pkg/sentry/inet/test_stack.go new file mode 100644 index 000000000..bc10926ee --- /dev/null +++ b/pkg/sentry/inet/test_stack.go @@ -0,0 +1,83 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package inet + +// TestStack is a dummy implementation of Stack for tests. 
+type TestStack struct { + InterfacesMap map[int32]Interface + InterfaceAddrsMap map[int32][]InterfaceAddr + SupportsIPv6Flag bool + TCPRecvBufSize TCPBufferSize + TCPSendBufSize TCPBufferSize + TCPSACKFlag bool +} + +// NewTestStack returns a TestStack with no network interfaces. The value of +// all other options is unspecified; tests that rely on specific values must +// set them explicitly. +func NewTestStack() *TestStack { + return &TestStack{ + InterfacesMap: make(map[int32]Interface), + InterfaceAddrsMap: make(map[int32][]InterfaceAddr), + } +} + +// Interfaces implements Stack.Interfaces. +func (s *TestStack) Interfaces() map[int32]Interface { + return s.InterfacesMap +} + +// InterfaceAddrs implements Stack.InterfaceAddrs. +func (s *TestStack) InterfaceAddrs() map[int32][]InterfaceAddr { + return s.InterfaceAddrsMap +} + +// SupportsIPv6 implements Stack.SupportsIPv6. +func (s *TestStack) SupportsIPv6() bool { + return s.SupportsIPv6Flag +} + +// TCPReceiveBufferSize implements Stack.TCPReceiveBufferSize. +func (s *TestStack) TCPReceiveBufferSize() (TCPBufferSize, error) { + return s.TCPRecvBufSize, nil +} + +// SetTCPReceiveBufferSize implements Stack.SetTCPReceiveBufferSize. +func (s *TestStack) SetTCPReceiveBufferSize(size TCPBufferSize) error { + s.TCPRecvBufSize = size + return nil +} + +// TCPSendBufferSize implements Stack.TCPSendBufferSize. +func (s *TestStack) TCPSendBufferSize() (TCPBufferSize, error) { + return s.TCPSendBufSize, nil +} + +// SetTCPSendBufferSize implements Stack.SetTCPSendBufferSize. +func (s *TestStack) SetTCPSendBufferSize(size TCPBufferSize) error { + s.TCPSendBufSize = size + return nil +} + +// TCPSACKEnabled implements Stack.TCPSACKEnabled. +func (s *TestStack) TCPSACKEnabled() (bool, error) { + return s.TCPSACKFlag, nil +} + +// SetTCPSACKEnabled implements Stack.SetTCPSACKEnabled. 
+func (s *TestStack) SetTCPSACKEnabled(enabled bool) error { + s.TCPSACKFlag = enabled + return nil +} diff --git a/pkg/sentry/kernel/BUILD b/pkg/sentry/kernel/BUILD new file mode 100644 index 000000000..62794cff5 --- /dev/null +++ b/pkg/sentry/kernel/BUILD @@ -0,0 +1,234 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "kernel_state", + srcs = [ + "abstract_socket_namespace.go", + "fd_map.go", + "fs_context.go", + "ipc_namespace.go", + "kernel.go", + "pending_signals.go", + "pending_signals_list.go", + "process_group_list.go", + "ptrace.go", + "rseq.go", + "session_list.go", + "sessions.go", + "signal.go", + "signal_handlers.go", + "syscalls.go", + "syscalls_state.go", + "syslog.go", + "task.go", + "task_clone.go", + "task_context.go", + "task_exec.go", + "task_exit.go", + "task_list.go", + "task_resources.go", + "task_run.go", + "task_sched.go", + "task_signals.go", + "task_start.go", + "task_syscall.go", + "thread_group.go", + "threads.go", + "timekeeper.go", + "timekeeper_state.go", + "timer.go", + "uts_namespace.go", + "vdso.go", + "version.go", + ], + out = "kernel_state.go", + imports = ["gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs"], + package = "kernel", +) + +go_template_instance( + name = "pending_signals_list", + out = "pending_signals_list.go", + package = "kernel", + prefix = "pendingSignal", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*pendingSignal", + }, +) + +go_template_instance( + name = "process_group_list", + out = "process_group_list.go", + package = "kernel", + prefix = "processGroup", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*ProcessGroup", + }, +) + +go_template_instance( + name = "seqatomic_taskgoroutineschedinfo", + out = "seqatomic_taskgoroutineschedinfo.go", + package = 
"kernel", + suffix = "TaskGoroutineSchedInfo", + template = "//pkg/sync:generic_seqatomic", + types = { + "Value": "TaskGoroutineSchedInfo", + }, +) + +go_template_instance( + name = "session_list", + out = "session_list.go", + package = "kernel", + prefix = "session", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*Session", + }, +) + +go_template_instance( + name = "task_list", + out = "task_list.go", + package = "kernel", + prefix = "task", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*Task", + }, +) + +go_library( + name = "kernel", + srcs = [ + "abstract_socket_namespace.go", + "context.go", + "fd_map.go", + "fs_context.go", + "ipc_namespace.go", + "kernel.go", + "kernel_state.go", + "pending_signals.go", + "pending_signals_list.go", + "process_group_list.go", + "ptrace.go", + "rseq.go", + "seccomp.go", + "seqatomic_taskgoroutineschedinfo.go", + "session_list.go", + "sessions.go", + "signal.go", + "signal_handlers.go", + "syscalls.go", + "syscalls_state.go", + "syslog.go", + "task.go", + "task_acct.go", + "task_block.go", + "task_clone.go", + "task_context.go", + "task_exec.go", + "task_exit.go", + "task_identity.go", + "task_list.go", + "task_log.go", + "task_net.go", + "task_resources.go", + "task_run.go", + "task_sched.go", + "task_signals.go", + "task_start.go", + "task_stop.go", + "task_syscall.go", + "task_usermem.go", + "thread_group.go", + "threads.go", + "timekeeper.go", + "timekeeper_state.go", + "timer.go", + "uts_namespace.go", + "vdso.go", + "version.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel", + visibility = ["//:sandbox"], + deps = [ + "//pkg/abi", + "//pkg/abi/linux", + "//pkg/amutex", + "//pkg/binary", + "//pkg/bits", + "//pkg/bpf", + "//pkg/cpuid", + "//pkg/eventchannel", + "//pkg/log", + "//pkg/refs", + "//pkg/secio", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/lock", + "//pkg/sentry/fs/timerfd", + "//pkg/sentry/hostcpu", + 
"//pkg/sentry/inet", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/epoll", + "//pkg/sentry/kernel/futex", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/sched", + "//pkg/sentry/kernel/semaphore", + "//pkg/sentry/kernel/time", + "//pkg/sentry/limits", + "//pkg/sentry/loader", + "//pkg/sentry/memmap", + "//pkg/sentry/mm", + "//pkg/sentry/platform", + "//pkg/sentry/safemem", + "//pkg/sentry/socket/netlink/port", + "//pkg/sentry/time", + "//pkg/sentry/uniqueid", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/state/statefile", + "//pkg/sync", + "//pkg/syserror", + "//pkg/tcpip", + "//pkg/tcpip/stack", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) + +go_test( + name = "kernel_test", + size = "small", + srcs = [ + "fd_map_test.go", + "table_test.go", + "task_test.go", + "timekeeper_test.go", + ], + embed = [":kernel"], + deps = [ + "//pkg/abi", + "//pkg/sentry/arch", + "//pkg/sentry/context/contexttest", + "//pkg/sentry/fs/filetest", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/sched", + "//pkg/sentry/limits", + "//pkg/sentry/platform", + "//pkg/sentry/time", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/syserror", + ], +) diff --git a/pkg/sentry/kernel/README.md b/pkg/sentry/kernel/README.md new file mode 100644 index 000000000..3306780d6 --- /dev/null +++ b/pkg/sentry/kernel/README.md @@ -0,0 +1,106 @@ +This package contains: + +- A (partial) emulation of the "core Linux kernel", which governs task + execution and scheduling, system call dispatch, and signal handling. See + below for details. + +- The top-level interface for the sentry's Linux kernel emulation in general, + used by the `main` function of all versions of the sentry. This interface + revolves around the `Env` type (defined in `kernel.go`). + +# Background + +In Linux, each schedulable context is referred to interchangeably as a "task" or +"thread". Tasks can be divided into userspace and kernel tasks. 
In the sentry, +scheduling is managed by the Go runtime, so each schedulable context is a +goroutine; only "userspace" (application) contexts are referred to as tasks, and +represented by Task objects. (From this point forward, "task" refers to the +sentry's notion of a task unless otherwise specified.) + +At a high level, Linux application threads can be thought of as repeating a "run +loop": + +- Some amount of application code is executed in userspace. + +- A trap (explicit syscall invocation, hardware interrupt or exception, etc.) + causes control flow to switch to the kernel. + +- Some amount of kernel code is executed in kernelspace, e.g. to handle the + cause of the trap. + +- The kernel "returns from the trap" into application code. + +Analogously, each task in the sentry is associated with a *task goroutine* that +executes that task's run loop (`Task.run` in `task_run.go`). However, the +sentry's task run loop differs in structure in order to support saving execution +state to, and resuming execution from, checkpoints. + +While in kernelspace, a Linux thread can be descheduled (cease execution) in a +variety of ways: + +- It can yield or be preempted, becoming temporarily descheduled but still + runnable. At present, the sentry delegates scheduling of runnable threads to + the Go runtime. + +- It can exit, becoming permanently descheduled. The sentry's equivalent is + returning from `Task.run`, terminating the task goroutine. + +- It can enter interruptible sleep, a state in which it can be woken by a + caller-defined wakeup or the receipt of a signal. In the sentry, interruptible + sleep (which is ambiguously referred to as *blocking*) is implemented by + making all events that can end blocking (including signal notifications) + communicated via Go channels and using `select` to multiplex wakeup sources; + see `task_block.go`. + +- It can enter uninterruptible sleep, a state in which it can only be woken by a + caller-defined wakeup. 
Killable sleep is a closely related variant in which + the task can also be woken by SIGKILL. (These definitions also include Linux's + "group-stopped" (`TASK_STOPPED`) and "ptrace-stopped" (`TASK_TRACED`) states.) + +To maximize compatibility with Linux, sentry checkpointing appears as a spurious +signal-delivery interrupt on all tasks; interrupted system calls return `EINTR` +or are automatically restarted as usual. However, these semantics require that +uninterruptible and killable sleeps do not appear to be interrupted. In other +words, the state of the task, including its progress through the interrupted +operation, must be preserved by checkpointing. For many such sleeps, the wakeup +condition is application-controlled, making it infeasible to wait for the sleep +to end before checkpointing. Instead, we must support checkpointing progress +through sleeping operations. + +# Implementation + +We break the task's control flow graph into *states*, delimited by: + +1. Points where uninterruptible and killable sleeps may occur. For example, +there exists a state boundary between signal dequeueing and signal delivery +because there may be an intervening ptrace signal-delivery-stop. + +2. Points where sleep-induced branches may "rejoin" normal execution. For +example, the syscall exit state exists because it can be reached immediately +following a synchronous syscall, or after a task that is sleeping in `execve()` +or `vfork()` resumes execution. + +3. Points containing large branches. This is strictly for organizational +purposes. For example, the state that processes interrupt-signaled conditions is +kept separate from the main "app" state to reduce the size of the latter. + +4. `SyscallReinvoke`, which does not correspond to anything in Linux, and exists +solely to serve the autosave feature. 
+ +![dot -Tsvg -Goverlap=false -orun_states.svg run_states.dot](g3doc/run_states.dot "Task control flow graph") + +States before which a stop may occur are represented as implementations of the +`taskRunState` interface named `run(state)`, allowing them to be saved and +restored. States that cannot be immediately preceded by a stop are simply `Task` +methods named `do(state)`. + +Conditions that can require task goroutines to cease execution for unknown +lengths of time are called *stops*. Stops are divided into *internal stops*, +which are stops whose start and end conditions are implemented within the +sentry, and *external stops*, which are stops whose start and end conditions are +not known to the sentry. Hence all uninterruptible and killable sleeps are +internal stops, and the existence of a pending checkpoint operation is an +external stop. Internal stops are reified into instances of the `TaskStop` type, +while external stops are merely counted. The task run loop alternates between +checking for stops and advancing the task's state. This allows checkpointing to +hold tasks in a stopped state while waiting for all tasks in the system to stop. diff --git a/pkg/sentry/kernel/abstract_socket_namespace.go b/pkg/sentry/kernel/abstract_socket_namespace.go new file mode 100644 index 000000000..014c4a3bf --- /dev/null +++ b/pkg/sentry/kernel/abstract_socket_namespace.go @@ -0,0 +1,108 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

import (
	"sync"
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/refs"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix"
)

// abstractEndpoint is an entry in an AbstractSocketNamespace: an endpoint
// bound to an abstract name, plus the weak reference used to detect when the
// object backing the endpoint has been destroyed.
type abstractEndpoint struct {
	// ep is the endpoint bound under name.
	ep unix.BoundEndpoint
	// wr weakly references the reference-counted object backing ep; when
	// that object's last strong reference is dropped, WeakRefGone fires.
	wr *refs.WeakRef
	// name is the abstract address ep is bound to.
	name string
	// ns is the namespace containing this entry.
	ns *AbstractSocketNamespace
}

// WeakRefGone implements refs.WeakRefUser.WeakRefGone.
//
// It removes this entry from its namespace — unless name has since been
// rebound to a different endpoint, in which case the newer binding is left
// untouched.
func (e *abstractEndpoint) WeakRefGone() {
	e.ns.mu.Lock()
	// Only delete if the map still holds *this* endpoint; a concurrent
	// Bind may have replaced it under the same name.
	if e.ns.endpoints[e.name].ep == e.ep {
		delete(e.ns.endpoints, e.name)
	}
	e.ns.mu.Unlock()
}

// AbstractSocketNamespace is used to implement the Linux abstract socket functionality.
type AbstractSocketNamespace struct {
	// mu protects endpoints.
	mu sync.Mutex `state:"nosave"`

	// Keeps mapping from name to endpoint.
	endpoints map[string]abstractEndpoint
}

// NewAbstractSocketNamespace returns a new AbstractSocketNamespace.
func NewAbstractSocketNamespace() *AbstractSocketNamespace {
	return &AbstractSocketNamespace{
		endpoints: make(map[string]abstractEndpoint),
	}
}

// A boundEndpoint wraps a unix.BoundEndpoint to maintain a reference on its
// backing object.
type boundEndpoint struct {
	unix.BoundEndpoint
	// rc is the strong reference taken by BoundEndpoint(); it is dropped
	// when the caller Releases the endpoint.
	rc refs.RefCounter
}

// Release implements unix.BoundEndpoint.Release.
func (e *boundEndpoint) Release() {
	// Drop the strong reference taken at lookup time, then release the
	// wrapped endpoint itself.
	e.rc.DecRef()
	e.BoundEndpoint.Release()
}

// BoundEndpoint retrieves the endpoint bound to the given name. The return
// value is nil if no endpoint was bound.
//
// If an entry exists but its backing object has already been destroyed (the
// weak reference cannot be upgraded), the stale entry is removed and nil is
// returned, as if nothing were bound.
func (a *AbstractSocketNamespace) BoundEndpoint(name string) unix.BoundEndpoint {
	a.mu.Lock()
	defer a.mu.Unlock()

	ep, ok := a.endpoints[name]
	if !ok {
		return nil
	}

	// Try to upgrade the weak reference to a strong one; failure means the
	// backing object is gone.
	rc := ep.wr.Get()
	if rc == nil {
		delete(a.endpoints, name)
		return nil
	}

	// The wrapper holds the strong reference and drops it on Release.
	return &boundEndpoint{ep.ep, rc}
}
+func (a *AbstractSocketNamespace) Bind(name string, ep unix.BoundEndpoint, rc refs.RefCounter) error { + a.mu.Lock() + defer a.mu.Unlock() + + if ep, ok := a.endpoints[name]; ok { + if rc := ep.wr.Get(); rc != nil { + rc.DecRef() + return syscall.EADDRINUSE + } + } + + ae := abstractEndpoint{ep: ep, name: name, ns: a} + ae.wr = refs.NewWeakRef(rc, &ae) + a.endpoints[name] = ae + return nil +} diff --git a/pkg/sentry/kernel/auth/BUILD b/pkg/sentry/kernel/auth/BUILD new file mode 100644 index 000000000..7f0680b88 --- /dev/null +++ b/pkg/sentry/kernel/auth/BUILD @@ -0,0 +1,73 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "auth_state", + srcs = [ + "credentials.go", + "id.go", + "id_map_range.go", + "id_map_set.go", + "user_namespace.go", + ], + out = "auth_state.go", + package = "auth", +) + +go_template_instance( + name = "id_map_range", + out = "id_map_range.go", + package = "auth", + prefix = "idMap", + template = "//pkg/segment:generic_range", + types = { + "T": "uint32", + }, +) + +go_template_instance( + name = "id_map_set", + out = "id_map_set.go", + consts = { + "minDegree": "3", + }, + package = "auth", + prefix = "idMap", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint32", + "Range": "idMapRange", + "Value": "uint32", + "Functions": "idMapFunctions", + }, +) + +go_library( + name = "auth", + srcs = [ + "auth.go", + "auth_state.go", + "capability_set.go", + "context.go", + "credentials.go", + "id.go", + "id_map.go", + "id_map_functions.go", + "id_map_range.go", + "id_map_set.go", + "user_namespace.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/bits", + "//pkg/log", + "//pkg/sentry/context", + "//pkg/state", + "//pkg/syserror", 
+ ], +) diff --git a/pkg/sentry/kernel/auth/auth.go b/pkg/sentry/kernel/auth/auth.go new file mode 100644 index 000000000..c49a6b852 --- /dev/null +++ b/pkg/sentry/kernel/auth/auth.go @@ -0,0 +1,22 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package auth implements an access control model that is a subset of Linux's. +// +// The auth package supports two kinds of access controls: user/group IDs and +// capabilities. Each resource in the security model is associated with a user +// namespace; "privileged" operations check that the operator's credentials +// have the required user/group IDs or capabilities within the user namespace +// of accessed resources. +package auth diff --git a/pkg/sentry/kernel/auth/capability_set.go b/pkg/sentry/kernel/auth/capability_set.go new file mode 100644 index 000000000..5b8164c49 --- /dev/null +++ b/pkg/sentry/kernel/auth/capability_set.go @@ -0,0 +1,61 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/bits" +) + +// A CapabilitySet is a set of capabilities implemented as a bitset. The zero +// value of CapabilitySet is a set containing no capabilities. +type CapabilitySet uint64 + +// AllCapabilities is a CapabilitySet containing all valid capabilities. +var AllCapabilities = CapabilitySetOf(linux.MaxCapability+1) - 1 + +// CapabilitySetOf returns a CapabilitySet containing only the given +// capability. +func CapabilitySetOf(cp linux.Capability) CapabilitySet { + return CapabilitySet(bits.MaskOf64(int(cp))) +} + +// CapabilitySetOfMany returns a CapabilitySet containing the given capabilities. +func CapabilitySetOfMany(cps []linux.Capability) CapabilitySet { + var cs uint64 + for _, cp := range cps { + cs |= bits.MaskOf64(int(cp)) + } + return CapabilitySet(cs) +} + +// TaskCapabilities represents all the capability sets for a task. Each of these +// sets is explained in greater detail in capabilities(7). +type TaskCapabilities struct { + // Permitted is a limiting superset for the effective capabilities that + // the thread may assume. + PermittedCaps CapabilitySet + // Inheritable is a set of capabilities preserved across an execve(2). + InheritableCaps CapabilitySet + // Effective is the set of capabilities used by the kernel to perform + // permission checks for the thread. + EffectiveCaps CapabilitySet + // Bounding is a limiting superset for the capabilities that a thread + // can add to its inheritable set using capset(2). + BoundingCaps CapabilitySet + // Ambient is a set of capabilities that are preserved across an + // execve(2) of a program that is not privileged. 
+ AmbientCaps CapabilitySet +} diff --git a/pkg/sentry/kernel/auth/context.go b/pkg/sentry/kernel/auth/context.go new file mode 100644 index 000000000..914589b28 --- /dev/null +++ b/pkg/sentry/kernel/auth/context.go @@ -0,0 +1,36 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" +) + +// contextID is the auth package's type for context.Context.Value keys. +type contextID int + +const ( + // CtxCredentials is a Context.Value key for Credentials. + CtxCredentials contextID = iota +) + +// CredentialsFromContext returns a copy of the Credentials used by ctx, or a +// set of Credentials with no capabilities if ctx does not have Credentials. +func CredentialsFromContext(ctx context.Context) *Credentials { + if v := ctx.Value(CtxCredentials); v != nil { + return v.(*Credentials) + } + return NewAnonymousCredentials() +} diff --git a/pkg/sentry/kernel/auth/credentials.go b/pkg/sentry/kernel/auth/credentials.go new file mode 100644 index 000000000..b832b28fe --- /dev/null +++ b/pkg/sentry/kernel/auth/credentials.go @@ -0,0 +1,227 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// Credentials contains information required to authorize privileged operations +// in a user namespace. +type Credentials struct { + // Real/effective/saved user/group IDs in the root user namespace. None of + // these should ever be NoID. + RealKUID KUID + EffectiveKUID KUID + SavedKUID KUID + RealKGID KGID + EffectiveKGID KGID + SavedKGID KGID + + // Filesystem user/group IDs are not implemented. "... setfsuid() is + // nowadays unneeded and should be avoided in new applications (likewise + // for setfsgid(2))." - setfsuid(2) + + // Supplementary groups used by set/getgroups. + // + // ExtraKGIDs slices are immutable, allowing multiple Credentials with the + // same ExtraKGIDs to share the same slice. + ExtraKGIDs []KGID + + // The capability sets applicable to this set of credentials. + PermittedCaps CapabilitySet + InheritableCaps CapabilitySet + EffectiveCaps CapabilitySet + BoundingCaps CapabilitySet + // Ambient capabilities are not introduced until Linux 4.3. + + // KeepCaps is the flag for PR_SET_KEEPCAPS which allow capabilities to be + // maintained after a switch from root user to non-root user via setuid(). + KeepCaps bool + + // The user namespace associated with the owner of the credentials. + UserNamespace *UserNamespace +} + +// NewAnonymousCredentials returns a set of credentials with no capabilities in +// any user namespace. 
+func NewAnonymousCredentials() *Credentials { + // Create a new root user namespace. Since the new namespace's owner is + // KUID 0 and the returned credentials have non-zero KUID/KGID, the + // returned credentials do not have any capabilities in the new namespace. + // Since the new namespace is not part of any existing user namespace + // hierarchy, the returned credentials do not have any capabilities in any + // other namespace. + return &Credentials{ + RealKUID: NobodyKUID, + EffectiveKUID: NobodyKUID, + SavedKUID: NobodyKUID, + RealKGID: NobodyKGID, + EffectiveKGID: NobodyKGID, + SavedKGID: NobodyKGID, + UserNamespace: NewRootUserNamespace(), + } +} + +// NewRootCredentials returns a set of credentials with KUID and KGID 0 (i.e. +// global root) in user namespace ns. +func NewRootCredentials(ns *UserNamespace) *Credentials { + // I can't find documentation for this anywhere, but it's correct for the + // inheritable capability set to be initially empty (the capabilities test + // checks for this property). + return &Credentials{ + RealKUID: RootKUID, + EffectiveKUID: RootKUID, + SavedKUID: RootKUID, + RealKGID: RootKGID, + EffectiveKGID: RootKGID, + SavedKGID: RootKGID, + PermittedCaps: AllCapabilities, + EffectiveCaps: AllCapabilities, + BoundingCaps: AllCapabilities, + UserNamespace: ns, + } +} + +// NewUserCredentials returns a set of credentials based on the given UID, GIDs, +// and capabilities in a given namespace. If all arguments are their zero +// values, this returns the same credentials as NewRootCredentials. +func NewUserCredentials(kuid KUID, kgid KGID, extraKGIDs []KGID, capabilities *TaskCapabilities, ns *UserNamespace) *Credentials { + creds := NewRootCredentials(ns) + + // Set the UID. + uid := kuid + creds.RealKUID = uid + creds.EffectiveKUID = uid + creds.SavedKUID = uid + + // Set GID. + gid := kgid + creds.RealKGID = gid + creds.EffectiveKGID = gid + creds.SavedKGID = gid + + // Set additional GIDs. 
+ creds.ExtraKGIDs = append(creds.ExtraKGIDs, extraKGIDs...) + + // Set capabilities. If capabilities aren't specified, we default to + // all capabilities. + if capabilities != nil { + creds.PermittedCaps = capabilities.PermittedCaps + creds.EffectiveCaps = capabilities.EffectiveCaps + creds.BoundingCaps = capabilities.BoundingCaps + creds.InheritableCaps = capabilities.InheritableCaps + // // TODO: Support ambient capabilities. + } else { + // If no capabilities are specified, grant the same capabilites + // that NewRootCredentials does. + creds.PermittedCaps = AllCapabilities + creds.EffectiveCaps = AllCapabilities + creds.BoundingCaps = AllCapabilities + } + + return creds +} + +// Fork generates an identical copy of a set of credentials. +func (c *Credentials) Fork() *Credentials { + nc := new(Credentials) + *nc = *c // Copy-by-value; this is legal for all fields. + return nc +} + +// InGroup returns true if c is in group kgid. Compare Linux's +// kernel/groups.c:in_group_p(). +func (c *Credentials) InGroup(kgid KGID) bool { + if c.EffectiveKGID == kgid { + return true + } + for _, extraKGID := range c.ExtraKGIDs { + if extraKGID == kgid { + return true + } + } + return false +} + +// HasCapabilityIn returns true if c has capability cp in ns. +func (c *Credentials) HasCapabilityIn(cp linux.Capability, ns *UserNamespace) bool { + for { + // "1. A process has a capability inside a user namespace if it is a member + // of that namespace and it has the capability in its effective capability + // set." - user_namespaces(7) + if c.UserNamespace == ns { + return CapabilitySetOf(cp)&c.EffectiveCaps != 0 + } + // "3. ... A process that resides in the parent of the user namespace and + // whose effective user ID matches the owner of the namespace has all + // capabilities in the namespace." + if c.UserNamespace == ns.parent && c.EffectiveKUID == ns.owner { + return true + } + // "2. 
If a process has a capability in a user namespace, then it has that + // capability in all child (and further removed descendant) namespaces as + // well." + if ns.parent == nil { + return false + } + ns = ns.parent + } +} + +// HasCapability returns true if c has capability cp in its user namespace. +func (c *Credentials) HasCapability(cp linux.Capability) bool { + return c.HasCapabilityIn(cp, c.UserNamespace) +} + +// UseUID checks that c can use uid in its user namespace, then translates it +// to the root user namespace. +// +// The checks UseUID does are common, but you should verify that it's doing +// exactly what you want. +func (c *Credentials) UseUID(uid UID) (KUID, error) { + // uid must be mapped. + kuid := c.UserNamespace.MapToKUID(uid) + if !kuid.Ok() { + return NoID, syserror.EINVAL + } + // If c has CAP_SETUID, then it can use any UID in its user namespace. + if c.HasCapability(linux.CAP_SETUID) { + return kuid, nil + } + // Otherwise, c must already have the UID as its real, effective, or saved + // set-user-ID. + if kuid == c.RealKUID || kuid == c.EffectiveKUID || kuid == c.SavedKUID { + return kuid, nil + } + return NoID, syserror.EPERM +} + +// UseGID checks that c can use gid in its user namespace, then translates it +// to the root user namespace. +func (c *Credentials) UseGID(gid GID) (KGID, error) { + kgid := c.UserNamespace.MapToKGID(gid) + if !kgid.Ok() { + return NoID, syserror.EINVAL + } + if c.HasCapability(linux.CAP_SETGID) { + return kgid, nil + } + if kgid == c.RealKGID || kgid == c.EffectiveKGID || kgid == c.SavedKGID { + return kgid, nil + } + return NoID, syserror.EPERM +} diff --git a/pkg/sentry/kernel/auth/id.go b/pkg/sentry/kernel/auth/id.go new file mode 100644 index 000000000..37522b018 --- /dev/null +++ b/pkg/sentry/kernel/auth/id.go @@ -0,0 +1,121 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "math" +) + +// UID is a user ID in an unspecified user namespace. +type UID uint32 + +// GID is a group ID in an unspecified user namespace. +type GID uint32 + +// In the root user namespace, user/group IDs have a 1-to-1 relationship with +// the users/groups they represent. In other user namespaces, this is not the +// case; for example, two different unmapped users may both "have" the overflow +// UID. This means that it is generally only valid to compare user and group +// IDs in the root user namespace. We assign distinct types, KUID/KGID, to such +// IDs to emphasize this distinction. ("k" is for "key", as in "unique key". +// Linux also uses the prefix "k", but I think they mean "kernel".) + +// KUID is a user ID in the root user namespace. +type KUID uint32 + +// KGID is a group ID in the root user namespace. +type KGID uint32 + +const ( + // NoID is uint32(-1). -1 is consistently used as a special value, in Linux + // and by extension in the auth package, to mean "no ID": + // + // - ID mapping returns -1 if the ID is not mapped. + // + // - Most set*id() syscalls accept -1 to mean "do not change this ID". + NoID = math.MaxUint32 + + // OverflowUID is the default value of /proc/sys/kernel/overflowuid. The + // "overflow UID" is usually [1] used when translating a user ID between + // namespaces fails because the ID is not mapped. (We don't implement this + // file, so the overflow UID is constant.) 
+ // + // [1] "There is one notable case where unmapped user and group IDs are not + // converted to the corresponding overflow ID value. When viewing a uid_map + // or gid_map file in which there is no mapping for the second field, that + // field is displayed as 4294967295 (-1 as an unsigned integer);" - + // user_namespaces(7) + OverflowUID = UID(65534) + OverflowGID = GID(65534) + + // NobodyKUID is the user ID usually reserved for the least privileged user + // "nobody". + NobodyKUID = KUID(65534) + NobodyKGID = KGID(65534) + + // RootKUID is the user ID usually used for the most privileged user "root". + RootKUID = KUID(0) + RootKGID = KGID(0) + RootUID = UID(0) + RootGID = GID(0) +) + +// Ok returns true if uid is not -1. +func (uid UID) Ok() bool { + return uid != NoID +} + +// Ok returns true if gid is not -1. +func (gid GID) Ok() bool { + return gid != NoID +} + +// Ok returns true if kuid is not -1. +func (kuid KUID) Ok() bool { + return kuid != NoID +} + +// Ok returns true if kgid is not -1. +func (kgid KGID) Ok() bool { + return kgid != NoID +} + +// OrOverflow returns uid if it is valid and the overflow UID otherwise. +func (uid UID) OrOverflow() UID { + if uid.Ok() { + return uid + } + return OverflowUID +} + +// OrOverflow returns gid if it is valid and the overflow GID otherwise. +func (gid GID) OrOverflow() GID { + if gid.Ok() { + return gid + } + return OverflowGID +} + +// In translates kuid into user namespace ns. If kuid is not mapped in ns, In +// returns NoID. +func (kuid KUID) In(ns *UserNamespace) UID { + return ns.MapFromKUID(kuid) +} + +// In translates kgid into user namespace ns. If kgid is not mapped in ns, In +// returns NoID. +func (kgid KGID) In(ns *UserNamespace) GID { + return ns.MapFromKGID(kgid) +} diff --git a/pkg/sentry/kernel/auth/id_map.go b/pkg/sentry/kernel/auth/id_map.go new file mode 100644 index 000000000..6adb33530 --- /dev/null +++ b/pkg/sentry/kernel/auth/id_map.go @@ -0,0 +1,283 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// MapFromKUID translates kuid, a UID in the root namespace, to a UID in ns. +func (ns *UserNamespace) MapFromKUID(kuid KUID) UID { + if ns.parent == nil { + return UID(kuid) + } + return UID(ns.mapID(&ns.uidMapFromParent, uint32(ns.parent.MapFromKUID(kuid)))) +} + +// MapFromKGID translates kgid, a GID in the root namespace, to a GID in ns. +func (ns *UserNamespace) MapFromKGID(kgid KGID) GID { + if ns.parent == nil { + return GID(kgid) + } + return GID(ns.mapID(&ns.gidMapFromParent, uint32(ns.parent.MapFromKGID(kgid)))) +} + +// MapToKUID translates uid, a UID in ns, to a UID in the root namespace. +func (ns *UserNamespace) MapToKUID(uid UID) KUID { + if ns.parent == nil { + return KUID(uid) + } + return ns.parent.MapToKUID(UID(ns.mapID(&ns.uidMapToParent, uint32(uid)))) +} + +// MapToKGID translates gid, a GID in ns, to a GID in the root namespace. 
+func (ns *UserNamespace) MapToKGID(gid GID) KGID {
+	if ns.parent == nil {
+		return KGID(gid)
+	}
+	return ns.parent.MapToKGID(GID(ns.mapID(&ns.gidMapToParent, uint32(gid))))
+}
+
+// mapID translates id through map m (one of ns's four ID map sets), returning
+// NoID if id is NoID or is not covered by any mapped segment. mapID locks
+// ns.mu.
+func (ns *UserNamespace) mapID(m *idMapSet, id uint32) uint32 {
+	if id == NoID {
+		return NoID
+	}
+	ns.mu.Lock()
+	defer ns.mu.Unlock()
+	if it := m.FindSegment(id); it.Ok() {
+		// Translate by id's offset within the segment containing it.
+		return it.Value() + (id - it.Start())
+	}
+	return NoID
+}
+
+// allIDsMapped returns true if all IDs in the range [start, end) are mapped in
+// m.
+//
+// Preconditions: end >= start.
+func (ns *UserNamespace) allIDsMapped(m *idMapSet, start, end uint32) bool {
+	ns.mu.Lock()
+	defer ns.mu.Unlock()
+	return m.SpanRange(idMapRange{start, end}) == end-start
+}
+
+// An IDMapEntry represents a mapping from a range of contiguous IDs in a user
+// namespace to an equally-sized range of contiguous IDs in the namespace's
+// parent.
+type IDMapEntry struct {
+	// FirstID is the first ID in the range in the namespace.
+	FirstID uint32
+
+	// FirstParentID is the first ID in the range in the parent namespace.
+	FirstParentID uint32
+
+	// Length is the number of IDs in the range.
+	Length uint32
+}
+
+// SetUIDMap instructs ns to translate UIDs as specified by entries.
+//
+// Note: SetUIDMap does not place an upper bound on the number of entries, but
+// Linux does. This restriction is implemented in SetUIDMap's caller, the
+// implementation of /proc/[pid]/uid_map.
+func (ns *UserNamespace) SetUIDMap(ctx context.Context, entries []IDMapEntry) error {
+	c := CredentialsFromContext(ctx)
+
+	ns.mu.Lock()
+	defer ns.mu.Unlock()
+	// "After the creation of a new user namespace, the uid_map file of *one*
+	// of the processes in the namespace may be written to *once* to define the
+	// mapping of user IDs in the new user namespace. An attempt to write more
+	// than once to a uid_map file in a user namespace fails with the error
+	// EPERM. Similar rules apply for gid_map files." - user_namespaces(7)
+	if !ns.uidMapFromParent.IsEmpty() {
+		return syserror.EPERM
+	}
+	// "At least one line must be written to the file."
+	if len(entries) == 0 {
+		return syserror.EINVAL
+	}
+	// """
+	// In order for a process to write to the /proc/[pid]/uid_map
+	// (/proc/[pid]/gid_map) file, all of the following requirements must be
+	// met:
+	//
+	// 1. The writing process must have the CAP_SETUID (CAP_SETGID) capability
+	// in the user namespace of the process pid.
+	// """
+	if !c.HasCapabilityIn(linux.CAP_SETUID, ns) {
+		return syserror.EPERM
+	}
+	// "2. The writing process must either be in the user namespace of the process
+	// pid or be in the parent user namespace of the process pid."
+	if c.UserNamespace != ns && c.UserNamespace != ns.parent {
+		return syserror.EPERM
+	}
+	// """
+	// 3. (see trySetUIDMap)
+	//
+	// 4. One of the following two cases applies:
+	//
+	// * Either the writing process has the CAP_SETUID (CAP_SETGID) capability
+	// in the parent user namespace.
+	// """
+	if !c.HasCapabilityIn(linux.CAP_SETUID, ns.parent) {
+		// """
+		// * Or otherwise all of the following restrictions apply:
+		//
+		// + The data written to uid_map (gid_map) must consist of a single line
+		// that maps the writing process' effective user ID (group ID) in the
+		// parent user namespace to a user ID (group ID) in the user namespace.
+		// """
+		if len(entries) != 1 || ns.parent.MapToKUID(UID(entries[0].FirstParentID)) != c.EffectiveKUID || entries[0].Length != 1 {
+			return syserror.EPERM
+		}
+		// """
+		// + The writing process must have the same effective user ID as the
+		// process that created the user namespace.
+		// """
+		if c.EffectiveKUID != ns.owner {
+			return syserror.EPERM
+		}
+	}
+	// trySetUIDMap leaves data in maps if it fails.
+	if err := ns.trySetUIDMap(entries); err != nil {
+		ns.uidMapFromParent.RemoveAll()
+		ns.uidMapToParent.RemoveAll()
+		return err
+	}
+	return nil
+}
+
+// trySetUIDMap validates entries and installs them into ns's UID maps. On
+// failure, partially-installed entries are left behind; the caller
+// (SetUIDMap) is responsible for clearing them.
+func (ns *UserNamespace) trySetUIDMap(entries []IDMapEntry) error {
+	for _, e := range entries {
+		// Determine upper bounds and check for overflow. This implicitly
+		// checks for NoID.
+		lastID := e.FirstID + e.Length
+		if lastID <= e.FirstID {
+			return syserror.EINVAL
+		}
+		lastParentID := e.FirstParentID + e.Length
+		if lastParentID <= e.FirstParentID {
+			return syserror.EINVAL
+		}
+		// "3. The mapped user IDs (group IDs) must in turn have a mapping in
+		// the parent user namespace."
+		// Only the root namespace has a nil parent, and root is assigned
+		// mappings when it's created, so SetUIDMap would have returned EPERM
+		// without reaching this point if ns is root.
+		if !ns.parent.allIDsMapped(&ns.parent.uidMapToParent, e.FirstParentID, lastParentID) {
+			return syserror.EPERM
+		}
+		// If either of these Adds fail, we have an overlapping range.
+		if !ns.uidMapFromParent.Add(idMapRange{e.FirstParentID, lastParentID}, e.FirstID) {
+			return syserror.EINVAL
+		}
+		if !ns.uidMapToParent.Add(idMapRange{e.FirstID, lastID}, e.FirstParentID) {
+			return syserror.EINVAL
+		}
+	}
+	return nil
+}
+
+// SetGIDMap instructs ns to translate GIDs as specified by entries.
+func (ns *UserNamespace) SetGIDMap(ctx context.Context, entries []IDMapEntry) error {
+	c := CredentialsFromContext(ctx)
+
+	ns.mu.Lock()
+	defer ns.mu.Unlock()
+	// The checks below mirror SetUIDMap; see the user_namespaces(7) quotes
+	// there, which apply equally to gid_map.
+	//
+	// The GID map may only be written once.
+	if !ns.gidMapFromParent.IsEmpty() {
+		return syserror.EPERM
+	}
+	// At least one entry must be provided.
+	if len(entries) == 0 {
+		return syserror.EINVAL
+	}
+	// Requirement 1: the writer must have CAP_SETGID in ns.
+	if !c.HasCapabilityIn(linux.CAP_SETGID, ns) {
+		return syserror.EPERM
+	}
+	// Requirement 2: the writer must be in ns or in ns's parent.
+	if c.UserNamespace != ns && c.UserNamespace != ns.parent {
+		return syserror.EPERM
+	}
+	// Requirement 4: either CAP_SETGID in the parent namespace, or a single
+	// entry mapping the writer's own effective GID.
+	if !c.HasCapabilityIn(linux.CAP_SETGID, ns.parent) {
+		if len(entries) != 1 || ns.parent.MapToKGID(GID(entries[0].FirstParentID)) != c.EffectiveKGID || entries[0].Length != 1 {
+			return syserror.EPERM
+		}
+		// It's correct for this to still be UID.
+		if c.EffectiveKUID != ns.owner {
+			return syserror.EPERM
+		}
+		// "In the case of gid_map, use of the setgroups(2) system call must
+		// first be denied by writing "deny" to the /proc/[pid]/setgroups file
+		// (see below) before writing to gid_map." (This file isn't implemented
+		// in the version of Linux we're emulating; see comment in
+		// UserNamespace.)
+	}
+	// trySetGIDMap leaves data in maps if it fails; clear both maps on error.
+	if err := ns.trySetGIDMap(entries); err != nil {
+		ns.gidMapFromParent.RemoveAll()
+		ns.gidMapToParent.RemoveAll()
+		return err
+	}
+	return nil
+}
+
+// trySetGIDMap validates entries and installs them into ns's GID maps; it
+// parallels trySetUIDMap. Partially-installed entries are left behind on
+// failure and must be cleared by the caller.
+func (ns *UserNamespace) trySetGIDMap(entries []IDMapEntry) error {
+	for _, e := range entries {
+		// Determine upper bounds and check for overflow (this also rejects
+		// NoID).
+		lastID := e.FirstID + e.Length
+		if lastID <= e.FirstID {
+			return syserror.EINVAL
+		}
+		lastParentID := e.FirstParentID + e.Length
+		if lastParentID <= e.FirstParentID {
+			return syserror.EINVAL
+		}
+		// The parent IDs must themselves be mapped in ns's parent.
+		if !ns.parent.allIDsMapped(&ns.parent.gidMapToParent, e.FirstParentID, lastParentID) {
+			return syserror.EPERM
+		}
+		// If either of these Adds fail, we have an overlapping range.
+		if !ns.gidMapFromParent.Add(idMapRange{e.FirstParentID, lastParentID}, e.FirstID) {
+			return syserror.EINVAL
+		}
+		if !ns.gidMapToParent.Add(idMapRange{e.FirstID, lastID}, e.FirstParentID) {
+			return syserror.EINVAL
+		}
+	}
+	return nil
+}
+
+// UIDMap returns the user ID mappings configured for ns. If no mappings
+// have been configured, UIDMap returns nil.
+func (ns *UserNamespace) UIDMap() []IDMapEntry { + return ns.getIDMap(&ns.uidMapToParent) +} + +// GIDMap returns the group ID mappings configured for ns. If no mappings +// have been configured, GIDMap returns nil. +func (ns *UserNamespace) GIDMap() []IDMapEntry { + return ns.getIDMap(&ns.gidMapToParent) +} + +func (ns *UserNamespace) getIDMap(m *idMapSet) []IDMapEntry { + ns.mu.Lock() + defer ns.mu.Unlock() + var entries []IDMapEntry + for it := m.FirstSegment(); it.Ok(); it = it.NextSegment() { + entries = append(entries, IDMapEntry{ + FirstID: it.Start(), + FirstParentID: it.Value(), + Length: it.Range().Length(), + }) + } + return entries +} diff --git a/pkg/sentry/kernel/auth/id_map_functions.go b/pkg/sentry/kernel/auth/id_map_functions.go new file mode 100644 index 000000000..889291d96 --- /dev/null +++ b/pkg/sentry/kernel/auth/id_map_functions.go @@ -0,0 +1,45 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +// idMapFunctions "implements" generic interface segment.Functions for +// idMapSet. An idMapSet maps non-overlapping ranges of contiguous IDs in one +// user namespace to non-overlapping ranges of contiguous IDs in another user +// namespace. Each such ID mapping is implemented as a range-to-value mapping +// in the set such that [range.Start(), range.End()) => [value, value + +// range.Length()). 
+type idMapFunctions struct{} + +func (idMapFunctions) MinKey() uint32 { + return 0 +} + +func (idMapFunctions) MaxKey() uint32 { + return NoID +} + +func (idMapFunctions) ClearValue(*uint32) {} + +func (idMapFunctions) Merge(r1 idMapRange, val1 uint32, r2 idMapRange, val2 uint32) (uint32, bool) { + // Mapped ranges have to be contiguous. + if val1+r1.Length() != val2 { + return 0, false + } + return val1, true +} + +func (idMapFunctions) Split(r idMapRange, val uint32, split uint32) (uint32, uint32) { + return val, val + (split - r.Start) +} diff --git a/pkg/sentry/kernel/auth/user_namespace.go b/pkg/sentry/kernel/auth/user_namespace.go new file mode 100644 index 000000000..0980aeadf --- /dev/null +++ b/pkg/sentry/kernel/auth/user_namespace.go @@ -0,0 +1,130 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "math" + "sync" + + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// A UserNamespace represents a user namespace. See user_namespaces(7) for +// details. +type UserNamespace struct { + // parent is this namespace's parent. If this is the root namespace, parent + // is nil. The parent pointer is immutable. + parent *UserNamespace + + // owner is the effective UID of the namespace's creator in the root + // namespace. owner is immutable. + owner KUID + + // mu protects the following fields. 
+ // + // If mu will be locked in multiple UserNamespaces, it must be locked in + // descendant namespaces before ancestors. + mu sync.Mutex `state:"nosave"` + + // Mappings of user/group IDs between this namespace and its parent. + // + // All ID maps, once set, cannot be changed. This means that successful + // UID/GID translations cannot be racy. + uidMapFromParent idMapSet + uidMapToParent idMapSet + gidMapFromParent idMapSet + gidMapToParent idMapSet + + // TODO: Consider supporting disabling setgroups(2), which "was + // added in Linux 3.19, but was backported to many earlier stable kernel + // series, because it addresses a security issue" - user_namespaces(7). (It + // was not backported to 3.11.10, which we are currently imitating.) +} + +// NewRootUserNamespace returns a UserNamespace that is appropriate for a +// system's root user namespace. +func NewRootUserNamespace() *UserNamespace { + var ns UserNamespace + // """ + // The initial user namespace has no parent namespace, but, for + // consistency, the kernel provides dummy user and group ID mapping files + // for this namespace. Looking at the uid_map file (gid_map is the same) + // from a shell in the initial namespace shows: + // + // $ cat /proc/$$/uid_map + // 0 0 4294967295 + // """ - user_namespaces(7) + for _, m := range []*idMapSet{ + &ns.uidMapFromParent, + &ns.uidMapToParent, + &ns.gidMapFromParent, + &ns.gidMapToParent, + } { + if !m.Add(idMapRange{0, math.MaxUint32}, 0) { + panic("Failed to insert into empty ID map") + } + } + return &ns +} + +// Root returns the root of the user namespace tree containing ns. +func (ns *UserNamespace) Root() *UserNamespace { + for ns.parent != nil { + ns = ns.parent + } + return ns +} + +// "The kernel imposes (since version 3.11) a limit of 32 nested levels of user +// namespaces." 
- user_namespaces(7) +const maxUserNamespaceDepth = 32 + +func (ns *UserNamespace) depth() int { + var i int + for ns != nil { + i++ + ns = ns.parent + } + return i +} + +// NewChildUserNamespace returns a new user namespace created by a caller with +// credentials c. +func (c *Credentials) NewChildUserNamespace() (*UserNamespace, error) { + if c.UserNamespace.depth() >= maxUserNamespaceDepth { + // "... Calls to unshare(2) or clone(2) that would cause this limit to + // be exceeded fail with the error EUSERS." - user_namespaces(7) + return nil, syserror.EUSERS + } + // "EPERM: CLONE_NEWUSER was specified in flags, but either the effective + // user ID or the effective group ID of the caller does not have a mapping + // in the parent namespace (see user_namespaces(7))." - clone(2) + // "CLONE_NEWUSER requires that the user ID and group ID of the calling + // process are mapped to user IDs and group IDs in the user namespace of + // the calling process at the time of the call." - unshare(2) + if !c.EffectiveKUID.In(c.UserNamespace).Ok() { + return nil, syserror.EPERM + } + if !c.EffectiveKGID.In(c.UserNamespace).Ok() { + return nil, syserror.EPERM + } + return &UserNamespace{ + parent: c.UserNamespace, + owner: c.EffectiveKUID, + // "When a user namespace is created, it starts without a mapping of + // user IDs (group IDs) to the parent user namespace." - + // user_namespaces(7) + }, nil +} diff --git a/pkg/sentry/kernel/context.go b/pkg/sentry/kernel/context.go new file mode 100644 index 000000000..261ca6f7a --- /dev/null +++ b/pkg/sentry/kernel/context.go @@ -0,0 +1,135 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" +) + +// contextID is the kernel package's type for context.Context.Value keys. +type contextID int + +const ( + // CtxCanTrace is a Context.Value key for a function with the same + // signature and semantics as kernel.Task.CanTrace. + CtxCanTrace contextID = iota + + // CtxKernel is a Context.Value key for a Kernel. + CtxKernel + + // CtxPIDNamespace is a Context.Value key for a PIDNamespace. + CtxPIDNamespace + + // CtxTask is a Context.Value key for a Task. + CtxTask + + // CtxUTSNamespace is a Context.Value key for a UTSNamespace. + CtxUTSNamespace + + // CtxIPCNamespace is a Context.Value key for a IPCNamespace. + CtxIPCNamespace +) + +// ContextCanTrace returns true if ctx is permitted to trace t, in the same sense +// as kernel.Task.CanTrace. +func ContextCanTrace(ctx context.Context, t *Task, attach bool) bool { + if v := ctx.Value(CtxCanTrace); v != nil { + return v.(func(*Task, bool) bool)(t, attach) + } + return false +} + +// KernelFromContext returns the Kernel in which ctx is executing, or nil if +// there is no such Kernel. +func KernelFromContext(ctx context.Context) *Kernel { + if v := ctx.Value(CtxKernel); v != nil { + return v.(*Kernel) + } + return nil +} + +// PIDNamespaceFromContext returns the PID namespace in which ctx is executing, +// or nil if there is no such PID namespace. 
+func PIDNamespaceFromContext(ctx context.Context) *PIDNamespace { + if v := ctx.Value(CtxPIDNamespace); v != nil { + return v.(*PIDNamespace) + } + return nil +} + +// UTSNamespaceFromContext returns the UTS namespace in which ctx is executing, +// or nil if there is no such UTS namespace. +func UTSNamespaceFromContext(ctx context.Context) *UTSNamespace { + if v := ctx.Value(CtxUTSNamespace); v != nil { + return v.(*UTSNamespace) + } + return nil +} + +// IPCNamespaceFromContext returns the IPC namespace in which ctx is executing, +// or nil if there is no such IPC namespace. +func IPCNamespaceFromContext(ctx context.Context) *IPCNamespace { + if v := ctx.Value(CtxIPCNamespace); v != nil { + return v.(*IPCNamespace) + } + return nil +} + +// TaskFromContext returns the Task associated with ctx, or nil if there is no +// such Task. +func TaskFromContext(ctx context.Context) *Task { + if v := ctx.Value(CtxTask); v != nil { + return v.(*Task) + } + return nil +} + +// AsyncContext returns a context.Context that may be used by goroutines that +// do work on behalf of t and therefore share its contextual values, but are +// not t's task goroutine (e.g. asynchronous I/O). +func (t *Task) AsyncContext() context.Context { + return taskAsyncContext{t: t} +} + +type taskAsyncContext struct { + context.NoopSleeper + t *Task +} + +// Debugf implements log.Logger.Debugf. +func (ctx taskAsyncContext) Debugf(format string, v ...interface{}) { + ctx.t.Debugf(format, v...) +} + +// Infof implements log.Logger.Infof. +func (ctx taskAsyncContext) Infof(format string, v ...interface{}) { + ctx.t.Infof(format, v...) +} + +// Warningf implements log.Logger.Warningf. +func (ctx taskAsyncContext) Warningf(format string, v ...interface{}) { + ctx.t.Warningf(format, v...) +} + +// IsLogging implements log.Logger.IsLogging. +func (ctx taskAsyncContext) IsLogging(level log.Level) bool { + return ctx.t.IsLogging(level) +} + +// Value implements context.Context.Value. 
+func (ctx taskAsyncContext) Value(key interface{}) interface{} { + return ctx.t.Value(key) +} diff --git a/pkg/sentry/kernel/epoll/BUILD b/pkg/sentry/kernel/epoll/BUILD new file mode 100644 index 000000000..04651d961 --- /dev/null +++ b/pkg/sentry/kernel/epoll/BUILD @@ -0,0 +1,52 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "epoll_autogen_state", + srcs = [ + "epoll.go", + "epoll_state.go", + ], + out = "epoll_autogen_state.go", + package = "epoll", +) + +go_library( + name = "epoll", + srcs = [ + "epoll.go", + "epoll_autogen_state.go", + "epoll_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/epoll", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/ilist", + "//pkg/refs", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/anon", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/waiter", + ], +) + +go_test( + name = "epoll_test", + size = "small", + srcs = [ + "epoll_test.go", + ], + embed = [":epoll"], + deps = [ + "//pkg/sentry/context/contexttest", + "//pkg/sentry/fs/filetest", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/kernel/epoll/epoll.go b/pkg/sentry/kernel/epoll/epoll.go new file mode 100644 index 000000000..b572fcd7e --- /dev/null +++ b/pkg/sentry/kernel/epoll/epoll.go @@ -0,0 +1,466 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package epoll provides an implementation of Linux's IO event notification +// facility. See epoll(7) for more details. +package epoll + +import ( + "fmt" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/ilist" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/anon" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// Event describes the event mask that was observed and the user data to be +// returned when one of the events occurs. It has this format to match the linux +// format to avoid extra copying/allocation when writing events to userspace. +type Event struct { + // Events is the event mask containing the set of events that have been + // observed on an entry. + Events uint32 + + // Data is an opaque 64-bit value provided by the caller when adding the + // entry, and returned to the caller when the entry reports an event. + Data [2]int32 +} + +// EntryFlags is a bitmask that holds an entry's flags. +type EntryFlags int + +// Valid entry flags. +const ( + OneShot EntryFlags = 1 << iota + EdgeTriggered +) + +// FileIdentifier identifies a file. We cannot use just the FD because it could +// potentially be reassigned. 
We also cannot use just the file pointer because +// it is possible to have multiple entries for the same file object as long as +// they are created with different FDs (i.e., the FDs point to the same file). +type FileIdentifier struct { + File *fs.File + Fd kdefs.FD +} + +// pollEntry holds all the state associated with an event poll entry, that is, +// a file being observed by an event poll object. +type pollEntry struct { + ilist.Entry + file *refs.WeakRef `state:"manual"` + id FileIdentifier `state:"wait"` + userData [2]int32 + waiter waiter.Entry `state:"manual"` + mask waiter.EventMask + flags EntryFlags + + epoll *EventPoll + + // We cannot save the current list pointer as it points into EventPoll + // struct, while state framework currently does not support such + // in-struct pointers. Instead, EventPoll will properly set this field + // in its loading logic. + curList *ilist.List `state:"nosave"` +} + +// WeakRefGone implements refs.WeakRefUser.WeakRefGone. +// weakReferenceGone is called when the file in the weak reference is destroyed. +// The poll entry is removed in response to this. +func (p *pollEntry) WeakRefGone() { + p.epoll.RemoveEntry(p.id) +} + +// EventPoll holds all the state associated with an event poll object, that is, +// collection of files to observe and their current state. +type EventPoll struct { + fsutil.PipeSeek `state:"zerovalue"` + fsutil.NotDirReaddir `state:"zerovalue"` + fsutil.NoFsync `state:"zerovalue"` + fsutil.NoopFlush `state:"zerovalue"` + fsutil.NoMMap `state:"zerovalue"` + fsutil.NoIoctl `state:"zerovalue"` + + // Wait queue is used to notify interested parties when the event poll + // object itself becomes readable or writable. + waiter.Queue + + // files is the map of all the files currently being observed, it is + // protected by mu. + mu sync.Mutex `state:"nosave"` + files map[FileIdentifier]*pollEntry + + // listsMu protects manipulation of the lists below. 
It needs to be a + // different lock to avoid circular lock acquisition order involving + // the wait queue mutexes and mu. The full order is mu, observed file + // wait queue mutex, then listsMu; this allows listsMu to be acquired + // when readyCallback is called. + // + // An entry is always in one of the following lists: + // readyList -- when there's a chance that it's ready to have + // events delivered to epoll waiters. Given that being + // ready is a transient state, the Readiness() and + // readEvents() functions always call the entry's file + // Readiness() function to confirm it's ready. + // waitingList -- when there's no chance that the entry is ready, + // so it's waiting for the readyCallback to be called + // on it before it gets moved to the readyList. + // disabledList -- when the entry is disabled. This happens when + // a one-shot entry gets delivered via readEvents(). + listsMu sync.Mutex `state:"nosave"` + readyList ilist.List + waitingList ilist.List + disabledList ilist.List +} + +// cycleMu is used to serialize all the cycle checks. This is only used when +// an event poll file is added as an entry to another event poll. Such checks +// are serialized to avoid lock acquisition order inversion: if a thread is +// adding A to B, and another thread is adding B to A, each would acquire A's +// and B's mutexes in reverse order, and could cause deadlocks. Having this +// lock prevents this by allowing only one check at a time to happen. +// +// We do the cycle check to prevent callers from introducing potentially +// infinite recursions. If a caller were to add A to B and then B to A, for +// event poll A to know if it's readable, it would need to check event poll B, +// which in turn would need event poll A and so on indefinitely. +var cycleMu sync.Mutex + +// NewEventPoll allocates and initializes a new event poll object. +func NewEventPoll(ctx context.Context) *fs.File { + // name matches fs/eventpoll.c:epoll_create1. 
+ dirent := fs.NewDirent(anon.NewInode(ctx), fmt.Sprintf("anon_inode:[eventpoll]")) + return fs.NewFile(ctx, dirent, fs.FileFlags{}, &EventPoll{ + files: make(map[FileIdentifier]*pollEntry), + }) +} + +// Release implements fs.FileOperations.Release. +func (e *EventPoll) Release() { + // We need to take the lock now because files may be attempting to + // remove entries in parallel if they get destroyed. + e.mu.Lock() + defer e.mu.Unlock() + + // Go through all entries and clean up. + for _, entry := range e.files { + entry.id.File.EventUnregister(&entry.waiter) + entry.file.Drop() + } +} + +// Read implements fs.FileOperations.Read. +func (*EventPoll) Read(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syscall.ENOSYS +} + +// Write implements fs.FileOperations.Write. +func (*EventPoll) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) { + return 0, syscall.ENOSYS +} + +// eventsAvailable determines if 'e' has events available for delivery. +func (e *EventPoll) eventsAvailable() bool { + e.listsMu.Lock() + + for it := e.readyList.Front(); it != nil; { + entry := it.(*pollEntry) + it = it.Next() + + // If the entry is ready, we know 'e' has at least one entry + // ready for delivery. + ready := entry.id.File.Readiness(entry.mask) + if ready != 0 { + e.listsMu.Unlock() + return true + } + + // Entry is not ready, so move it to waiting list. + e.readyList.Remove(entry) + e.waitingList.PushBack(entry) + entry.curList = &e.waitingList + } + + e.listsMu.Unlock() + + return false +} + +// Readiness determines if the event poll object is currently readable (i.e., +// if there are pending events for delivery). +func (e *EventPoll) Readiness(mask waiter.EventMask) waiter.EventMask { + ready := waiter.EventMask(0) + + if (mask&waiter.EventIn) != 0 && e.eventsAvailable() { + ready |= waiter.EventIn + } + + return ready +} + +// ReadEvents returns up to max available events. 
+func (e *EventPoll) ReadEvents(max int) []Event {
+	var local ilist.List
+	var ret []Event
+
+	e.listsMu.Lock()
+
+	// Go through all entries we believe may be ready.
+	for it := e.readyList.Front(); it != nil && len(ret) < max; {
+		entry := it.(*pollEntry)
+		it = it.Next()
+
+		// Check the entry's readiness. If it's not really ready, we
+		// just put it back in the waiting list and move on to the next
+		// entry.
+		ready := entry.id.File.Readiness(entry.mask) & entry.mask
+		if ready == 0 {
+			e.readyList.Remove(entry)
+			e.waitingList.PushBack(entry)
+			entry.curList = &e.waitingList
+
+			continue
+		}
+
+		// Add event to the array that will be returned to caller.
+		ret = append(ret, Event{
+			Events: uint32(ready),
+			Data:   entry.userData,
+		})
+
+		// The entry is consumed, so we must move it to the disabled
+		// list in case it's one-shot, or back to the wait list if it's
+		// edge-triggered. If it's neither, we leave it in the ready
+		// list so that its readiness can be checked the next time
+		// around; however, we must move it to the end of the list so
+		// that other events can be delivered as well.
+		e.readyList.Remove(entry)
+		if entry.flags&OneShot != 0 {
+			e.disabledList.PushBack(entry)
+			entry.curList = &e.disabledList
+		} else if entry.flags&EdgeTriggered != 0 {
+			e.waitingList.PushBack(entry)
+			entry.curList = &e.waitingList
+		} else {
+			local.PushBack(entry)
+		}
+	}
+
+	e.readyList.PushBackList(&local)
+
+	e.listsMu.Unlock()
+
+	return ret
+}
+
+// readyCallback is called when one of the files we're polling becomes ready. It
+// moves said file to the readyList if it's currently in the waiting list.
+type readyCallback struct{}
+
+// Callback implements waiter.EntryCallback.Callback.
+func (*readyCallback) Callback(w *waiter.Entry) { + entry := w.Context.(*pollEntry) + e := entry.epoll + + e.listsMu.Lock() + + if entry.curList == &e.waitingList { + e.waitingList.Remove(entry) + e.readyList.PushBack(entry) + entry.curList = &e.readyList + + e.Notify(waiter.EventIn) + } + + e.listsMu.Unlock() +} + +// initEntryReadiness initializes the entry's state with regards to its +// readiness by placing it in the appropriate list and registering for +// notifications. +func (e *EventPoll) initEntryReadiness(entry *pollEntry) { + // A new entry starts off in the waiting list. + e.listsMu.Lock() + e.waitingList.PushBack(entry) + entry.curList = &e.waitingList + e.listsMu.Unlock() + + // Register for event notifications. + f := entry.id.File + f.EventRegister(&entry.waiter, entry.mask) + + // Check if the file happens to already be in a ready state. + ready := f.Readiness(entry.mask) & entry.mask + if ready != 0 { + (*readyCallback).Callback(nil, &entry.waiter) + } +} + +// observes checks if event poll object e is directly or indirectly observing +// event poll object ep. It uses a bounded recursive depth-first search. +func (e *EventPoll) observes(ep *EventPoll, depthLeft int) bool { + // If we reached the maximum depth, we'll consider that we found it + // because we don't want to allow chains that are too long. + if depthLeft <= 0 { + return true + } + + e.mu.Lock() + defer e.mu.Unlock() + + // Go through each observed file and check if it is or observes ep. + for id := range e.files { + f, ok := id.File.FileOperations.(*EventPoll) + if !ok { + continue + } + + if f == ep || f.observes(ep, depthLeft-1) { + return true + } + } + + return false +} + +// AddEntry adds a new file to the collection of files observed by e. +func (e *EventPoll) AddEntry(id FileIdentifier, flags EntryFlags, mask waiter.EventMask, data [2]int32) error { + // Acquire cycle check lock if another event poll is being added. 
+ ep, ok := id.File.FileOperations.(*EventPoll) + if ok { + cycleMu.Lock() + defer cycleMu.Unlock() + } + + e.mu.Lock() + defer e.mu.Unlock() + + // Fail if the file already has an entry. + if _, ok := e.files[id]; ok { + return syscall.EEXIST + } + + // Check if a cycle would be created. We use 4 as the limit because + // that's the value used by linux and we want to emulate it. + if ep != nil { + if e == ep { + return syscall.EINVAL + } + + if ep.observes(e, 4) { + return syscall.ELOOP + } + } + + // Create new entry and add it to map. + // + // N.B. Even though we are creating a weak reference here, we know it + // won't trigger a callback because we hold a reference to the file + // throughout the execution of this function. + entry := &pollEntry{ + id: id, + userData: data, + epoll: e, + flags: flags, + waiter: waiter.Entry{Callback: &readyCallback{}}, + mask: mask, + } + entry.waiter.Context = entry + e.files[id] = entry + entry.file = refs.NewWeakRef(id.File, entry) + + // Initialize the readiness state of the new entry. + e.initEntryReadiness(entry) + + return nil +} + +// UpdateEntry updates the flags, mask and user data associated with a file that +// is already part of the collection of observed files. +func (e *EventPoll) UpdateEntry(id FileIdentifier, flags EntryFlags, mask waiter.EventMask, data [2]int32) error { + e.mu.Lock() + defer e.mu.Unlock() + + // Fail if the file doesn't have an entry. + entry, ok := e.files[id] + if !ok { + return syscall.ENOENT + } + + // Unregister the old mask and remove entry from the list it's in, so + // readyCallback is guaranteed to not be called on this entry anymore. + entry.id.File.EventUnregister(&entry.waiter) + + // Remove entry from whatever list it's in. This ensure that no other + // threads have access to this entry as the only way left to find it + // is via e.files, but we hold e.mu, which prevents that. 
+	e.listsMu.Lock()
+	entry.curList.Remove(entry)
+	e.listsMu.Unlock()
+
+	// Initialize new readiness state.
+	entry.flags = flags
+	entry.mask = mask
+	entry.userData = data
+	e.initEntryReadiness(entry)
+
+	return nil
+}
+
+// RemoveEntry removes a file from the collection of observed files.
+func (e *EventPoll) RemoveEntry(id FileIdentifier) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	// Fail if the file doesn't have an entry.
+	entry, ok := e.files[id]
+	if !ok {
+		return syscall.ENOENT
+	}
+
+	// Unregister from file first so that no concurrent attempts will be
+	// made to manipulate the file.
+	entry.id.File.EventUnregister(&entry.waiter)
+
+	// Remove from the current list.
+	e.listsMu.Lock()
+	entry.curList.Remove(entry)
+	entry.curList = nil
+	e.listsMu.Unlock()
+
+	// Remove file from map, and drop weak reference.
+	delete(e.files, id)
+	entry.file.Drop()
+
+	return nil
+}
+
+// UnregisterEpollWaiters removes the epoll waiter objects from the waiting
+// queues. This is different from Release() as the file is not dereferenced.
+func (e *EventPoll) UnregisterEpollWaiters() {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	for _, entry := range e.files {
+		entry.id.File.EventUnregister(&entry.waiter)
+	}
+}
diff --git a/pkg/sentry/kernel/epoll/epoll_state.go b/pkg/sentry/kernel/epoll/epoll_state.go
new file mode 100644
index 000000000..dabb32f49
--- /dev/null
+++ b/pkg/sentry/kernel/epoll/epoll_state.go
@@ -0,0 +1,51 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package epoll + +import ( + "gvisor.googlesource.com/gvisor/pkg/ilist" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// afterLoad is invoked by stateify. +func (p *pollEntry) afterLoad() { + p.waiter = waiter.Entry{Callback: &readyCallback{}} + p.waiter.Context = p + p.file = refs.NewWeakRef(p.id.File, p) + p.id.File.EventRegister(&p.waiter, p.mask) +} + +// afterLoad is invoked by stateify. +func (e *EventPoll) afterLoad() { + e.listsMu.Lock() + defer e.listsMu.Unlock() + + for _, ls := range []*ilist.List{&e.waitingList, &e.readyList, &e.disabledList} { + for it := ls.Front(); it != nil; it = it.Next() { + it.(*pollEntry).curList = ls + } + } + + for it := e.waitingList.Front(); it != nil; it = it.Next() { + p := it.(*pollEntry) + if p.id.File.Readiness(p.mask) != 0 { + e.waitingList.Remove(p) + e.readyList.PushBack(p) + p.curList = &e.readyList + e.Notify(waiter.EventIn) + } + } +} diff --git a/pkg/sentry/kernel/epoll/epoll_test.go b/pkg/sentry/kernel/epoll/epoll_test.go new file mode 100644 index 000000000..bc869fc13 --- /dev/null +++ b/pkg/sentry/kernel/epoll/epoll_test.go @@ -0,0 +1,54 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package epoll + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/filetest" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +func TestFileDestroyed(t *testing.T) { + f := filetest.NewTestFile(t) + id := FileIdentifier{f, 12} + + efile := NewEventPoll(contexttest.Context(t)) + e := efile.FileOperations.(*EventPoll) + if err := e.AddEntry(id, 0, waiter.EventIn, [2]int32{}); err != nil { + t.Fatalf("addEntry failed: %v", err) + } + + // Check that we get an event reported twice in a row. + evt := e.ReadEvents(1) + if len(evt) != 1 { + t.Fatalf("Unexpected number of ready events: want %v, got %v", 1, len(evt)) + } + + evt = e.ReadEvents(1) + if len(evt) != 1 { + t.Fatalf("Unexpected number of ready events: want %v, got %v", 1, len(evt)) + } + + // Destroy the file. Check that we get no more events. + f.DecRef() + + evt = e.ReadEvents(1) + if len(evt) != 0 { + t.Fatalf("Unexpected number of ready events: want %v, got %v", 0, len(evt)) + } + +} diff --git a/pkg/sentry/kernel/eventfd/BUILD b/pkg/sentry/kernel/eventfd/BUILD new file mode 100644 index 000000000..2d5a3c693 --- /dev/null +++ b/pkg/sentry/kernel/eventfd/BUILD @@ -0,0 +1,46 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "eventfd_state", + srcs = [ + "eventfd.go", + ], + out = "eventfd_state.go", + package = "eventfd", +) + +go_library( + name = "eventfd", + srcs = [ + "eventfd.go", + "eventfd_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/eventfd", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/refs", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/anon", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/waiter", + ], +) + +go_test( + name = "eventfd_test", + 
size = "small", + srcs = ["eventfd_test.go"], + embed = [":eventfd"], + deps = [ + "//pkg/sentry/context/contexttest", + "//pkg/sentry/usermem", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/kernel/eventfd/eventfd.go b/pkg/sentry/kernel/eventfd/eventfd.go new file mode 100644 index 000000000..c9333719e --- /dev/null +++ b/pkg/sentry/kernel/eventfd/eventfd.go @@ -0,0 +1,172 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package eventfd provides an implementation of Linux's file-based event +// notification. +package eventfd + +import ( + "math" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/anon" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// EventOperations represents an event with the semantics of Linux's file-based event +// notification (eventfd). +type EventOperations struct { + fsutil.NoopRelease `state:"nosave"` + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + fsutil.NoIoctl `state:"nosave"` + + // Mutex that protects accesses to the fields of this event. 
+ mu sync.Mutex `state:"nosave"` + + // Queue is used to notify interested parties when the event object + // becomes readable or writable. + waiter.Queue `state:"nosave"` + + // val is the current value of the event counter. + val uint64 + + // semMode specifies whether the event is in "semaphore" mode. + semMode bool +} + +// New creates a new event object with the supplied initial value and mode. +func New(ctx context.Context, initVal uint64, semMode bool) *fs.File { + // name matches fs/eventfd.c:eventfd_file_create. + dirent := fs.NewDirent(anon.NewInode(ctx), "anon_inode:[eventfd]") + return fs.NewFile(ctx, dirent, fs.FileFlags{Read: true, Write: true}, &EventOperations{ + val: initVal, + semMode: semMode, + }) +} + +// Read implements fs.FileOperations.Read. +func (e *EventOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + if dst.NumBytes() < 8 { + return 0, syscall.EINVAL + } + if err := e.read(ctx, dst); err != nil { + return 0, err + } + return 8, nil +} + +// Write implements fs.FileOperations.Write. +func (e *EventOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + if src.NumBytes() < 8 { + return 0, syscall.EINVAL + } + if err := e.write(ctx, src); err != nil { + return 0, err + } + return 8, nil +} + +func (e *EventOperations) read(ctx context.Context, dst usermem.IOSequence) error { + e.mu.Lock() + + // We can't complete the read if the value is currently zero. + if e.val == 0 { + e.mu.Unlock() + return syserror.ErrWouldBlock + } + + // Update the value based on the mode the event is operating in. + var val uint64 + if e.semMode { + val = 1 + // Consistent with Linux, this is done even if writing to memory fails. + e.val-- + } else { + val = e.val + e.val = 0 + } + + e.mu.Unlock() + + // Notify writers. We do this even if we were already writable because + // it is possible that a writer is waiting to write the maximum value + // to the event. 
+ e.Notify(waiter.EventOut) + + var buf [8]byte + usermem.ByteOrder.PutUint64(buf[:], val) + _, err := dst.CopyOut(ctx, buf[:]) + return err +} + +func (e *EventOperations) write(ctx context.Context, src usermem.IOSequence) error { + var buf [8]byte + if _, err := src.CopyIn(ctx, buf[:]); err != nil { + return err + } + val := usermem.ByteOrder.Uint64(buf[:]) + + return e.Signal(val) +} + +// Signal is an internal function to signal the event fd. +func (e *EventOperations) Signal(val uint64) error { + if val == math.MaxUint64 { + return syscall.EINVAL + } + + e.mu.Lock() + + // We only allow writes that won't cause the value to go over the max + // uint64 minus 1. + if val > math.MaxUint64-1-e.val { + e.mu.Unlock() + return syserror.ErrWouldBlock + } + + e.val += val + e.mu.Unlock() + + // Always trigger a notification. + e.Notify(waiter.EventIn) + + return nil +} + +// Readiness returns the ready events for the event fd. +func (e *EventOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + ready := waiter.EventMask(0) + + e.mu.Lock() + if e.val > 0 { + ready |= waiter.EventIn + } + + if e.val < math.MaxUint64-1 { + ready |= waiter.EventOut + } + e.mu.Unlock() + + return mask & ready +} diff --git a/pkg/sentry/kernel/eventfd/eventfd_test.go b/pkg/sentry/kernel/eventfd/eventfd_test.go new file mode 100644 index 000000000..71326b62f --- /dev/null +++ b/pkg/sentry/kernel/eventfd/eventfd_test.go @@ -0,0 +1,78 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package eventfd + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +func TestEventfd(t *testing.T) { + initVals := []uint64{ + 0, + // Using a non-zero initial value verifies that writing to an + // eventfd signals when the eventfd's counter was already + // non-zero. + 343, + } + + for _, initVal := range initVals { + ctx := contexttest.Context(t) + + // Make a new event that is writable. + event := New(ctx, initVal, false) + + // Register a callback for a write event. + w, ch := waiter.NewChannelEntry(nil) + event.EventRegister(&w, waiter.EventIn) + defer event.EventUnregister(&w) + + data := []byte("00000124") + // Create and submit a write request. + n, err := event.Writev(ctx, usermem.BytesIOSequence(data)) + if err != nil { + t.Fatal(err) + } + if n != 8 { + t.Errorf("eventfd.write wrote %d bytes, not full int64", n) + } + + // Check if the callback fired due to the write event. + select { + case <-ch: + default: + t.Errorf("Didn't get notified of EventIn after write") + } + } +} + +func TestEventfdStat(t *testing.T) { + ctx := contexttest.Context(t) + + // Make a new event that is writable. + event := New(ctx, 0, false) + + // Create and submit an stat request. + uattr, err := event.Dirent.Inode.UnstableAttr(ctx) + if err != nil { + t.Fatalf("eventfd stat request failed: %v", err) + } + if uattr.Size != 0 { + t.Fatal("EventFD size should be 0") + } +} diff --git a/pkg/sentry/kernel/fd_map.go b/pkg/sentry/kernel/fd_map.go new file mode 100644 index 000000000..ef73125fd --- /dev/null +++ b/pkg/sentry/kernel/fd_map.go @@ -0,0 +1,340 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "bytes" + "fmt" + "sort" + "sync" + "sync/atomic" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/lock" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" +) + +// FDs is an ordering of FD's that can be made stable. +type FDs []kdefs.FD + +func (f FDs) Len() int { + return len(f) +} + +func (f FDs) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +func (f FDs) Less(i, j int) bool { + return f[i] < f[j] +} + +// FDFlags define flags for an individual descriptor. +type FDFlags struct { + // CloseOnExec indicates the descriptor should be closed on exec. + CloseOnExec bool +} + +// descriptor holds the details about a file descriptor, namely a pointer the +// file itself and the descriptor flags. +type descriptor struct { + file *fs.File + flags FDFlags +} + +// FDMap is used to manage File references and flags. +type FDMap struct { + refs.AtomicRefCount + k *Kernel + files map[kdefs.FD]descriptor + mu sync.RWMutex `state:"nosave"` + uid uint64 +} + +// ID returns a unique identifier for this FDMap. +func (f *FDMap) ID() uint64 { + return f.uid +} + +// NewFDMap allocates a new FDMap that may be used by tasks in k. 
+func (k *Kernel) NewFDMap() *FDMap { + return &FDMap{ + k: k, + files: make(map[kdefs.FD]descriptor), + uid: atomic.AddUint64(&k.fdMapUids, 1), + } +} + +// destroy removes all of the file descriptors from the map. +func (f *FDMap) destroy() { + f.RemoveIf(func(*fs.File, FDFlags) bool { + return true + }) +} + +// DecRef implements RefCounter.DecRef with destructor f.destroy. +func (f *FDMap) DecRef() { + f.DecRefWithDestructor(f.destroy) +} + +// Size returns the number of file descriptor slots currently allocated. +func (f *FDMap) Size() int { + f.mu.RLock() + defer f.mu.RUnlock() + + return len(f.files) +} + +// String is a stringer for FDMap. +func (f *FDMap) String() string { + f.mu.RLock() + defer f.mu.RUnlock() + + var b bytes.Buffer + for k, v := range f.files { + n, _ := v.file.Dirent.FullName(nil /* root */) + b.WriteString(fmt.Sprintf("\tfd:%d => name %s\n", k, n)) + } + return b.String() +} + +// NewFDFrom allocates a new FD guaranteed to be the lowest number available +// greater than or equal to from. This property is important as Unix programs +// tend to count on this allocation order. +func (f *FDMap) NewFDFrom(fd kdefs.FD, file *fs.File, flags FDFlags, limitSet *limits.LimitSet) (kdefs.FD, error) { + if fd < 0 { + // Don't accept negative FDs. + return 0, syscall.EINVAL + } + + f.mu.Lock() + defer f.mu.Unlock() + + // Finds the lowest fd not in the handles map. + lim := limitSet.Get(limits.NumberOfFiles) + for i := fd; lim.Cur == limits.Infinity || i < kdefs.FD(lim.Cur); i++ { + if _, ok := f.files[i]; !ok { + file.IncRef() + f.files[i] = descriptor{file, flags} + return i, nil + } + } + + return -1, syscall.EMFILE +} + +// NewFDAt sets the file reference for the given FD. If there is an +// active reference for that FD, the ref count for that existing reference +// is decremented. +func (f *FDMap) NewFDAt(fd kdefs.FD, file *fs.File, flags FDFlags, limitSet *limits.LimitSet) error { + if fd < 0 { + // Don't accept negative FDs. 
+		return syscall.EBADF
+	}
+
+	// In this one case we do not do a defer of the Unlock.  The
+	// reason is that we must have done all the work needed for
+	// discarding any old open file before we return to the
+	// caller. In other words, the DecRef(), below, must have
+	// completed by the time we return to the caller to ensure
+	// side effects are, in fact, effected. A classic example is
+	// dup2(fd1, fd2); if fd2 was already open, it must be closed,
+	// and we don't want to resume the caller until it is; we have
+	// to block on the DecRef(). Hence we can not just do a 'go
+	// oldfile.DecRef()', since there would be no guarantee that
+	// it would be done before the caller resumed. Since we
+	// must wait for the DecRef() to finish, and that could take
+	// time, it's best to first call f.mu.Unlock() so we are
+	// not blocking other uses of this FDMap on the DecRef() call.
+	f.mu.Lock()
+	oldDesc, oldExists := f.files[fd]
+	lim := limitSet.Get(limits.NumberOfFiles).Cur
+	// if we're closing one then the effective limit is one
+	// more than the actual limit.
+	if oldExists && lim != limits.Infinity {
+		lim++
+	}
+	if lim != limits.Infinity && fd >= kdefs.FD(lim) {
+		f.mu.Unlock()
+		return syscall.EMFILE
+	}
+
+	file.IncRef()
+	f.files[fd] = descriptor{file, flags}
+	f.mu.Unlock()
+
+	if oldExists {
+		oldDesc.file.DecRef()
+	}
+	return nil
+}
+
+// SetFlags sets the flags for the given file descriptor, if it is valid.
+func (f *FDMap) SetFlags(fd kdefs.FD, flags FDFlags) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	desc, ok := f.files[fd]
+	if !ok {
+		return
+	}
+
+	f.files[fd] = descriptor{desc.file, flags}
+}
+
+// GetDescriptor returns a reference to the file and the flags for the FD. It
+// bumps its reference count as well. It returns nil if there is no File
+// for the FD, i.e. if the FD is invalid. The caller must use DecRef
+// when they are done.
+func (f *FDMap) GetDescriptor(fd kdefs.FD) (*fs.File, FDFlags) { + f.mu.RLock() + defer f.mu.RUnlock() + + if desc, ok := f.files[fd]; ok { + desc.file.IncRef() + return desc.file, desc.flags + } + return nil, FDFlags{} +} + +// GetFile returns a reference to the File for the FD and bumps +// its reference count as well. It returns nil if there is no File +// for the FD, i.e. if the FD is invalid. The caller must use DecRef +// when they are done. +func (f *FDMap) GetFile(fd kdefs.FD) *fs.File { + f.mu.RLock() + if desc, ok := f.files[fd]; ok { + desc.file.IncRef() + f.mu.RUnlock() + return desc.file + } + f.mu.RUnlock() + return nil +} + +// fds returns an ordering of FDs. +func (f *FDMap) fds() FDs { + fds := make(FDs, 0, len(f.files)) + for fd := range f.files { + fds = append(fds, fd) + } + sort.Sort(fds) + return fds +} + +// GetFDs returns a list of valid fds. +func (f *FDMap) GetFDs() FDs { + f.mu.RLock() + defer f.mu.RUnlock() + return f.fds() +} + +// GetRefs returns a stable slice of references to all files and bumps the +// reference count on each. The caller must use DecRef on each reference when +// they're done using the slice. +func (f *FDMap) GetRefs() []*fs.File { + f.mu.RLock() + defer f.mu.RUnlock() + + fds := f.fds() + fs := make([]*fs.File, 0, len(fds)) + for _, fd := range fds { + desc := f.files[fd] + desc.file.IncRef() + fs = append(fs, desc.file) + } + return fs +} + +// Fork returns an independent FDMap pointing to the same descriptors. +func (f *FDMap) Fork() *FDMap { + f.mu.RLock() + defer f.mu.RUnlock() + + clone := f.k.NewFDMap() + + // Grab a extra reference for every file. + for fd, desc := range f.files { + desc.file.IncRef() + clone.files[fd] = desc + } + + // That's it! + return clone +} + +// unlock releases all file locks held by this FDMap's uid. Must only be +// called on a non-nil *fs.File. 
+func (f *FDMap) unlock(file *fs.File) { + id := lock.UniqueID(f.ID()) + file.Dirent.Inode.LockCtx.Posix.UnlockRegion(id, lock.LockRange{0, lock.LockEOF}) +} + +// inotifyFileClose generates the appropriate inotify events for f being closed. +func inotifyFileClose(f *fs.File) { + var ev uint32 + d := f.Dirent + + if fs.IsDir(d.Inode.StableAttr) { + ev |= linux.IN_ISDIR + } + + if f.Flags().Write { + ev |= linux.IN_CLOSE_WRITE + } else { + ev |= linux.IN_CLOSE_NOWRITE + } + + d.InotifyEvent(ev, 0) +} + +// Remove removes an FD from the FDMap, and returns (File, true) if a File +// one was found. Callers are expected to decrement the reference count on +// the File. Otherwise returns (nil, false). +func (f *FDMap) Remove(fd kdefs.FD) (*fs.File, bool) { + f.mu.Lock() + desc := f.files[fd] + delete(f.files, fd) + f.mu.Unlock() + if desc.file != nil { + f.unlock(desc.file) + inotifyFileClose(desc.file) + return desc.file, true + } + return nil, false +} + +// RemoveIf removes all FDs where cond is true. +func (f *FDMap) RemoveIf(cond func(*fs.File, FDFlags) bool) { + var removed []*fs.File + f.mu.Lock() + for fd, desc := range f.files { + if desc.file != nil && cond(desc.file, desc.flags) { + delete(f.files, fd) + removed = append(removed, desc.file) + } + } + f.mu.Unlock() + + for _, file := range removed { + f.unlock(file) + inotifyFileClose(file) + file.DecRef() + } +} diff --git a/pkg/sentry/kernel/fd_map_test.go b/pkg/sentry/kernel/fd_map_test.go new file mode 100644 index 000000000..e1ac900e8 --- /dev/null +++ b/pkg/sentry/kernel/fd_map_test.go @@ -0,0 +1,134 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/filetest" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" +) + +const ( + // maxFD is the maximum FD to try to create in the map. + // This number of open files has been seen in the wild. + maxFD = 2 * 1024 +) + +func newTestFDMap() *FDMap { + return &FDMap{ + files: make(map[kdefs.FD]descriptor), + } +} + +// TestFDMapMany allocates maxFD FDs, i.e. maxes out the FDMap, +// until there is no room, then makes sure that NewFDAt works +// and also that if we remove one and add one that works too. +func TestFDMapMany(t *testing.T) { + file := filetest.NewTestFile(t) + limitSet := limits.NewLimitSet() + limitSet.Set(limits.NumberOfFiles, limits.Limit{maxFD, maxFD}) + + f := newTestFDMap() + for i := 0; i < maxFD; i++ { + if _, err := f.NewFDFrom(0, file, FDFlags{}, limitSet); err != nil { + t.Fatalf("Allocated %v FDs but wanted to allocate %v", i, maxFD) + } + } + + if _, err := f.NewFDFrom(0, file, FDFlags{}, limitSet); err == nil { + t.Fatalf("f.NewFDFrom(0, r) in full map: got nil, wanted error") + } + + if err := f.NewFDAt(1, file, FDFlags{}, limitSet); err != nil { + t.Fatalf("f.NewFDAt(1, r, FDFlags{}): got %v, wanted nil", err) + } +} + +// TestFDMap does a set of simple tests to make sure simple adds, +// removes, GetRefs, and DecRefs work. The ordering is just weird +// enough that a table-driven approach seemed clumsy. 
+func TestFDMap(t *testing.T) { + file := filetest.NewTestFile(t) + limitSet := limits.NewLimitSet() + limitSet.Set(limits.NumberOfFiles, limits.Limit{1, maxFD}) + + f := newTestFDMap() + if _, err := f.NewFDFrom(0, file, FDFlags{}, limitSet); err != nil { + t.Fatalf("Adding an FD to an empty 1-size map: got %v, want nil", err) + } + + if _, err := f.NewFDFrom(0, file, FDFlags{}, limitSet); err == nil { + t.Fatalf("Adding an FD to a filled 1-size map: got nil, wanted an error") + } + + largeLimit := limits.Limit{maxFD, maxFD} + limitSet.Set(limits.NumberOfFiles, largeLimit) + + if fd, err := f.NewFDFrom(0, file, FDFlags{}, limitSet); err != nil { + t.Fatalf("Adding an FD to a resized map: got %v, want nil", err) + } else if fd != kdefs.FD(1) { + t.Fatalf("Added an FD to a resized map: got %v, want 1", fd) + } + + if err := f.NewFDAt(1, file, FDFlags{}, limitSet); err != nil { + t.Fatalf("Replacing FD 1 via f.NewFDAt(1, r, FDFlags{}): got %v, wanted nil", err) + } + + if err := f.NewFDAt(maxFD+1, file, FDFlags{}, limitSet); err == nil { + t.Fatalf("Using an FD that was too large via f.NewFDAt(%v, r, FDFlags{}): got nil, wanted an error", maxFD+1) + } + + if ref := f.GetFile(1); ref == nil { + t.Fatalf("f.GetFile(1): got nil, wanted %v", file) + } + + if ref := f.GetFile(2); ref != nil { + t.Fatalf("f.GetFile(2): got a %v, wanted nil", ref) + } + + ref, ok := f.Remove(1) + if !ok { + t.Fatalf("f.Remove(1) for an existing FD: failed, want success") + } + ref.DecRef() + + if ref, ok := f.Remove(1); ok { + ref.DecRef() + t.Fatalf("r.Remove(1) for a removed FD: got success, want failure") + } + +} + +func TestDescriptorFlags(t *testing.T) { + file := filetest.NewTestFile(t) + f := newTestFDMap() + limitSet := limits.NewLimitSet() + limitSet.Set(limits.NumberOfFiles, limits.Limit{maxFD, maxFD}) + + if err := f.NewFDAt(2, file, FDFlags{CloseOnExec: true}, limitSet); err != nil { + t.Fatalf("f.NewFDAt(2, r, FDFlags{}): got %v, wanted nil", err) + } + + newFile, flags := 
f.GetDescriptor(2) + if newFile == nil { + t.Fatalf("f.GetFile(2): got a %v, wanted nil", newFile) + } + + if !flags.CloseOnExec { + t.Fatalf("new File flags %d don't match original %d\n", flags, 0) + } +} diff --git a/pkg/sentry/kernel/fs_context.go b/pkg/sentry/kernel/fs_context.go new file mode 100644 index 000000000..9aa6fa951 --- /dev/null +++ b/pkg/sentry/kernel/fs_context.go @@ -0,0 +1,172 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "fmt" + "sync" + + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" +) + +// FSContext contains filesystem context. +// +// This includes umask and working directory. +type FSContext struct { + refs.AtomicRefCount + + // mu protects below. + mu sync.Mutex `state:"nosave"` + + // root is the filesystem root. Will be nil iff the FSContext has been + // destroyed. + root *fs.Dirent + + // cwd is the current working directory. Will be nil iff the FSContext + // has been destroyed. + cwd *fs.Dirent + + // umask is the current file mode creation mask. When a thread using this + // context invokes a syscall that creates a file, bits set in umask are + // removed from the permissions that the file is created with. + umask uint +} + +// newFSContext returns a new filesystem context. 
+func newFSContext(root, cwd *fs.Dirent, umask uint) *FSContext { + root.IncRef() + cwd.IncRef() + return &FSContext{ + root: root, + cwd: cwd, + umask: umask, + } +} + +// destroy is the destructor for an FSContext. +// +// This will call DecRef on both root and cwd Dirents. If either call to +// DecRef returns an error, then it will be propigated. If both calls to +// DecRef return an error, then the one from root.DecRef will be propigated. +// +// Note that there may still be calls to WorkingDirectory() or RootDirectory() +// (that return nil). This is because valid references may still be held via +// proc files or other mechanisms. +func (f *FSContext) destroy() { + f.root.DecRef() + f.root = nil + + f.cwd.DecRef() + f.cwd = nil +} + +// DecRef implements RefCounter.DecRef with destructor f.destroy. +func (f *FSContext) DecRef() { + f.DecRefWithDestructor(f.destroy) +} + +// Fork forks this FSContext. +// +// This is not a valid call after destroy. +func (f *FSContext) Fork() *FSContext { + f.mu.Lock() + defer f.mu.Unlock() + f.cwd.IncRef() + f.root.IncRef() + return &FSContext{ + cwd: f.cwd, + root: f.root, + umask: f.umask, + } +} + +// WorkingDirectory returns the current working directory. +// You should call DecRef on the returned Dirent when finished. +// +// This will return nil if called after destroy(). +func (f *FSContext) WorkingDirectory() *fs.Dirent { + f.mu.Lock() + defer f.mu.Unlock() + if f.cwd != nil { + f.cwd.IncRef() + } + return f.cwd +} + +// SetWorkingDirectory sets the current working directory. +// This will take an extra reference on the Dirent. +// +// This is not a valid call after destroy. 
+func (f *FSContext) SetWorkingDirectory(d *fs.Dirent) { + if d == nil { + panic("FSContext.SetWorkingDirectory called with nil dirent") + } + if f.cwd == nil { + panic(fmt.Sprintf("FSContext.SetWorkingDirectory(%v)) called after destroy", d)) + } + f.mu.Lock() + defer f.mu.Unlock() + old := f.cwd + f.cwd = d + d.IncRef() + old.DecRef() +} + +// RootDirectory returns the current filesystem root. +// You should call DecRef on the returned Dirent when finished. +// +// This will return nil if called after destroy(). +func (f *FSContext) RootDirectory() *fs.Dirent { + f.mu.Lock() + defer f.mu.Unlock() + f.root.IncRef() + return f.root +} + +// SetRootDirectory sets the root directory. +// This will take an extra reference on the Dirent. +// +// This is not a valid call after free. +func (f *FSContext) SetRootDirectory(d *fs.Dirent) { + if d == nil { + panic("FSContext.SetRootDirectory called with nil dirent") + } + if f.root == nil { + panic(fmt.Sprintf("FSContext.SetRootDirectory(%v)) called after destroy", d)) + } + f.mu.Lock() + defer f.mu.Unlock() + old := f.root + f.root = d + d.IncRef() + old.DecRef() +} + +// Umask returns the current umask. +func (f *FSContext) Umask() uint { + f.mu.Lock() + defer f.mu.Unlock() + return f.umask +} + +// SwapUmask atomically sets the current umask and returns the old umask. 
+func (f *FSContext) SwapUmask(mask uint) uint { + f.mu.Lock() + defer f.mu.Unlock() + old := f.umask + f.umask = mask + return old +} diff --git a/pkg/sentry/kernel/futex/BUILD b/pkg/sentry/kernel/futex/BUILD new file mode 100644 index 000000000..de9897c58 --- /dev/null +++ b/pkg/sentry/kernel/futex/BUILD @@ -0,0 +1,48 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_template_instance( + name = "waiter_list", + out = "waiter_list.go", + package = "futex", + prefix = "waiter", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*Waiter", + }, +) + +go_stateify( + name = "futex_state", + srcs = [ + "futex.go", + "waiter_list.go", + ], + out = "futex_state.go", + package = "futex", +) + +go_library( + name = "futex", + srcs = [ + "futex.go", + "futex_state.go", + "waiter_list.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/futex", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/state", + "//pkg/syserror", + ], +) + +go_test( + name = "futex_test", + size = "small", + srcs = ["futex_test.go"], + embed = [":futex"], +) diff --git a/pkg/sentry/kernel/futex/futex.go b/pkg/sentry/kernel/futex/futex.go new file mode 100644 index 000000000..b3ba57a2c --- /dev/null +++ b/pkg/sentry/kernel/futex/futex.go @@ -0,0 +1,405 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package futex provides an implementation of the futex interface as found in +// the Linux kernel. It allows one to easily transform Wait() calls into waits +// on a channel, which is useful in a Go-based kernel, for example. +package futex + +import ( + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// Checker abstracts memory accesses. This is useful because the "addresses" +// used in this package may not be real addresses (they could be indices of an +// array, for example), or they could be mapped via some special mechanism. +// +// TODO: Replace this with usermem.IO. +type Checker interface { + // Check should validate that given address contains the given value. + // If it does not contain the value, syserror.EAGAIN must be returned. + // Any other error may be returned, which will be propagated. + Check(addr uintptr, val uint32) error + + // Op should atomically perform the operation encoded in op on the data + // pointed to by addr, then apply the comparison encoded in op to the + // original value at addr, returning the result. + // Note that op is an opaque operation whose behaviour is defined + // outside of the futex manager. + Op(addr uintptr, op uint32) (bool, error) +} + +// Waiter is the struct which gets enqueued into buckets for wake up routines +// and requeue routines to scan and notify. Once a Waiter has been enqueued by +// WaitPrepare(), callers may listen on C for wake up events. +type Waiter struct { + // Synchronization: + // + // - A Waiter that is not enqueued in a bucket is exclusively owned (no + // synchronization applies). + // + // - A Waiter is enqueued in a bucket by calling WaitPrepare(). After this, + // waiterEntry, complete, and addr are protected by the bucket.mu ("bucket + // lock") of the containing bucket, and bitmask is immutable. 
complete and + // addr are additionally mutated using atomic memory operations, ensuring + // that they can be read using atomic memory operations without holding the + // bucket lock. + // + // - A Waiter is only guaranteed to be no longer queued after calling + // WaitComplete(). + + // waiterEntry links Waiter into bucket.waiters. + waiterEntry + + // complete is 1 if the Waiter was removed from its bucket by a wakeup and + // 0 otherwise. + complete int32 + + // C is sent to when the Waiter is woken. + C chan struct{} + + // addr is the address being waited on. + addr uintptr + + // The bitmask we're waiting on. + // This is used the case of a FUTEX_WAKE_BITSET. + bitmask uint32 +} + +// NewWaiter returns a new unqueued Waiter. +func NewWaiter() *Waiter { + return &Waiter{ + C: make(chan struct{}, 1), + } +} + +// bucket holds a list of waiters for a given address hash. +type bucket struct { + // mu protects waiters and contained Waiter state. See comment in Waiter. + mu sync.Mutex `state:"nosave"` + + waiters waiterList `state:"zerovalue"` +} + +// wakeLocked wakes up to n waiters matching the bitmask at the addr for this +// bucket and returns the number of waiters woken. +// +// Preconditions: b.mu must be locked. +func (b *bucket) wakeLocked(addr uintptr, bitmask uint32, n int) int { + done := 0 + for w := b.waiters.Front(); done < n && w != nil; { + if w.addr != addr || w.bitmask&bitmask == 0 { + // Not matching. + w = w.Next() + continue + } + + // Remove from the bucket and wake the waiter. + woke := w + w = w.Next() // Next iteration. + b.waiters.Remove(woke) + woke.C <- struct{}{} + + // NOTE: The above channel write establishes a write barrier + // according to the memory model, so nothing may be ordered + // around it. Since we've dequeued w and will never touch it + // again, we can safely store 1 to w.complete here and allow + // the WaitComplete() to short-circuit grabbing the bucket + // lock. 
If they somehow miss the w.complete, we are still + // holding the lock, so we can know that they won't dequeue w, + // assume it's free and have the below operation afterwards. + atomic.StoreInt32(&woke.complete, 1) + done++ + } + return done +} + +// requeueLocked takes n waiters from the bucket and moves them to naddr on the +// bucket "to". +// +// Preconditions: b and to must be locked. +func (b *bucket) requeueLocked(to *bucket, addr, naddr uintptr, n int) int { + done := 0 + for w := b.waiters.Front(); done < n && w != nil; { + if w.addr != addr { + // Not matching. + w = w.Next() + continue + } + + requeued := w + w = w.Next() // Next iteration. + b.waiters.Remove(requeued) + atomic.StoreUintptr(&requeued.addr, naddr) + to.waiters.PushBack(requeued) + done++ + } + return done +} + +const ( + // bucketCount is the number of buckets per Manager. By having many of + // these we reduce contention when concurrent yet unrelated calls are made. + bucketCount = 1 << bucketCountBits + bucketCountBits = 10 +) + +func checkAddr(addr uintptr) error { + // Ensure the address is aligned. + // It must be a DWORD boundary. + if addr&0x3 != 0 { + return syserror.EINVAL + } + + return nil +} + +// bucketIndexForAddr returns the index into Manager.buckets for addr. +func bucketIndexForAddr(addr uintptr) uintptr { + // - The bottom 2 bits of addr must be 0, per checkAddr. + // + // - On amd64, the top 16 bits of addr (bits 48-63) must be equal to bit 47 + // for a canonical address, and (on all existing platforms) bit 47 must be + // 0 for an application address. + // + // Thus 19 bits of addr are "useless" for hashing, leaving only 45 "useful" + // bits. We choose one of the simplest possible hash functions that at + // least uses all 45 useful bits in the output, given that bucketCountBits + // == 10. 
This hash function also has the property that it will usually map + // adjacent addresses to adjacent buckets, slightly improving memory + // locality when an application synchronization structure uses multiple + // nearby futexes. + // + // Note that despite the large number of arithmetic operations in the + // function, many components can be computed in parallel, such that the + // critical path is 1 bit shift + 3 additions (2 in h1, then h1 + h2). This + // is also why h1 and h2 are grouped separately; for "(addr >> 2) + ... + + // (addr >> 42)" without any additional grouping, the compiler puts all 4 + // additions in the critical path. + h1 := (addr >> 2) + (addr >> 12) + (addr >> 22) + h2 := (addr >> 32) + (addr >> 42) + return (h1 + h2) % bucketCount +} + +// Manager holds futex state for a single virtual address space. +type Manager struct { + buckets [bucketCount]bucket +} + +// NewManager returns an initialized futex manager. +// N.B. we use virtual address to tag futexes, so it only works for private +// (within a single process) futex. +func NewManager() *Manager { + return &Manager{} +} + +// lockBucket returns a locked bucket for the given addr. +// +// Preconditions: checkAddr(addr) == nil. +func (m *Manager) lockBucket(addr uintptr) *bucket { + b := &m.buckets[bucketIndexForAddr(addr)] + b.mu.Lock() + return b +} + +// lockBuckets returns locked buckets for the given addrs. +// +// Preconditions: checkAddr(addr1) == checkAddr(addr2) == nil. +func (m *Manager) lockBuckets(addr1 uintptr, addr2 uintptr) (*bucket, *bucket) { + i1 := bucketIndexForAddr(addr1) + i2 := bucketIndexForAddr(addr2) + b1 := &m.buckets[i1] + b2 := &m.buckets[i2] + + // Ensure that buckets are locked in a consistent order (lowest index + // first) to avoid circular locking. 
+ switch { + case i1 < i2: + b1.mu.Lock() + b2.mu.Lock() + case i2 < i1: + b2.mu.Lock() + b1.mu.Lock() + default: + b1.mu.Lock() + } + + return b1, b2 +} + +// Wake wakes up to n waiters matching the bitmask on the given addr. +// The number of waiters woken is returned. +func (m *Manager) Wake(addr uintptr, bitmask uint32, n int) (int, error) { + if err := checkAddr(addr); err != nil { + return 0, err + } + + b := m.lockBucket(addr) + // This function is very hot; avoid defer. + r := b.wakeLocked(addr, bitmask, n) + b.mu.Unlock() + return r, nil +} + +func (m *Manager) doRequeue(c Checker, addr uintptr, val uint32, naddr uintptr, nwake int, nreq int) (int, error) { + if err := checkAddr(addr); err != nil { + return 0, err + } + if err := checkAddr(naddr); err != nil { + return 0, err + } + + b1, b2 := m.lockBuckets(addr, naddr) + defer b1.mu.Unlock() + if b2 != b1 { + defer b2.mu.Unlock() + } + + // Check our value. + // This only applied for RequeueCmp(). + if c != nil { + if err := c.Check(addr, val); err != nil { + return 0, err + } + } + + // Wake the number required. + done := b1.wakeLocked(addr, ^uint32(0), nwake) + + // Requeue the number required. + b1.requeueLocked(b2, addr, naddr, nreq) + + return done, nil +} + +// Requeue wakes up to nwake waiters on the given addr, and unconditionally +// requeues up to nreq waiters on naddr. +func (m *Manager) Requeue(addr uintptr, naddr uintptr, nwake int, nreq int) (int, error) { + return m.doRequeue(nil, addr, 0, naddr, nwake, nreq) +} + +// RequeueCmp atomically checks that the addr contains val (via the Checker), +// wakes up to nwake waiters on addr and then unconditionally requeues nreq +// waiters on naddr. 
+func (m *Manager) RequeueCmp(c Checker, addr uintptr, val uint32, naddr uintptr, nwake int, nreq int) (int, error) { + return m.doRequeue(c, addr, val, naddr, nwake, nreq) +} + +// WakeOp atomically applies op to the memory address addr2, wakes up to nwake1 +// waiters unconditionally from addr1, and, based on the original value at addr2 +// and a comparison encoded in op, wakes up to nwake2 waiters from addr2. +// It returns the total number of waiters woken. +func (m *Manager) WakeOp(c Checker, addr1 uintptr, addr2 uintptr, nwake1 int, nwake2 int, op uint32) (int, error) { + if err := checkAddr(addr1); err != nil { + return 0, err + } + if err := checkAddr(addr2); err != nil { + return 0, err + } + + b1, b2 := m.lockBuckets(addr1, addr2) + + done := 0 + cond, err := c.Op(addr2, op) + if err == nil { + // Wake up up to nwake1 entries from the first bucket. + done = b1.wakeLocked(addr1, ^uint32(0), nwake1) + + // Wake up up to nwake2 entries from the second bucket if the + // operation yielded true. + if cond { + done += b2.wakeLocked(addr2, ^uint32(0), nwake2) + } + } + + b1.mu.Unlock() + if b2 != b1 { + b2.mu.Unlock() + } + return done, err +} + +// WaitPrepare atomically checks that addr contains val (via the Checker), then +// enqueues w to be woken by a send to w.C. If WaitPrepare returns nil, the +// Waiter must be subsequently removed by calling WaitComplete, whether or not +// a wakeup is received on w.C. +func (m *Manager) WaitPrepare(w *Waiter, c Checker, addr uintptr, val uint32, bitmask uint32) error { + if err := checkAddr(addr); err != nil { + return err + } + + // Prepare the Waiter before taking the bucket lock. + w.complete = 0 + select { + case <-w.C: + default: + } + w.addr = addr + w.bitmask = bitmask + + b := m.lockBucket(addr) + // This function is very hot; avoid defer. + + // Perform our atomic check. + if err := c.Check(addr, val); err != nil { + b.mu.Unlock() + return err + } + + // Add the waiter to the bucket. 
+ b.waiters.PushBack(w) + + b.mu.Unlock() + return nil +} + +// WaitComplete must be called when a Waiter previously added by WaitPrepare is +// no longer eligible to be woken. +func (m *Manager) WaitComplete(w *Waiter) { + // Can we short-circuit acquiring the lock? + // This is the happy path where a notification + // was received and we don't need to dequeue this + // waiter from any list (or take any locks). + if atomic.LoadInt32(&w.complete) != 0 { + return + } + + // Take the bucket lock. Note that without holding the bucket lock, the + // waiter is not guaranteed to stay in that bucket, so after we take the + // bucket lock, we must ensure that the bucket hasn't changed: if it + // happens to have changed, we release the old bucket lock and try again + // with the new bucket; if it hasn't changed, we know it won't change now + // because we hold the lock. + var b *bucket + for { + addr := atomic.LoadUintptr(&w.addr) + b = m.lockBucket(addr) + // We still have to use an atomic load here, because if w was racily + // requeued then w.addr is not protected by b.mu. + if addr == atomic.LoadUintptr(&w.addr) { + break + } + b.mu.Unlock() + } + + // Remove waiter from the bucket. w.complete can only be stored with b.mu + // locked, so this load doesn't need to use sync/atomic. + if w.complete == 0 { + b.waiters.Remove(w) + } + b.mu.Unlock() +} diff --git a/pkg/sentry/kernel/futex/futex_test.go b/pkg/sentry/kernel/futex/futex_test.go new file mode 100644 index 000000000..7b81358ec --- /dev/null +++ b/pkg/sentry/kernel/futex/futex_test.go @@ -0,0 +1,500 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package futex + +import ( + "math" + "runtime" + "sync" + "sync/atomic" + "syscall" + "testing" + "unsafe" +) + +const ( + testMutexSize = 4 + testMutexLocked uint32 = 1 + testMutexUnlocked uint32 = 0 +) + +// testData implements the Checker interface, and allows us to +// treat the address passed for futex operations as an index in +// a byte slice for testing simplicity. +type testData []byte + +func newTestData(size uint) testData { + return make([]byte, size) +} + +func (t testData) Check(addr uintptr, val uint32) error { + if val != atomic.LoadUint32((*uint32)(unsafe.Pointer(&t[addr]))) { + return syscall.EAGAIN + } + return nil +} + +func (t testData) Op(addr uintptr, val uint32) (bool, error) { + return val == 0, nil +} + +// testMutex ties together a testData slice, an address, and a +// futex manager in order to implement the sync.Locker interface. +// Beyond being used as a Locker, this is a simple mechanism for +// changing the underlying values for simpler tests. +type testMutex struct { + a uintptr + d testData + m *Manager +} + +func newTestMutex(addr uintptr, d testData, m *Manager) *testMutex { + return &testMutex{a: addr, d: d, m: m} +} + +// Lock acquires the testMutex. +// This may wait for it to be available via the futex manager. +func (t *testMutex) Lock() { + for { + // Attempt to grab the lock. + if atomic.CompareAndSwapUint32( + ((*uint32)(unsafe.Pointer(&t.d[t.a]))), + testMutexUnlocked, + testMutexLocked) { + // Lock held. + return + } + + // Wait for it to be "not locked". 
+ w := NewWaiter() + err := t.m.WaitPrepare(w, t.d, t.a, testMutexLocked, ^uint32(0)) + if err == syscall.EAGAIN { + continue + } + if err != nil { + // Should never happen. + panic("WaitPrepare returned unexpected error: " + err.Error()) + } + <-w.C + t.m.WaitComplete(w) + } +} + +// Unlock releases the testMutex. +// This will notify any waiters via the futex manager. +func (t *testMutex) Unlock() { + // Unlock. + atomic.StoreUint32(((*uint32)(unsafe.Pointer(&t.d[t.a]))), testMutexUnlocked) + + // Notify all waiters. + t.m.Wake(t.a, ^uint32(0), math.MaxInt32) +} + +func TestFutexWake(t *testing.T) { + m := NewManager() + d := newTestData(testMutexSize) + + // Wait for it to be locked. + // (This won't trigger the wake in testMutex) + w := NewWaiter() + m.WaitPrepare(w, d, 0, testMutexUnlocked, ^uint32(0)) + + // Wake the single thread. + if _, err := m.Wake(0, ^uint32(0), 1); err != nil { + t.Error("wake error:", err) + } + + <-w.C + m.WaitComplete(w) +} + +func TestFutexWakeBitmask(t *testing.T) { + m := NewManager() + d := newTestData(testMutexSize) + + // Wait for it to be locked. + // (This won't trigger the wake in testMutex) + w := NewWaiter() + m.WaitPrepare(w, d, 0, testMutexUnlocked, 0x0000ffff) + + // Wake the single thread, not using the bitmask. + if _, err := m.Wake(0, 0xffff0000, 1); err != nil { + t.Error("wake non-matching bitmask error:", err) + } + + select { + case <-w.C: + t.Error("w is alive?") + default: + } + + // Now use a matching bitmask. + if _, err := m.Wake(0, 0x00000001, 1); err != nil { + t.Error("wake matching bitmask error:", err) + } + + <-w.C + m.WaitComplete(w) +} + +func TestFutexWakeTwo(t *testing.T) { + m := NewManager() + d := newTestData(testMutexSize) + + // Wait for it to be locked. 
+ // (This won't trigger the wake in testMutex) + w1 := NewWaiter() + w2 := NewWaiter() + w3 := NewWaiter() + m.WaitPrepare(w1, d, 0, testMutexUnlocked, ^uint32(0)) + m.WaitPrepare(w2, d, 0, testMutexUnlocked, ^uint32(0)) + m.WaitPrepare(w3, d, 0, testMutexUnlocked, ^uint32(0)) + + // Wake exactly two threads. + if _, err := m.Wake(0, ^uint32(0), 2); err != nil { + t.Error("wake error:", err) + } + + // Ensure exactly two are alive. + // We don't get guarantees about exactly which two, + // (although we expect them to be w1 and w2). + awake := 0 + for { + select { + case <-w1.C: + awake++ + case <-w2.C: + awake++ + case <-w3.C: + awake++ + default: + if awake != 2 { + t.Error("awake != 2?") + } + + // Success. + return + } + } +} + +func TestFutexWakeUnrelated(t *testing.T) { + m := NewManager() + d := newTestData(2 * testMutexSize) + + // Wait for it to be locked. + w1 := NewWaiter() + w2 := NewWaiter() + m.WaitPrepare(w1, d, 0*testMutexSize, testMutexUnlocked, ^uint32(0)) + m.WaitPrepare(w2, d, 1*testMutexSize, testMutexUnlocked, ^uint32(0)) + + // Wake only the second one. + if _, err := m.Wake(1*testMutexSize, ^uint32(0), 2); err != nil { + t.Error("wake error:", err) + } + + // Ensure only r2 is alive. + select { + case <-w1.C: + t.Error("w1 is alive?") + default: + } + <-w2.C +} + +// This function was shamelessly stolen from mutex_test.go. 
+func HammerMutex(l sync.Locker, loops int, cdone chan bool) { + for i := 0; i < loops; i++ { + l.Lock() + runtime.Gosched() + l.Unlock() + } + cdone <- true +} + +func TestFutexStress(t *testing.T) { + m := NewManager() + d := newTestData(testMutexSize) + tm := newTestMutex(0*testMutexSize, d, m) + c := make(chan bool) + + for i := 0; i < 10; i++ { + go HammerMutex(tm, 1000, c) + } + + for i := 0; i < 10; i++ { + <-c + } +} + +func TestWakeOpEmpty(t *testing.T) { + m := NewManager() + d := newTestData(8) + + n, err := m.WakeOp(d, 0, 4, 10, 10, 0) + if err != nil { + t.Fatalf("WakeOp failed: %v", err) + } + + if n != 0 { + t.Fatalf("Invalid number of wakes: want 0, got %d", n) + } +} + +func TestWakeOpFirstNonEmpty(t *testing.T) { + m := NewManager() + d := newTestData(8) + + // Add two waiters on address 0. + w1 := NewWaiter() + if err := m.WaitPrepare(w1, d, 0, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w1) + + w2 := NewWaiter() + if err := m.WaitPrepare(w2, d, 0, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w2) + + // Wake up all waiters on address 0. + n, err := m.WakeOp(d, 0, 4, 10, 10, 0) + if err != nil { + t.Fatalf("WakeOp failed: %v", err) + } + + if n != 2 { + t.Fatalf("Invalid number of wakes: want 2, got %d", n) + } +} + +func TestWakeOpSecondNonEmpty(t *testing.T) { + m := NewManager() + d := newTestData(8) + + // Add two waiters on address 4. + w1 := NewWaiter() + if err := m.WaitPrepare(w1, d, 4, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w1) + + w2 := NewWaiter() + if err := m.WaitPrepare(w2, d, 4, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w2) + + // Wake up all waiters on address 4. 
+ n, err := m.WakeOp(d, 0, 4, 10, 10, 0) + if err != nil { + t.Fatalf("WakeOp failed: %v", err) + } + + if n != 2 { + t.Fatalf("Invalid number of wakes: want 2, got %d", n) + } +} + +func TestWakeOpSecondNonEmptyFailingOp(t *testing.T) { + m := NewManager() + d := newTestData(8) + + // Add two waiters on address 4. + w1 := NewWaiter() + if err := m.WaitPrepare(w1, d, 4, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w1) + + w2 := NewWaiter() + if err := m.WaitPrepare(w2, d, 4, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w2) + + // Wake up all waiters on address 4. + n, err := m.WakeOp(d, 0, 4, 10, 10, 1) + if err != nil { + t.Fatalf("WakeOp failed: %v", err) + } + + if n != 0 { + t.Fatalf("Invalid number of wakes: want 0, got %d", n) + } +} + +func TestWakeOpAllNonEmpty(t *testing.T) { + m := NewManager() + d := newTestData(8) + + // Add two waiters on address 0. + w1 := NewWaiter() + if err := m.WaitPrepare(w1, d, 0, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w1) + + w2 := NewWaiter() + if err := m.WaitPrepare(w2, d, 0, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w2) + + // Add two waiters on address 4. + w3 := NewWaiter() + if err := m.WaitPrepare(w3, d, 4, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w3) + + w4 := NewWaiter() + if err := m.WaitPrepare(w4, d, 4, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w4) + + // Wake up all waiters on both addresses. 
+ n, err := m.WakeOp(d, 0, 4, 10, 10, 0) + if err != nil { + t.Fatalf("WakeOp failed: %v", err) + } + + if n != 4 { + t.Fatalf("Invalid number of wakes: want 4, got %d", n) + } +} + +func TestWakeOpAllNonEmptyFailingOp(t *testing.T) { + m := NewManager() + d := newTestData(8) + + // Add two waiters on address 0. + w1 := NewWaiter() + if err := m.WaitPrepare(w1, d, 0, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w1) + + w2 := NewWaiter() + if err := m.WaitPrepare(w2, d, 0, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w2) + + // Add two waiters on address 4. + w3 := NewWaiter() + if err := m.WaitPrepare(w3, d, 4, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w3) + + w4 := NewWaiter() + if err := m.WaitPrepare(w4, d, 4, 0, ^uint32(0)); err != nil { + t.Fatalf("WaitPrepare failed: %v", err) + } + defer m.WaitComplete(w4) + + // Wake up all waiters on both addresses. + n, err := m.WakeOp(d, 0, 4, 10, 10, 1) + if err != nil { + t.Fatalf("WakeOp failed: %v", err) + } + + if n != 2 { + t.Fatalf("Invalid number of wakes: want 2, got %d", n) + } +} + +func TestWakeOpSameAddress(t *testing.T) { + m := NewManager() + d := newTestData(8) + + // Add four waiters on address 0. 
+	w1 := NewWaiter()
+	if err := m.WaitPrepare(w1, d, 0, 0, ^uint32(0)); err != nil {
+		t.Fatalf("WaitPrepare failed: %v", err)
+	}
+	defer m.WaitComplete(w1)
+
+	w2 := NewWaiter()
+	if err := m.WaitPrepare(w2, d, 0, 0, ^uint32(0)); err != nil {
+		t.Fatalf("WaitPrepare failed: %v", err)
+	}
+	defer m.WaitComplete(w2)
+
+	w3 := NewWaiter()
+	if err := m.WaitPrepare(w3, d, 0, 0, ^uint32(0)); err != nil {
+		t.Fatalf("WaitPrepare failed: %v", err)
+	}
+	defer m.WaitComplete(w3)
+
+	w4 := NewWaiter()
+	if err := m.WaitPrepare(w4, d, 0, 0, ^uint32(0)); err != nil {
+		t.Fatalf("WaitPrepare failed: %v", err)
+	}
+	defer m.WaitComplete(w4)
+
+	// Use the same address, with at most one waiter from each.
+	n, err := m.WakeOp(d, 0, 0, 1, 1, 0)
+	if err != nil {
+		t.Fatalf("WakeOp failed: %v", err)
+	}
+
+	if n != 2 {
+		t.Fatalf("Invalid number of wakes: want 2, got %d", n)
+	}
+}
+
+func TestWakeOpSameAddressFailingOp(t *testing.T) {
+	m := NewManager()
+	d := newTestData(8)
+
+	// Add four waiters on address 0.
+	w1 := NewWaiter()
+	if err := m.WaitPrepare(w1, d, 0, 0, ^uint32(0)); err != nil {
+		t.Fatalf("WaitPrepare failed: %v", err)
+	}
+	defer m.WaitComplete(w1)
+
+	w2 := NewWaiter()
+	if err := m.WaitPrepare(w2, d, 0, 0, ^uint32(0)); err != nil {
+		t.Fatalf("WaitPrepare failed: %v", err)
+	}
+	defer m.WaitComplete(w2)
+
+	w3 := NewWaiter()
+	if err := m.WaitPrepare(w3, d, 0, 0, ^uint32(0)); err != nil {
+		t.Fatalf("WaitPrepare failed: %v", err)
+	}
+	defer m.WaitComplete(w3)
+
+	w4 := NewWaiter()
+	if err := m.WaitPrepare(w4, d, 0, 0, ^uint32(0)); err != nil {
+		t.Fatalf("WaitPrepare failed: %v", err)
+	}
+	defer m.WaitComplete(w4)
+
+	// Use the same address, with at most one waiter from each.
+ n, err := m.WakeOp(d, 0, 0, 1, 1, 1) + if err != nil { + t.Fatalf("WakeOp failed: %v", err) + } + + if n != 1 { + t.Fatalf("Invalid number of wakes: want 1, got %d", n) + } +} diff --git a/pkg/sentry/kernel/g3doc/run_states.dot b/pkg/sentry/kernel/g3doc/run_states.dot new file mode 100644 index 000000000..7861fe1f5 --- /dev/null +++ b/pkg/sentry/kernel/g3doc/run_states.dot @@ -0,0 +1,99 @@ +digraph { + subgraph { + App; + } + subgraph { + Interrupt; + InterruptAfterSignalDeliveryStop; + } + subgraph { + Syscall; + SyscallAfterPtraceEventSeccomp; + SyscallEnter; + SyscallAfterSyscallEnterStop; + SyscallAfterSysemuStop; + SyscallInvoke; + SyscallAfterPtraceEventClone; + SyscallAfterExecStop; + SyscallAfterVforkStop; + SyscallReinvoke; + SyscallExit; + } + subgraph { + Vsyscall; + VsyscallAfterPtraceEventSeccomp; + VsyscallInvoke; + } + subgraph { + Exit; + ExitMain; // leave thread group, release resources, reparent children, kill PID namespace and wait if TGID 1 + ExitNotify; // signal parent/tracer, become waitable + ExitDone; // represented by t.runState == nil + } + + // Task exit + Exit -> ExitMain; + ExitMain -> ExitNotify; + ExitNotify -> ExitDone; + + // Execution of untrusted application code + App -> App; + + // Interrupts (usually signal delivery) + App -> Interrupt; + Interrupt -> Interrupt; // if other interrupt conditions may still apply + Interrupt -> Exit; // if killed + + // Syscalls + App -> Syscall; + Syscall -> SyscallEnter; + SyscallEnter -> SyscallInvoke; + SyscallInvoke -> SyscallExit; + SyscallExit -> App; + + // exit, exit_group + SyscallInvoke -> Exit; + + // execve + SyscallInvoke -> SyscallAfterExecStop; + SyscallAfterExecStop -> SyscallExit; + SyscallAfterExecStop -> App; // fatal signal pending + + // vfork + SyscallInvoke -> SyscallAfterVforkStop; + SyscallAfterVforkStop -> SyscallExit; + + // Vsyscalls + App -> Vsyscall; + Vsyscall -> VsyscallInvoke; + Vsyscall -> App; // fault while reading return address from stack + VsyscallInvoke 
-> App; + + // ptrace-specific branches + Interrupt -> InterruptAfterSignalDeliveryStop; + InterruptAfterSignalDeliveryStop -> Interrupt; + SyscallEnter -> SyscallAfterSyscallEnterStop; + SyscallAfterSyscallEnterStop -> SyscallInvoke; + SyscallAfterSyscallEnterStop -> SyscallExit; // skipped by tracer + SyscallAfterSyscallEnterStop -> App; // fatal signal pending + SyscallEnter -> SyscallAfterSysemuStop; + SyscallAfterSysemuStop -> SyscallExit; + SyscallAfterSysemuStop -> App; // fatal signal pending + SyscallInvoke -> SyscallAfterPtraceEventClone; + SyscallAfterPtraceEventClone -> SyscallExit; + SyscallAfterPtraceEventClone -> SyscallAfterVforkStop; + + // seccomp + Syscall -> App; // SECCOMP_RET_TRAP, SECCOMP_RET_ERRNO, SECCOMP_RET_KILL, SECCOMP_RET_TRACE without tracer + Syscall -> SyscallAfterPtraceEventSeccomp; // SECCOMP_RET_TRACE + SyscallAfterPtraceEventSeccomp -> SyscallEnter; + SyscallAfterPtraceEventSeccomp -> SyscallExit; // skipped by tracer + SyscallAfterPtraceEventSeccomp -> App; // fatal signal pending + Vsyscall -> VsyscallAfterPtraceEventSeccomp; + VsyscallAfterPtraceEventSeccomp -> VsyscallInvoke; + VsyscallAfterPtraceEventSeccomp -> App; + + // Autosave + SyscallInvoke -> SyscallReinvoke; + SyscallReinvoke -> SyscallInvoke; +} diff --git a/pkg/sentry/kernel/ipc_namespace.go b/pkg/sentry/kernel/ipc_namespace.go new file mode 100644 index 000000000..78737f58f --- /dev/null +++ b/pkg/sentry/kernel/ipc_namespace.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kernel
+
+import (
+	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/semaphore"
+)
+
+// IPCNamespace represents an IPC namespace.
+type IPCNamespace struct {
+	semaphores *semaphore.Registry
+}
+
+// NewIPCNamespace creates a new IPC namespace.
+func NewIPCNamespace() *IPCNamespace {
+	return &IPCNamespace{
+		semaphores: semaphore.NewRegistry(),
+	}
+}
+
+// SemaphoreRegistry returns the semaphore set registry for this namespace.
+func (i *IPCNamespace) SemaphoreRegistry() *semaphore.Registry {
+	return i.semaphores
+}
+
+// IPCNamespace returns the task's IPC namespace.
+func (t *Task) IPCNamespace() *IPCNamespace {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.ipcns
+}
diff --git a/pkg/sentry/kernel/kdefs/BUILD b/pkg/sentry/kernel/kdefs/BUILD
new file mode 100644
index 000000000..b6c00042a
--- /dev/null
+++ b/pkg/sentry/kernel/kdefs/BUILD
@@ -0,0 +1,10 @@
+package(licenses = ["notice"]) # Apache 2.0
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "kdefs",
+    srcs = ["kdefs.go"],
+    importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs",
+    visibility = ["//:sandbox"],
+)
diff --git a/pkg/sentry/kernel/kdefs/kdefs.go b/pkg/sentry/kernel/kdefs/kdefs.go
new file mode 100644
index 000000000..bbb476544
--- /dev/null
+++ b/pkg/sentry/kernel/kdefs/kdefs.go
@@ -0,0 +1,20 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package kdefs defines common kernel definitions. +// +package kdefs + +// FD is a File Descriptor. +type FD int32 diff --git a/pkg/sentry/kernel/kernel.go b/pkg/sentry/kernel/kernel.go new file mode 100644 index 000000000..0932965e0 --- /dev/null +++ b/pkg/sentry/kernel/kernel.go @@ -0,0 +1,957 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package kernel provides an emulation of the Linux kernel. +// +// See README.md for a detailed overview. +// +// Lock order (outermost locks must be taken first): +// +// Kernel.extMu +// TaskSet.mu +// SignalHandlers.mu +// Task.mu +// +// Locking SignalHandlers.mu in multiple SignalHandlers requires locking +// TaskSet.mu exclusively first. Locking Task.mu in multiple Tasks at the same +// time requires locking all of their signal mutexes first. 
+package kernel + +import ( + "fmt" + "io" + "path/filepath" + "sync" + "sync/atomic" + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/cpuid" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/timerfd" + "gvisor.googlesource.com/gvisor/pkg/sentry/hostcpu" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/epoll" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/sched" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/loader" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/netlink/port" + sentrytime "gvisor.googlesource.com/gvisor/pkg/sentry/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/uniqueid" + "gvisor.googlesource.com/gvisor/pkg/state" +) + +// Kernel represents an emulated Linux kernel. It must be initialized by calling +// Init() or LoadFrom(). +type Kernel struct { + // extMu serializes external changes to the Kernel with calls to + // Kernel.SaveTo. (Kernel.SaveTo requires that the state of the Kernel + // remains frozen for the duration of the call; it requires that the Kernel + // is paused as a precondition, which ensures that none of the tasks + // running within the Kernel can affect its state, but extMu is required to + // ensure that concurrent users of the Kernel *outside* the Kernel's + // control cannot affect its state by calling e.g. + // Kernel.SendExternalSignal.) + extMu sync.Mutex `state:"nosave"` + + // started is true if Start has been called. 
Unless otherwise specified, + // all Kernel fields become immutable once started becomes true. + started bool `state:"nosave"` + + // All of the following fields are immutable unless otherwise specified. + + // Platform is the platform that is used to execute tasks in the + // created Kernel. It is embedded so that Kernel can directly serve as + // Platform in mm logic and also serve as platform.MemoryProvider in + // filemem S/R logic. + platform.Platform `state:"nosave"` + + // See InitKernelArgs for the meaning of these fields. + featureSet *cpuid.FeatureSet + timekeeper *Timekeeper + tasks *TaskSet + rootUserNamespace *auth.UserNamespace + networkStack inet.Stack `state:"nosave"` + applicationCores uint + useHostCores bool + extraAuxv []arch.AuxEntry + vdso *loader.VDSO + rootUTSNamespace *UTSNamespace + rootIPCNamespace *IPCNamespace + + // mounts holds the state of the virtual filesystem. mounts is initially + // nil, and must be set by calling Kernel.SetRootMountNamespace before + // Kernel.CreateProcess can succeed. + mounts *fs.MountNamespace + + // globalInit is the thread group whose leader has ID 1 in the root PID + // namespace. globalInit is stored separately so that it is accessible even + // after all tasks in the thread group have exited, such that ID 1 is no + // longer mapped. + // + // globalInit is mutable until it is assigned by the first successful call + // to CreateProcess, and is protected by extMu. + globalInit *ThreadGroup + + // realtimeClock is a ktime.Clock based on timekeeper's Realtime. + realtimeClock *timekeeperClock + + // monotonicClock is a ktime.Clock based on timekeeper's Monotonic. + monotonicClock *timekeeperClock + + // syslog is the kernel log. + syslog syslog + + // cpuClock is incremented every linux.ClockTick. cpuClock is used to + // measure task CPU usage, since sampling monotonicClock twice on every + // syscall turns out to be unreasonably expensive. 
This is similar to how
+	// Linux does task CPU accounting on x86 (CONFIG_IRQ_TIME_ACCOUNTING),
+	// although Linux also uses scheduler timing information to improve
+	// resolution (kernel/sched/cputime.c:cputime_adjust()), which we can't do
+	// since "preemptive" scheduling is managed by the Go runtime, which
+	// doesn't provide this information.
+	//
+	// cpuClock is mutable, and is accessed using atomic memory operations.
+	cpuClock uint64
+
+	// cpuClockTicker increments cpuClock.
+	cpuClockTicker *ktime.Timer `state:"nosave"`
+
+	// fdMapUids is an ever-increasing counter for generating FDMap uids.
+	//
+	// fdMapUids is mutable, and is accessed using atomic memory operations.
+	fdMapUids uint64
+
+	// uniqueID is used to generate unique identifiers.
+	//
+	// uniqueID is mutable, and is accessed using atomic memory operations.
+	uniqueID uint64
+
+	// nextInotifyCookie is a monotonically increasing counter used for
+	// generating unique inotify event cookies.
+	//
+	// nextInotifyCookie is mutable, and is accessed using atomic memory
+	// operations.
+	nextInotifyCookie uint32
+
+	// netlinkPorts manages allocation of netlink socket port IDs.
+	netlinkPorts *port.Manager
+
+	// exitErr is the error causing the sandbox to exit, if any. It is
+	// protected by extMu.
+	exitErr error
+}
+
+// InitKernelArgs holds arguments to Init.
+type InitKernelArgs struct {
+	// FeatureSet is the emulated CPU feature set.
+	FeatureSet *cpuid.FeatureSet
+
+	// Timekeeper manages time for all tasks in the system.
+	Timekeeper *Timekeeper
+
+	// RootUserNamespace is the root user namespace.
+	RootUserNamespace *auth.UserNamespace
+
+	// NetworkStack is the TCP/IP network stack. NetworkStack may be nil.
+	NetworkStack inet.Stack
+
+	// ApplicationCores is the number of logical CPUs visible to sandboxed
+	// applications.
The set of logical CPU IDs is [0, ApplicationCores); thus
+	// ApplicationCores is analogous to Linux's nr_cpu_ids, the index of the
+	// most significant bit in cpu_possible_mask + 1.
+	ApplicationCores uint
+
+	// If UseHostCores is true, Task.CPU() returns the task goroutine's CPU
+	// instead of a virtualized CPU number, and Task.CopyToCPUMask() is a
+	// no-op. If ApplicationCores is less than hostcpu.MaxPossibleCPU(), it
+	// will be overridden.
+	UseHostCores bool
+
+	// ExtraAuxv contains additional auxiliary vector entries that are added to
+	// each process by the ELF loader.
+	ExtraAuxv []arch.AuxEntry
+
+	// Vdso holds the VDSO and its parameter page.
+	Vdso *loader.VDSO
+
+	// RootUTSNamespace is the root UTS namespace.
+	RootUTSNamespace *UTSNamespace
+
+	// RootIPCNamespace is the root IPC namespace.
+	RootIPCNamespace *IPCNamespace
+}
+
+// Init initializes the Kernel with no tasks.
+//
+// Callers must manually set Kernel.Platform before calling Init.
+func (k *Kernel) Init(args InitKernelArgs) error {
+	if args.FeatureSet == nil {
+		return fmt.Errorf("FeatureSet is nil")
+	}
+	if args.Timekeeper == nil {
+		return fmt.Errorf("Timekeeper is nil")
+	}
+	if args.RootUserNamespace == nil {
+		return fmt.Errorf("RootUserNamespace is nil")
+	}
+	if args.ApplicationCores == 0 {
+		return fmt.Errorf("ApplicationCores is 0")
+	}
+
+	k.featureSet = args.FeatureSet
+	k.timekeeper = args.Timekeeper
+	k.tasks = newTaskSet()
+	k.rootUserNamespace = args.RootUserNamespace
+	k.rootUTSNamespace = args.RootUTSNamespace
+	k.rootIPCNamespace = args.RootIPCNamespace
+	k.networkStack = args.NetworkStack
+	k.applicationCores = args.ApplicationCores
+	if args.UseHostCores {
+		k.useHostCores = true
+		maxCPU, err := hostcpu.MaxPossibleCPU()
+		if err != nil {
+			return fmt.Errorf("Failed to get maximum CPU number: %v", err)
+		}
+		minAppCores := uint(maxCPU) + 1
+		if k.applicationCores < minAppCores {
+			log.Infof("UseHostCores enabled: increasing ApplicationCores from %d to %d",
k.applicationCores, minAppCores)
+			k.applicationCores = minAppCores
+		}
+	}
+	k.extraAuxv = args.ExtraAuxv
+	k.vdso = args.Vdso
+	k.realtimeClock = &timekeeperClock{tk: args.Timekeeper, c: sentrytime.Realtime}
+	k.monotonicClock = &timekeeperClock{tk: args.Timekeeper, c: sentrytime.Monotonic}
+	k.netlinkPorts = port.New()
+
+	return nil
+}
+
+// SaveTo saves the state of k to w.
+//
+// Preconditions: The kernel must be paused throughout the call to SaveTo.
+func (k *Kernel) SaveTo(w io.Writer) error {
+	saveStart := time.Now()
+	ctx := k.SupervisorContext()
+
+	// Do not allow other Kernel methods to affect it while it's being saved.
+	k.extMu.Lock()
+	defer k.extMu.Unlock()
+
+	// Stop time.
+	k.pauseTimeLocked()
+	defer k.resumeTimeLocked()
+
+	// Flush write operations on open files so data reaches backing storage.
+	if err := k.tasks.flushWritesToFiles(ctx); err != nil {
+		return err
+	}
+
+	// Remove all epoll waiter objects from underlying wait queues.
+	// NOTE: for programs to resume execution in future snapshot scenarios,
+	// we will need to re-establish these waiter objects after saving.
+	k.tasks.unregisterEpollWaiters()
+
+	// Clear the dirent cache before saving because Dirents must be Loaded in a
+	// particular order (parents before children), and Loading dirents from a cache
+	// breaks that order.
+	k.mounts.FlushMountSourceRefs()
+
+	// Ensure that all pending asynchronous work is complete:
+	// - inode and mount release
+	// - asynchronous IO
+	fs.AsyncBarrier()
+
+	// Once all fs work has completed (flushed references have all been released),
+	// reset mount mappings. This allows individual mounts to save how inodes map
+	// to filesystem resources. Without this, fs.Inodes cannot be restored.
+	fs.SaveInodeMappings()
+
+	// Discard unsavable mappings, such as those for host file descriptors.
+	// This must be done after waiting for "asynchronous fs work", which
+	// includes async I/O that may touch application memory.
+ if err := k.invalidateUnsavableMappings(ctx); err != nil { + return fmt.Errorf("failed to invalidate unsavable mappings: %v", err) + } + + // Save the kernel state. + kernelStart := time.Now() + var stats state.Stats + if err := state.Save(w, k, &stats); err != nil { + return err + } + log.Infof("Kernel save stats: %s", &stats) + log.Infof("Kernel save took [%s].", time.Since(kernelStart)) + + // Save the memory state. + // + // FIXME: In the future, this should not be dispatched via + // an abstract memory type. This should be dispatched to a single + // memory implementation that belongs to the kernel. (There is + // currently a single implementation anyways, it just needs to be + // "unabstracted" and reparented appropriately.) + memoryStart := time.Now() + if err := k.Platform.Memory().SaveTo(w); err != nil { + return err + } + log.Infof("Memory save took [%s].", time.Since(memoryStart)) + + log.Infof("Overall save took [%s].", time.Since(saveStart)) + + return nil +} + +func (ts *TaskSet) flushWritesToFiles(ctx context.Context) error { + ts.mu.RLock() + defer ts.mu.RUnlock() + for t := range ts.Root.tids { + if fdmap := t.FDMap(); fdmap != nil { + for _, desc := range fdmap.files { + if flags := desc.file.Flags(); !flags.Write { + continue + } + if sattr := desc.file.Dirent.Inode.StableAttr; !fs.IsFile(sattr) && !fs.IsDir(sattr) { + continue + } + // Here we need all metadata synced. + syncErr := desc.file.Fsync(ctx, 0, fs.FileMaxOffset, fs.SyncAll) + if err := fs.SaveFileFsyncError(syncErr); err != nil { + name, _ := desc.file.Dirent.FullName(nil /* root */) + return fmt.Errorf("%q was not sufficiently synced: %v", name, err) + } + } + } + } + return nil +} + +// Preconditions: The kernel must be paused. 
+func (k *Kernel) invalidateUnsavableMappings(ctx context.Context) error { + invalidated := make(map[*mm.MemoryManager]struct{}) + k.tasks.mu.RLock() + defer k.tasks.mu.RUnlock() + for t := range k.tasks.Root.tids { + // We can skip locking Task.mu here since the kernel is paused. + if mm := t.tc.MemoryManager; mm != nil { + if _, ok := invalidated[mm]; !ok { + if err := mm.InvalidateUnsavable(ctx); err != nil { + return err + } + invalidated[mm] = struct{}{} + } + } + // I really wish we just had a sync.Map of all MMs... + if r, ok := t.runState.(*runSyscallAfterExecStop); ok { + if err := r.tc.MemoryManager.InvalidateUnsavable(ctx); err != nil { + return err + } + } + } + return nil +} + +func (ts *TaskSet) unregisterEpollWaiters() { + ts.mu.RLock() + defer ts.mu.RUnlock() + for t := range ts.Root.tids { + if fdmap := t.FDMap(); fdmap != nil { + for _, desc := range fdmap.files { + if desc.file != nil { + if e, ok := desc.file.FileOperations.(*epoll.EventPoll); ok { + e.UnregisterEpollWaiters() + } + } + } + } + } +} + +// LoadFrom returns a new Kernel loaded from args. +func (k *Kernel) LoadFrom(r io.Reader, p platform.Platform, net inet.Stack) error { + loadStart := time.Now() + if p == nil { + return fmt.Errorf("Platform is nil") + } + + k.Platform = p + k.networkStack = net + + initAppCores := k.applicationCores + + // Load the kernel state. + kernelStart := time.Now() + var stats state.Stats + if err := state.Load(r, k, &stats); err != nil { + return err + } + log.Infof("Kernel load stats: %s", &stats) + log.Infof("Kernel load took [%s].", time.Since(kernelStart)) + + // Load the memory state. + // + // See the note in SaveTo. 
+	memoryStart := time.Now()
+	if err := k.Platform.Memory().LoadFrom(r); err != nil {
+		return err
+	}
+	log.Infof("Memory load took [%s].", time.Since(memoryStart))
+
+	// Ensure that all pending asynchronous work is complete:
+	// - namedpipe opening
+	// - inode file opening
+	fs.AsyncBarrier()
+
+	log.Infof("Overall load took [%s]", time.Since(loadStart))
+
+	// Applications may size per-cpu structures based on k.applicationCores, so
+	// it can't change across save/restore. When we are virtualizing CPU
+	// numbers, this isn't a problem. However, when we are exposing host CPU
+	// assignments, we can't tolerate an increase in the number of host CPUs,
+	// which could result in getcpu(2) returning CPUs that applications expect
+	// not to exist.
+	if k.useHostCores && initAppCores > k.applicationCores {
+		return fmt.Errorf("UseHostCores enabled: can't increase ApplicationCores from %d to %d after restore", k.applicationCores, initAppCores)
+	}
+
+	return nil
+}
+
+// Destroy releases resources owned by k.
+//
+// Preconditions: There must be no task goroutines running in k.
+func (k *Kernel) Destroy() {
+	if k.mounts != nil {
+		k.mounts.DecRef()
+		k.mounts = nil
+	}
+}
+
+// UniqueID returns a unique identifier.
+func (k *Kernel) UniqueID() uint64 {
+	id := atomic.AddUint64(&k.uniqueID, 1)
+	if id == 0 {
+		panic("unique identifier generator wrapped around")
+	}
+	return id
+}
+
+// CreateProcessArgs holds arguments to kernel.CreateProcess.
+type CreateProcessArgs struct {
+	// Filename is the filename to load.
+	//
+	// If this is provided as "", then the file will be guessed via Argv[0].
+	Filename string
+
+	// Argv is a list of arguments.
+	Argv []string
+
+	// Envv is a list of environment variables.
+	Envv []string
+
+	// WorkingDirectory is the initial working directory.
+	//
+	// This defaults to the root if empty.
+	WorkingDirectory string
+
+	// Credentials is the initial credentials.
+ Credentials *auth.Credentials + + // FDMap is the initial set of file descriptors. If CreateProcess succeeds, + // it takes a reference on FDMap. + FDMap *FDMap + + // Umask is the initial umask. + Umask uint + + // Limits is the initial resource limits. + Limits *limits.LimitSet + + // MaxSymlinkTraversals is the maximum number of symlinks to follow + // during resolution. + MaxSymlinkTraversals uint + + // UTSNamespace is the initial UTS namespace. + UTSNamespace *UTSNamespace + + // IPCNamespace is the initial IPC namespace. + IPCNamespace *IPCNamespace +} + +// NewContext returns a context.Context that represents the task that will be +// created by args.NewContext(k). +func (args *CreateProcessArgs) NewContext(k *Kernel) *createProcessContext { + return &createProcessContext{ + Logger: log.Log(), + k: k, + args: args, + } +} + +// createProcessContext is a context.Context that represents the context +// associated with a task that is being created. +type createProcessContext struct { + context.NoopSleeper + log.Logger + k *Kernel + args *CreateProcessArgs +} + +// Value implements context.Context.Value. +func (ctx *createProcessContext) Value(key interface{}) interface{} { + switch key { + case CtxKernel: + return ctx.k + case CtxPIDNamespace: + // "The new task ... is in the root PID namespace." 
- + // Kernel.CreateProcess + return ctx.k.tasks.Root + case CtxUTSNamespace: + return ctx.args.UTSNamespace + case CtxIPCNamespace: + return ctx.args.IPCNamespace + case auth.CtxCredentials: + return ctx.args.Credentials + case fs.CtxRoot: + if ctx.k.mounts == nil { + return nil + } + return ctx.k.mounts.Root() + case ktime.CtxRealtimeClock: + return ctx.k.RealtimeClock() + case limits.CtxLimits: + return ctx.args.Limits + case platform.CtxPlatform: + return ctx.k + case uniqueid.CtxGlobalUniqueID: + return ctx.k.UniqueID() + case uniqueid.CtxInotifyCookie: + return ctx.k.GenerateInotifyCookie() + default: + return nil + } +} + +// CreateProcess creates a new task in a new thread group with the given +// options. The new task has no parent and is in the root PID namespace. +// +// If k.Start() has already been called, the created task will begin running +// immediately. Otherwise, it will be started when k.Start() is called. +// +// CreateProcess has no analogue in Linux; it is used to create the initial +// application task, as well as processes started by the control server. +func (k *Kernel) CreateProcess(args CreateProcessArgs) (*ThreadGroup, error) { + k.extMu.Lock() + defer k.extMu.Unlock() + log.Infof("EXEC: %v", args.Argv) + + if k.mounts == nil { + return nil, fmt.Errorf("no kernel MountNamespace") + } + + tg := NewThreadGroup(k.tasks.Root, NewSignalHandlers(), linux.SIGCHLD, args.Limits, k.monotonicClock) + ctx := args.NewContext(k) + + // Grab the root directory. + root := fs.RootFromContext(ctx) + defer root.DecRef() + + // Grab the working directory. + wd := root // Default. + if args.WorkingDirectory != "" { + var err error + wd, err = k.mounts.FindInode(ctx, root, nil, args.WorkingDirectory, args.MaxSymlinkTraversals) + if err != nil { + return nil, fmt.Errorf("failed to find initial working directory %q: %v", args.WorkingDirectory, err) + } + defer wd.DecRef() + } + + if args.Filename == "" { + // Was anything provided? 
+ if len(args.Argv) == 0 { + return nil, fmt.Errorf("no filename or command provided") + } + if !filepath.IsAbs(args.Argv[0]) { + return nil, fmt.Errorf("'%s' is not an absolute path", args.Argv[0]) + } + args.Filename = args.Argv[0] + } + + // Create a fresh task context. + tc, err := k.LoadTaskImage(ctx, k.mounts, root, wd, args.MaxSymlinkTraversals, args.Filename, args.Argv, args.Envv, k.featureSet) + if err != nil { + return nil, err + } + tr := newTaskResources(args.FDMap, newFSContext(root, wd, args.Umask)) + // NewTask unconditionally takes ownership of tr, so we never have to call + // tr.release. + + // Create the task. + config := &TaskConfig{ + Kernel: k, + ThreadGroup: tg, + TaskContext: tc, + TaskResources: tr, + Credentials: args.Credentials, + UTSNamespace: args.UTSNamespace, + IPCNamespace: args.IPCNamespace, + AllowedCPUMask: sched.NewFullCPUSet(k.applicationCores), + } + t, err := k.tasks.NewTask(config) + if err != nil { + return nil, err + } + + // Success. + if k.started { + tid := k.tasks.Root.IDOfTask(t) + t.Start(tid) + } else if k.globalInit == nil { + k.globalInit = tg + } + return tg, nil +} + +// Start starts execution of all tasks in k. +// +// Preconditions: Start may be called exactly once. +func (k *Kernel) Start() error { + k.extMu.Lock() + defer k.extMu.Unlock() + + if k.globalInit == nil { + return fmt.Errorf("kernel contains no tasks") + } + if k.started { + return fmt.Errorf("kernel already started") + } + + k.started = true + k.cpuClockTicker = ktime.NewTimer(k.monotonicClock, kernelCPUClockListener{k}) + k.cpuClockTicker.Swap(ktime.Setting{ + Enabled: true, + Period: linux.ClockTick, + }) + // If k was created by LoadKernelFrom, timers were stopped during + // Kernel.SaveTo and need to be resumed. If k was created by NewKernel, + // this is a no-op. + k.resumeTimeLocked() + // Start task goroutines. 
+ k.tasks.mu.RLock() + defer k.tasks.mu.RUnlock() + for t, tid := range k.tasks.Root.tids { + t.Start(tid) + } + return nil +} + +// pauseTimeLocked pauses all Timers and Timekeeper updates. +// +// Preconditions: Any task goroutines running in k must be stopped. k.extMu +// must be locked. +func (k *Kernel) pauseTimeLocked() { + // k.cpuClockTicker may be nil since Kernel.SaveTo() may be called before + // Kernel.Start(). + if k.cpuClockTicker != nil { + k.cpuClockTicker.Pause() + } + + // By precondition, nothing else can be interacting with PIDNamespace.tids + // or FDMap.files, so we can iterate them without synchronization. (We + // can't hold the TaskSet mutex when pausing thread group timers because + // thread group timers call ThreadGroup.SendSignal, which takes the TaskSet + // mutex, while holding the Timer mutex.) + for t := range k.tasks.Root.tids { + if t == t.tg.leader { + t.tg.tm.pause() + } + // This means we'll iterate FDMaps shared by multiple tasks repeatedly, + // but ktime.Timer.Pause is idempotent so this is harmless. + if fdm := t.tr.FDMap; fdm != nil { + for _, desc := range fdm.files { + if tfd, ok := desc.file.FileOperations.(*timerfd.TimerOperations); ok { + tfd.PauseTimer() + } + } + } + } + k.timekeeper.PauseUpdates() +} + +// resumeTimeLocked resumes all Timers and Timekeeper updates. If +// pauseTimeLocked has not been previously called, resumeTimeLocked has no +// effect. +// +// Preconditions: Any task goroutines running in k must be stopped. k.extMu +// must be locked. +func (k *Kernel) resumeTimeLocked() { + if k.cpuClockTicker != nil { + k.cpuClockTicker.Resume() + } + + k.timekeeper.ResumeUpdates() + for t := range k.tasks.Root.tids { + if t == t.tg.leader { + t.tg.tm.resume() + } + if fdm := t.tr.FDMap; fdm != nil { + for _, desc := range fdm.files { + if tfd, ok := desc.file.FileOperations.(*timerfd.TimerOperations); ok { + tfd.ResumeTimer() + } + } + } + } +} + +// WaitExited blocks until all tasks in k have exited. 
+func (k *Kernel) WaitExited() {
+	k.tasks.liveGoroutines.Wait()
+}
+
+// Kill requests that all tasks in k immediately exit as if group exiting with
+// status es. Kill does not wait for tasks to exit.
+func (k *Kernel) Kill(es ExitStatus) {
+	k.extMu.Lock()
+	defer k.extMu.Unlock()
+	k.tasks.Kill(es)
+}
+
+// Pause requests that all tasks in k temporarily stop executing, and blocks
+// until all tasks in k have stopped. Multiple calls to Pause nest and require
+// an equal number of calls to Unpause to resume execution.
+func (k *Kernel) Pause() {
+	k.extMu.Lock()
+	k.tasks.BeginExternalStop()
+	// NOTE(review): extMu is deliberately released before waiting, presumably
+	// so other external operations are not blocked while task goroutines come
+	// to a stop — confirm.
+	k.extMu.Unlock()
+	k.tasks.runningGoroutines.Wait()
+}
+
+// Unpause ends the effect of a previous call to Pause. If Unpause is called
+// without a matching preceding call to Pause, Unpause may panic.
+func (k *Kernel) Unpause() {
+	k.extMu.Lock()
+	defer k.extMu.Unlock()
+	k.tasks.EndExternalStop()
+}
+
+// SendExternalSignal injects a signal into the kernel.
+//
+// context is used only for debugging to describe how the signal was received.
+//
+// Returns false if signal could not be sent because the Kernel is not fully
+// initialized yet.
+func (k *Kernel) SendExternalSignal(info *arch.SignalInfo, context string) bool {
+	k.extMu.Lock()
+	defer k.extMu.Unlock()
+	return k.sendExternalSignal(info, context)
+}
+
+// FeatureSet returns the FeatureSet.
+//
+// No locking: presumably set once at Kernel creation and immutable
+// thereafter — confirm.
+func (k *Kernel) FeatureSet() *cpuid.FeatureSet {
+	return k.featureSet
+}
+
+// Timekeeper returns the Timekeeper.
+func (k *Kernel) Timekeeper() *Timekeeper {
+	return k.timekeeper
+}
+
+// TaskSet returns the TaskSet.
+func (k *Kernel) TaskSet() *TaskSet {
+	return k.tasks
+}
+
+// RootUserNamespace returns the root UserNamespace.
+func (k *Kernel) RootUserNamespace() *auth.UserNamespace {
+	return k.rootUserNamespace
+}
+
+// RootUTSNamespace returns the root UTSNamespace.
+func (k *Kernel) RootUTSNamespace() *UTSNamespace {
+	return k.rootUTSNamespace
+}
+
+// RootIPCNamespace returns the root IPCNamespace.
+func (k *Kernel) RootIPCNamespace() *IPCNamespace {
+	return k.rootIPCNamespace
+}
+
+// RootMountNamespace returns the MountNamespace.
+//
+// k.mounts may be replaced via SetRootMountNamespace, so reads take extMu.
+func (k *Kernel) RootMountNamespace() *fs.MountNamespace {
+	k.extMu.Lock()
+	defer k.extMu.Unlock()
+	return k.mounts
+}
+
+// SetRootMountNamespace sets the MountNamespace.
+func (k *Kernel) SetRootMountNamespace(mounts *fs.MountNamespace) {
+	k.extMu.Lock()
+	defer k.extMu.Unlock()
+	k.mounts = mounts
+}
+
+// NetworkStack returns the network stack. NetworkStack may return nil if no
+// network stack is available.
+func (k *Kernel) NetworkStack() inet.Stack {
+	return k.networkStack
+}
+
+// GlobalInit returns the thread group with ID 1 in the root PID namespace, or
+// nil if no such thread group exists. GlobalInit may return a thread group
+// containing no tasks if the thread group has already exited.
+func (k *Kernel) GlobalInit() *ThreadGroup {
+	k.extMu.Lock()
+	defer k.extMu.Unlock()
+	return k.globalInit
+}
+
+// ApplicationCores returns the number of CPUs visible to sandboxed
+// applications.
+func (k *Kernel) ApplicationCores() uint {
+	return k.applicationCores
+}
+
+// RealtimeClock returns the application CLOCK_REALTIME clock.
+func (k *Kernel) RealtimeClock() ktime.Clock {
+	return k.realtimeClock
+}
+
+// MonotonicClock returns the application CLOCK_MONOTONIC clock.
+func (k *Kernel) MonotonicClock() ktime.Clock {
+	return k.monotonicClock
+}
+
+// CPUClockNow returns the current value of k.cpuClock.
+//
+// cpuClock is advanced concurrently by the CPU clock ticker (see
+// kernelCPUClockListener.Notify), hence the atomic load.
+func (k *Kernel) CPUClockNow() uint64 {
+	return atomic.LoadUint64(&k.cpuClock)
+}
+
+// Syslog returns the syslog.
+func (k *Kernel) Syslog() *syslog {
+	return &k.syslog
+}
+
+// GenerateInotifyCookie generates a unique inotify event cookie.
+//
+// Returned values may overlap with previously returned values if the value
+// space is exhausted. 0 is not a valid cookie value, all other values
+// representable in a uint32 are allowed.
+func (k *Kernel) GenerateInotifyCookie() uint32 { + id := atomic.AddUint32(&k.nextInotifyCookie, 1) + // Wrap-around is explicitly allowed for inotify event cookies. + if id == 0 { + id = atomic.AddUint32(&k.nextInotifyCookie, 1) + } + return id +} + +// NetlinkPorts returns the netlink port manager. +func (k *Kernel) NetlinkPorts() *port.Manager { + return k.netlinkPorts +} + +// ExitError returns the sandbox error that caused the kernel to exit. +func (k *Kernel) ExitError() error { + k.extMu.Lock() + defer k.extMu.Unlock() + return k.exitErr +} + +// SetExitError sets the sandbox error that caused the kernel to exit, if one is +// not already set. +func (k *Kernel) SetExitError(err error) { + k.extMu.Lock() + defer k.extMu.Unlock() + if k.exitErr == nil { + k.exitErr = err + } +} + +// SupervisorContext returns a Context with maximum privileges in k. It should +// only be used by goroutines outside the control of the emulated kernel +// defined by e. +// +// Callers are responsible for ensuring that the returned Context is not used +// concurrently with changes to the Kernel. +func (k *Kernel) SupervisorContext() context.Context { + return supervisorContext{ + Logger: log.Log(), + k: k, + } +} + +type supervisorContext struct { + context.NoopSleeper + log.Logger + k *Kernel +} + +// Value implements context.Context. +func (ctx supervisorContext) Value(key interface{}) interface{} { + switch key { + case CtxCanTrace: + // The supervisor context can trace anything. (None of + // supervisorContext's users are expected to invoke ptrace, but ptrace + // permissions are required for certain file accesses.) + return func(*Task, bool) bool { return true } + case CtxKernel: + return ctx.k + case CtxPIDNamespace: + return ctx.k.tasks.Root + case CtxUTSNamespace: + return ctx.k.rootUTSNamespace + case CtxIPCNamespace: + return ctx.k.rootIPCNamespace + case auth.CtxCredentials: + // The supervisor context is global root. 
+ return auth.NewRootCredentials(ctx.k.rootUserNamespace) + case fs.CtxRoot: + return ctx.k.mounts.Root() + case ktime.CtxRealtimeClock: + return ctx.k.RealtimeClock() + case limits.CtxLimits: + // No limits apply. + return limits.NewLimitSet() + case platform.CtxPlatform: + return ctx.k + case uniqueid.CtxGlobalUniqueID: + return ctx.k.UniqueID() + case uniqueid.CtxInotifyCookie: + return ctx.k.GenerateInotifyCookie() + default: + return nil + } +} + +type kernelCPUClockListener struct { + k *Kernel +} + +// Notify implements ktime.TimerListener.Notify. +func (l kernelCPUClockListener) Notify(exp uint64) { + atomic.AddUint64(&l.k.cpuClock, exp) +} + +// Destroy implements ktime.TimerListener.Destroy. +func (l kernelCPUClockListener) Destroy() { +} diff --git a/pkg/sentry/kernel/memevent/BUILD b/pkg/sentry/kernel/memevent/BUILD new file mode 100644 index 000000000..c7779e1d5 --- /dev/null +++ b/pkg/sentry/kernel/memevent/BUILD @@ -0,0 +1,31 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "memevent", + srcs = ["memory_events.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/memevent", + visibility = ["//:sandbox"], + deps = [ + ":memory_events_go_proto", + "//pkg/eventchannel", + "//pkg/log", + "//pkg/sentry/kernel", + "//pkg/sentry/usage", + ], +) + +proto_library( + name = "memory_events_proto", + srcs = ["memory_events.proto"], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "memory_events_go_proto", + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/memevent/memory_events_go_proto", + proto = ":memory_events_proto", + visibility = ["//visibility:public"], +) diff --git a/pkg/sentry/kernel/memevent/memory_events.go b/pkg/sentry/kernel/memevent/memory_events.go new file mode 100644 index 000000000..ecc9151de --- /dev/null +++ 
b/pkg/sentry/kernel/memevent/memory_events.go @@ -0,0 +1,98 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package memevent implements the memory usage events controller, which +// periodically emits events via the eventchannel. +package memevent + +import ( + "sync" + "time" + + "gvisor.googlesource.com/gvisor/pkg/eventchannel" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + pb "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/memevent/memory_events_go_proto" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" +) + +// MemoryEvents describes the configuration for the global memory event emitter. +type MemoryEvents struct { + k *kernel.Kernel + + // The period is how often to emit an event. The memory events goroutine + // will ensure a minimum of one event is emitted per this period, regardless + // how of much memory usage has changed. + period time.Duration + + // Writing to this channel indicates the memory goroutine should stop. + stop chan struct{} + + // done is used to signal when the memory event goroutine has exited. + done sync.WaitGroup +} + +// New creates a new MemoryEvents. +func New(k *kernel.Kernel, period time.Duration) *MemoryEvents { + return &MemoryEvents{ + k: k, + period: period, + stop: make(chan struct{}), + } +} + +// Stop stops the memory usage events emitter goroutine. 
Stop must not be called +// concurrently with Start and may only be called once. +func (m *MemoryEvents) Stop() { + close(m.stop) + m.done.Wait() +} + +// Start starts the memory usage events emitter goroutine. Start must not be +// called concurrently with Stop and may only be called once. +func (m *MemoryEvents) Start() { + if m.period == 0 { + return + } + go m.run() // S/R-SAFE: doesn't interact with saved state. +} + +func (m *MemoryEvents) run() { + m.done.Add(1) + + ticker := time.NewTicker(m.period) + defer ticker.Stop() + + for { + select { + case <-m.stop: + m.done.Done() + return + case <-ticker.C: + m.emit() + } + } +} + +func (m *MemoryEvents) emit() { + totalPlatform, err := m.k.Platform.Memory().TotalUsage() + if err != nil { + log.Warningf("Failed to fetch memory usage for memory events: %v", err) + return + } + snapshot, _ := usage.MemoryAccounting.Copy() + total := totalPlatform + snapshot.Mapped + + eventchannel.Emit(&pb.MemoryUsageEvent{Total: total}) +} diff --git a/pkg/sentry/kernel/memevent/memory_events.proto b/pkg/sentry/kernel/memevent/memory_events.proto new file mode 100644 index 000000000..e6e0bd628 --- /dev/null +++ b/pkg/sentry/kernel/memevent/memory_events.proto @@ -0,0 +1,25 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package gvisor; + +// MemoryUsageEvent describes the memory usage of the sandbox at a single +// instant in time. 
These messages are emitted periodically on the eventchannel. +message MemoryUsageEvent { + // The total memory usage of the sandboxed application in bytes, calculated + // using the 'fast' method. + uint64 total = 1; +} diff --git a/pkg/sentry/kernel/pending_signals.go b/pkg/sentry/kernel/pending_signals.go new file mode 100644 index 000000000..d8701f47a --- /dev/null +++ b/pkg/sentry/kernel/pending_signals.go @@ -0,0 +1,126 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/bits" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" +) + +const ( + // stdSignalCap is the maximum number of instances of a given standard + // signal that may be pending. ("[If] multiple instances of a standard + // signal are delivered while that signal is currently blocked, then only + // one instance is queued.") - signal(7) + stdSignalCap = 1 + + // rtSignalCap is the maximum number of instances of a given realtime + // signal that may be pending. + // + // TODO: In Linux, the minimum signal queue size is + // RLIMIT_SIGPENDING, which is by default max_threads/2. + rtSignalCap = 32 +) + +// pendingSignals holds a collection of pending signals. The zero value of +// pendingSignals is a valid empty collection. pendingSignals is thread-unsafe; +// users must provide synchronization. 
+type pendingSignals struct { + // signals contains all pending signals. + // + // Note that signals is zero-indexed, but signal 1 is the first valid + // signal, so signals[0] contains signals with signo 1 etc. This offset is + // usually handled by using Signal.index(). + signals [linux.SignalMaximum]pendingSignalQueue + + // Bit i of pendingSet is set iff there is at least one signal with signo + // i+1 pending. + pendingSet linux.SignalSet +} + +// pendingSignalQueue holds a pendingSignalList for a single signal number. +type pendingSignalQueue struct { + pendingSignalList + length int +} + +type pendingSignal struct { + // pendingSignalEntry links into a pendingSignalList. + pendingSignalEntry + *arch.SignalInfo +} + +// enqueue enqueues the given signal. enqueue returns true on success and false +// on failure (if the given signal's queue is full). +// +// Preconditions: info represents a valid signal. +func (p *pendingSignals) enqueue(info *arch.SignalInfo) bool { + sig := linux.Signal(info.Signo) + q := &p.signals[sig.Index()] + if sig.IsStandard() { + if q.length >= stdSignalCap { + return false + } + } else if q.length >= rtSignalCap { + return false + } + q.pendingSignalList.PushBack(&pendingSignal{SignalInfo: info}) + q.length++ + p.pendingSet |= linux.SignalSetOf(sig) + return true +} + +// dequeue dequeues and returns any pending signal not masked by mask. If no +// unmasked signals are pending, dequeue returns nil. +func (p *pendingSignals) dequeue(mask linux.SignalSet) *arch.SignalInfo { + // "Real-time signals are delivered in a guaranteed order. Multiple + // real-time signals of the same type are delivered in the order they were + // sent. If different real-time signals are sent to a process, they are + // delivered starting with the lowest-numbered signal. (I.e., low-numbered + // signals have highest priority.) By contrast, if multiple standard + // signals are pending for a process, the order in which they are delivered + // is unspecified. 
If both standard and real-time signals are pending for a + // process, POSIX leaves it unspecified which is delivered first. Linux, + // like many other implementations, gives priority to standard signals in + // this case." - signal(7) + lowestPendingUnblockedBit := bits.TrailingZeros64(uint64(p.pendingSet &^ mask)) + if lowestPendingUnblockedBit >= linux.SignalMaximum { + return nil + } + return p.dequeueSpecific(linux.Signal(lowestPendingUnblockedBit + 1)) +} + +func (p *pendingSignals) dequeueSpecific(sig linux.Signal) *arch.SignalInfo { + q := &p.signals[sig.Index()] + ps := q.pendingSignalList.Front() + if ps == nil { + return nil + } + q.pendingSignalList.Remove(ps) + q.length-- + if q.length == 0 { + p.pendingSet &^= linux.SignalSetOf(sig) + } + return ps.SignalInfo +} + +// discardSpecific causes all pending signals with number sig to be discarded. +func (p *pendingSignals) discardSpecific(sig linux.Signal) { + q := &p.signals[sig.Index()] + q.pendingSignalList.Reset() + q.length = 0 + p.pendingSet &^= linux.SignalSetOf(sig) +} diff --git a/pkg/sentry/kernel/pipe/BUILD b/pkg/sentry/kernel/pipe/BUILD new file mode 100644 index 000000000..ca9825f9d --- /dev/null +++ b/pkg/sentry/kernel/pipe/BUILD @@ -0,0 +1,68 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "pipe_state", + srcs = [ + "buffers.go", + "node.go", + "pipe.go", + "reader.go", + "reader_writer.go", + "writer.go", + ], + out = "pipe_state.go", + package = "pipe", +) + +go_library( + name = "pipe", + srcs = [ + "buffers.go", + "device.go", + "node.go", + "pipe.go", + "pipe_state.go", + "reader.go", + "reader_writer.go", + "writer.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/pipe", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/amutex", + "//pkg/ilist", + "//pkg/log", + "//pkg/refs", + 
"//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/waiter", + ], +) + +go_test( + name = "pipe_test", + size = "small", + srcs = [ + "node_test.go", + "pipe_test.go", + ], + embed = [":pipe"], + deps = [ + "//pkg/sentry/context", + "//pkg/sentry/context/contexttest", + "//pkg/sentry/fs", + "//pkg/sentry/usermem", + "//pkg/syserror", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/kernel/pipe/buffers.go b/pkg/sentry/kernel/pipe/buffers.go new file mode 100644 index 000000000..f300537c5 --- /dev/null +++ b/pkg/sentry/kernel/pipe/buffers.go @@ -0,0 +1,50 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pipe + +import ( + "gvisor.googlesource.com/gvisor/pkg/ilist" +) + +// Buffer encapsulates a queueable byte buffer that can +// easily be truncated. It is designed only for use with pipes. +type Buffer struct { + ilist.Entry + data []byte +} + +// newBuffer initializes a Buffer. +func newBuffer(buf []byte) *Buffer { + return &Buffer{data: buf} +} + +// bytes returns the bytes contained in the buffer. +func (b *Buffer) bytes() []byte { + return b.data +} + +// size returns the number of bytes contained in the buffer. +func (b *Buffer) size() int { + return len(b.data) +} + +// truncate removes the first n bytes from the buffer. 
+func (b *Buffer) truncate(n int) int {
+	if n > len(b.data) {
+		// Dropping more bytes than the buffer holds is a caller bug, not a
+		// recoverable runtime condition; fail loudly.
+		panic("Trying to truncate past end of array.")
+	}
+	b.data = b.data[n:]
+	// Return the number of bytes remaining after truncation.
+	return len(b.data)
+}
diff --git a/pkg/sentry/kernel/pipe/device.go b/pkg/sentry/kernel/pipe/device.go
new file mode 100644
index 000000000..8d383577a
--- /dev/null
+++ b/pkg/sentry/kernel/pipe/device.go
@@ -0,0 +1,20 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pipe
+
+import "gvisor.googlesource.com/gvisor/pkg/sentry/device"
+
+// pipeDevice is used for all pipe files.
+var pipeDevice = device.NewAnonDevice()
diff --git a/pkg/sentry/kernel/pipe/node.go b/pkg/sentry/kernel/pipe/node.go
new file mode 100644
index 000000000..5b47427ef
--- /dev/null
+++ b/pkg/sentry/kernel/pipe/node.go
@@ -0,0 +1,175 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
+ +package pipe + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/amutex" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// inodeOperations wraps fs.InodeOperations operations with common pipe opening semantics. +type inodeOperations struct { + fs.InodeOperations + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // p is the underlying Pipe object representing this fifo. + p *Pipe + + // Channels for synchronizing the creation of new readers and writers of + // this fifo. See waitFor and newHandleLocked. + // + // These are not saved/restored because all waiters are unblocked on save, + // and either automatically restart (via ERESTARTSYS) or return EINTR on + // resume. On restarts via ERESTARTSYS, the appropriate channel will be + // recreated. + rWakeup chan struct{} `state:"nosave"` + wWakeup chan struct{} `state:"nosave"` +} + +// NewInodeOperations creates a new pipe fs.InodeOperations. +func NewInodeOperations(base fs.InodeOperations, p *Pipe) fs.InodeOperations { + return &inodeOperations{ + InodeOperations: base, + p: p, + } +} + +// GetFile implements fs.InodeOperations.GetFile. Named pipes have special blocking +// semantics during open: +// +// "Normally, opening the FIFO blocks until the other end is opened also. A +// process can open a FIFO in nonblocking mode. In this case, opening for +// read-only will succeed even if no-one has opened on the write side yet, +// opening for write-only will fail with ENXIO (no such device or address) +// unless the other end has already been opened. Under Linux, opening a FIFO +// for read and write will succeed both in blocking and nonblocking mode. POSIX +// leaves this behavior undefined. This can be used to open a FIFO for writing +// while there are no readers available." 
- fifo(7) +func (i *inodeOperations) GetFile(ctx context.Context, d *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { + i.mu.Lock() + defer i.mu.Unlock() + + switch { + case flags.Read && !flags.Write: // O_RDONLY. + r := i.p.ROpen(ctx) + i.newHandleLocked(&i.rWakeup) + + if i.p.isNamed && !flags.NonBlocking && !i.p.HasWriters() { + if !i.waitFor(&i.wWakeup, ctx) { + r.DecRef() + return nil, syserror.ErrInterrupted + } + } + + // By now, either we're doing a nonblocking open or we have a writer. On + // a nonblocking read-only open, the open succeeds even if no-one has + // opened the write side yet. + return r, nil + + case flags.Write && !flags.Read: // O_WRONLY. + w := i.p.WOpen(ctx) + i.newHandleLocked(&i.wWakeup) + + if i.p.isNamed && !i.p.HasReaders() { + // On a nonblocking, write-only open, the open fails with ENXIO if the + // read side isn't open yet. + if flags.NonBlocking { + w.DecRef() + return nil, syserror.ENXIO + } + + if !i.waitFor(&i.rWakeup, ctx) { + w.DecRef() + return nil, syserror.ErrInterrupted + } + } + return w, nil + + case flags.Read && flags.Write: // O_RDWR. + // Pipes opened for read-write always succeeds without blocking. + rw := i.p.RWOpen(ctx) + i.newHandleLocked(&i.rWakeup) + i.newHandleLocked(&i.wWakeup) + return rw, nil + + default: + return nil, syserror.EINVAL + } +} + +// waitFor blocks until the underlying pipe has at least one reader/writer is +// announced via 'wakeupChan', or until 'sleeper' is cancelled. Any call to this +// function will block for either readers or writers, depending on where +// 'wakeupChan' points. +// +// f.mu must be held by the caller. waitFor returns with f.mu held, but it will +// drop f.mu before blocking for any reader/writers. +func (i *inodeOperations) waitFor(wakeupChan *chan struct{}, sleeper amutex.Sleeper) bool { + // Ideally this function would simply use a condition variable. However, the + // wait needs to be interruptible via 'sleeper', so we must sychronize via a + // channel. 
The synchronization below relies on the fact that closing a + // channel unblocks all receives on the channel. + + // Does an appropriate wakeup channel already exist? If not, create a new + // one. This is all done under f.mu to avoid races. + if *wakeupChan == nil { + *wakeupChan = make(chan struct{}) + } + + // Grab a local reference to the wakeup channel since it may disappear as + // soon as we drop f.mu. + wakeup := *wakeupChan + + // Drop the lock and prepare to sleep. + i.mu.Unlock() + cancel := sleeper.SleepStart() + + // Wait for either a new reader/write to be signalled via 'wakeup', or + // for the sleep to be cancelled. + select { + case <-wakeup: + sleeper.SleepFinish(true) + case <-cancel: + sleeper.SleepFinish(false) + } + + // Take the lock and check if we were woken. If we were woken and + // interrupted, the former takes priority. + i.mu.Lock() + select { + case <-wakeup: + return true + default: + return false + } +} + +// newHandleLocked signals a new pipe reader or writer depending on where +// 'wakeupChan' points. This unblocks any corresponding reader or writer +// waiting for the other end of the channel to be opened, see Fifo.waitFor. +// +// i.mu must be held. +func (*inodeOperations) newHandleLocked(wakeupChan *chan struct{}) { + if *wakeupChan != nil { + close(*wakeupChan) + *wakeupChan = nil + } +} diff --git a/pkg/sentry/kernel/pipe/node_test.go b/pkg/sentry/kernel/pipe/node_test.go new file mode 100644 index 000000000..cc1ebf4f6 --- /dev/null +++ b/pkg/sentry/kernel/pipe/node_test.go @@ -0,0 +1,308 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pipe + +import ( + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +type sleeper struct { + context.Context + ch chan struct{} +} + +func newSleeperContext(t *testing.T) context.Context { + return &sleeper{ + Context: contexttest.Context(t), + ch: make(chan struct{}), + } +} + +func (s *sleeper) SleepStart() <-chan struct{} { + return s.ch +} + +func (s *sleeper) SleepFinish(bool) { +} + +func (s *sleeper) Cancel() { + s.ch <- struct{}{} +} + +type openResult struct { + *fs.File + error +} + +func testOpenOrDie(ctx context.Context, t *testing.T, n fs.InodeOperations, flags fs.FileFlags, doneChan chan<- struct{}) (*fs.File, error) { + file, err := n.GetFile(ctx, nil, flags) + if err != nil { + t.Fatalf("open with flags %+v failed: %v", flags, err) + } + if doneChan != nil { + doneChan <- struct{}{} + } + return file, err +} + +func testOpen(ctx context.Context, t *testing.T, n fs.InodeOperations, flags fs.FileFlags, resChan chan<- openResult) (*fs.File, error) { + file, err := n.GetFile(ctx, nil, flags) + if resChan != nil { + resChan <- openResult{file, err} + } + return file, err +} + +func newNamedPipe(t *testing.T) *Pipe { + return NewPipe(contexttest.Context(t), true, DefaultPipeSize, usermem.PageSize) +} + +func newAnonPipe(t *testing.T) *Pipe { + return NewPipe(contexttest.Context(t), 
false, DefaultPipeSize, usermem.PageSize) +} + +// assertRecvBlocks ensures that a recv attempt on c blocks for at least +// blockDuration. This is useful for checking that a goroutine that is supposed +// to be executing a blocking operation is actually blocking. +func assertRecvBlocks(t *testing.T, c <-chan struct{}, blockDuration time.Duration, failMsg string) { + select { + case <-c: + t.Fatalf(failMsg) + case <-time.After(blockDuration): + // Ok, blocked for the required duration. + } +} + +func TestReadOpenBlocksForWriteOpen(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + rDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone) + + // Verify that the open for read is blocking. + assertRecvBlocks(t, rDone, time.Millisecond*100, + "open for read not blocking with no writers") + + wDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone) + + <-wDone + <-rDone +} + +func TestWriteOpenBlocksForReadOpen(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + wDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone) + + // Verify that the open for write is blocking + assertRecvBlocks(t, wDone, time.Millisecond*100, + "open for write not blocking with no readers") + + rDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone) + + <-rDone + <-wDone +} + +func TestMultipleWriteOpenDoesntCountAsReadOpen(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + rDone1 := make(chan struct{}) + rDone2 := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone1) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone2) + + assertRecvBlocks(t, rDone1, time.Millisecond*100, + "open for read didn't block with no writers") + assertRecvBlocks(t, rDone2, time.Millisecond*100, + "open 
for read didn't block with no writers") + + wDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone) + + <-wDone + <-rDone2 + <-rDone1 +} + +func TestClosedReaderBlocksWriteOpen(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + rFile, _ := testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true, NonBlocking: true}, nil) + rFile.DecRef() + + wDone := make(chan struct{}) + // This open for write should block because the reader is now gone. + go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone) + assertRecvBlocks(t, wDone, time.Millisecond*100, + "open for write didn't block with no concurrent readers") + + // Open for read again. This should unblock the open for write. + rDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone) + + <-rDone + <-wDone +} + +func TestReadWriteOpenNeverBlocks(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + rwDone := make(chan struct{}) + // Open for read-write never wait for a reader or writer, even if the + // nonblocking flag is not set. 
+ go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true, Write: true, NonBlocking: false}, rwDone) + <-rwDone +} + +func TestReadWriteOpenUnblocksReadOpen(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + rDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone) + + rwDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true, Write: true}, rwDone) + + <-rwDone + <-rDone +} + +func TestReadWriteOpenUnblocksWriteOpen(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + wDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone) + + rwDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true, Write: true}, rwDone) + + <-rwDone + <-wDone +} + +func TestBlockedOpenIsCancellable(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + done := make(chan openResult) + go testOpen(ctx, t, f, fs.FileFlags{Read: true}, done) + select { + case <-done: + t.Fatalf("open for read didn't block with no writers") + case <-time.After(time.Millisecond * 100): + // Ok. + } + + ctx.(*sleeper).Cancel() + // If the cancel on the sleeper didn't work, the open for read would never + // return. 
+ res := <-done + if res.error != syserror.ErrInterrupted { + t.Fatalf("Cancellation didn't cause GetFile to return fs.ErrInterrupted, got %v.", + res.error) + } +} + +func TestNonblockingReadOpenNoWriters(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + if _, err := testOpen(ctx, t, f, fs.FileFlags{Read: true, NonBlocking: true}, nil); err != nil { + t.Fatalf("Nonblocking open for read failed with error %v.", err) + } +} + +func TestNonblockingWriteOpenNoReaders(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + if _, err := testOpen(ctx, t, f, fs.FileFlags{Write: true, NonBlocking: true}, nil); err != syserror.ENXIO { + t.Fatalf("Nonblocking open for write failed unexpected error %v.", err) + } +} + +func TestNonBlockingReadOpenWithWriter(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + wDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Write: true}, wDone) + + // Open for write blocks since there are no readers yet. + assertRecvBlocks(t, wDone, time.Millisecond*100, + "Open for write didn't block with no reader.") + + if _, err := testOpen(ctx, t, f, fs.FileFlags{Read: true, NonBlocking: true}, nil); err != nil { + t.Fatalf("Nonblocking open for read failed with error %v.", err) + } + + // Open for write should now be unblocked. + <-wDone +} + +func TestNonBlockingWriteOpenWithReader(t *testing.T) { + f := NewInodeOperations(nil, newNamedPipe(t)) + ctx := newSleeperContext(t) + + rDone := make(chan struct{}) + go testOpenOrDie(ctx, t, f, fs.FileFlags{Read: true}, rDone) + + // Open for write blocked, since no reader yet. 
+ assertRecvBlocks(t, rDone, time.Millisecond*100, + "Open for reader didn't block with no writer.") + + if _, err := testOpen(ctx, t, f, fs.FileFlags{Write: true, NonBlocking: true}, nil); err != nil { + t.Fatalf("Nonblocking open for write failed with error %v.", err) + } + + // Open for write should now be unblocked. + <-rDone +} + +func TestAnonReadOpen(t *testing.T) { + f := NewInodeOperations(nil, newAnonPipe(t)) + ctx := newSleeperContext(t) + + if _, err := testOpen(ctx, t, f, fs.FileFlags{Read: true}, nil); err != nil { + t.Fatalf("open anon pipe for read failed: %v", err) + } +} + +func TestAnonWriteOpen(t *testing.T) { + f := NewInodeOperations(nil, newAnonPipe(t)) + ctx := newSleeperContext(t) + + if _, err := testOpen(ctx, t, f, fs.FileFlags{Write: true}, nil); err != nil { + t.Fatalf("open anon pipe for write failed: %v", err) + } +} diff --git a/pkg/sentry/kernel/pipe/pipe.go b/pkg/sentry/kernel/pipe/pipe.go new file mode 100644 index 000000000..1656c6ff3 --- /dev/null +++ b/pkg/sentry/kernel/pipe/pipe.go @@ -0,0 +1,335 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pipe provides an in-memory implementation of a unidirectional +// pipe. +// +// The goal of this pipe is to emulate the pipe syscall in all of its +// edge cases and guarantees of atomic IO. 
+package pipe + +import ( + "fmt" + "sync" + "sync/atomic" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/ilist" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// DefaultPipeSize is the system-wide default size of a pipe in bytes. +const DefaultPipeSize = 65536 + +// Pipe is an encapsulation of a platform-independent pipe. +// It manages a buffered byte queue shared between a reader/writer +// pair. +type Pipe struct { + waiter.Queue `state:"nosave"` + + // Whether this is a named or anonymous pipe. + isNamed bool + + // The dirent backing this pipe. Shared by all readers and writers. + dirent *fs.Dirent + + // The buffered byte queue. + data ilist.List + + // Max size of the pipe in bytes. When this max has been reached, + // writers will get EWOULDBLOCK. + max int + + // Current size of the pipe in bytes. + size int + + // Max number of bytes the pipe can guarantee to read or write + // atomically. + atomicIOBytes int + + // The number of active readers for this pipe. Load/store atomically. + readers int32 + + // The number of active writes for this pipe. Load/store atomically. + writers int32 + + // This flag indicates if this pipe ever had a writer. Note that this does + // not necessarily indicate there is *currently* a writer, just that there + // has been a writer at some point since the pipe was created. + // + // Protected by mu. + hadWriter bool + + // Lock protecting all pipe internal state. + mu sync.Mutex `state:"nosave"` +} + +// NewPipe initializes and returns a pipe. A pipe created by this function is +// persistent, and will remain valid even without any open fds to it. Named +// pipes for mknod(2) are created via this function. 
Note that the +// implementation of blocking semantics for opening the read and write ends of a +// named pipe are left to filesystems. +func NewPipe(ctx context.Context, isNamed bool, sizeBytes, atomicIOBytes int) *Pipe { + p := &Pipe{ + isNamed: isNamed, + max: sizeBytes, + atomicIOBytes: atomicIOBytes, + } + + // Build the fs.Dirent of this pipe, shared by all fs.Files associated + // with this pipe. + ino := pipeDevice.NextIno() + base := fsutil.NewSimpleInodeOperations(fsutil.InodeSimpleAttributes{ + FSType: linux.PIPEFS_MAGIC, + UAttr: fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: fs.FileOwnerFromContext(ctx), + Perms: fs.FilePermissions{ + User: fs.PermMask{Read: true, Write: true}, + }, + Links: 1, + }), + }) + sattr := fs.StableAttr{ + Type: fs.Pipe, + DeviceID: pipeDevice.DeviceID(), + InodeID: ino, + BlockSize: int64(atomicIOBytes), + } + // There is no real filesystem backing this pipe, so we pass in a nil + // Filesystem. + sb := fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}) + p.dirent = fs.NewDirent(fs.NewInode(NewInodeOperations(base, p), sb, sattr), fmt.Sprintf("pipe:[%d]", ino)) + + return p +} + +// NewConnectedPipe initializes a pipe and returns a pair of objects (which +// implement kio.File) representing the read and write ends of the pipe. A pipe +// created by this function becomes invalid as soon as either the read or write +// end is closed, and errors on subsequent operations on either end. Pipes +// for pipe(2) and pipe2(2) are generally created this way. +func NewConnectedPipe(ctx context.Context, sizeBytes int, atomicIOBytes int) (*fs.File, *fs.File) { + p := NewPipe(ctx, false /* isNamed */, sizeBytes, atomicIOBytes) + return p.ROpen(ctx), p.WOpen(ctx) +} + +// ROpen opens the pipe for reading. +func (p *Pipe) ROpen(ctx context.Context) *fs.File { + p.rOpen() + return fs.NewFile(ctx, p.dirent, fs.FileFlags{Read: true}, &Reader{ + ReaderWriter: ReaderWriter{Pipe: p}, + }) +} + +// WOpen opens the pipe for writing. 
+func (p *Pipe) WOpen(ctx context.Context) *fs.File { + p.wOpen() + return fs.NewFile(ctx, p.dirent, fs.FileFlags{Write: true}, &Writer{ + ReaderWriter: ReaderWriter{Pipe: p}, + }) +} + +// RWOpen opens the pipe for both reading and writing. +func (p *Pipe) RWOpen(ctx context.Context) *fs.File { + p.rOpen() + p.wOpen() + return fs.NewFile(ctx, p.dirent, fs.FileFlags{Read: true, Write: true}, &ReaderWriter{ + Pipe: p, + }) +} + +// read reads data from the pipe into dst and returns the number of bytes +// read, or returns ErrWouldBlock if the pipe is empty. +func (p *Pipe) read(ctx context.Context, dst usermem.IOSequence) (int64, error) { + if !p.HasReaders() { + return 0, syscall.EBADF + } + + // Don't block for a zero-length read even if the pipe is empty. + if dst.NumBytes() == 0 { + return 0, nil + } + + p.mu.Lock() + defer p.mu.Unlock() + // If there is nothing to read at the moment but there is a writer, tell the + // caller to block. + if p.size == 0 { + if !p.HasWriters() { + // There are no writers, return EOF. + return 0, nil + } + return 0, syserror.ErrWouldBlock + } + var n int64 + for b := p.data.Front(); b != nil; b = p.data.Front() { + buffer := b.(*Buffer) + n0, err := dst.CopyOut(ctx, buffer.bytes()) + n += int64(n0) + p.size -= n0 + if buffer.truncate(n0) == 0 { + p.data.Remove(b) + } + dst = dst.DropFirst(n0) + if dst.NumBytes() == 0 || err != nil { + return n, err + } + } + return n, nil +} + +// write writes data from sv into the pipe and returns the number of bytes +// written. If no bytes are written because the pipe is full (or has less than +// atomicIOBytes free capacity), write returns ErrWouldBlock. 
+func (p *Pipe) write(ctx context.Context, src usermem.IOSequence) (int64, error) { + p.mu.Lock() + defer p.mu.Unlock() + + if !p.HasWriters() { + return 0, syscall.EBADF + } + if !p.HasReaders() { + return 0, syscall.EPIPE + } + + // POSIX requires that a write smaller than atomicIOBytes (PIPE_BUF) be + // atomic, but requires no atomicity for writes larger than this. However, + // Linux appears to provide stronger semantics than this in practice: + // unmerged writes are done one PAGE_SIZE buffer at a time, so for larger + // writes, the writing of each PIPE_BUF-sized chunk is atomic. We implement + // this by writing at most atomicIOBytes at a time if we can't service the + // write in its entirety. + canWrite := src.NumBytes() + if canWrite > int64(p.max-p.size) { + if p.max-p.size >= p.atomicIOBytes { + canWrite = int64(p.atomicIOBytes) + } else { + return 0, syserror.ErrWouldBlock + } + } + + // Copy data from user memory into a pipe-owned buffer. + buf := make([]byte, canWrite) + n, err := src.CopyIn(ctx, buf) + if n > 0 { + p.data.PushBack(newBuffer(buf[:n])) + p.size += n + } + if int64(n) < src.NumBytes() && err == nil { + // Partial write due to full pipe. + err = syserror.ErrWouldBlock + } + return int64(n), err +} + +// rOpen signals a new reader of the pipe. +func (p *Pipe) rOpen() { + atomic.AddInt32(&p.readers, 1) +} + +// wOpen signals a new writer of the pipe. +func (p *Pipe) wOpen() { + p.mu.Lock() + defer p.mu.Unlock() + p.hadWriter = true + atomic.AddInt32(&p.writers, 1) +} + +// rClose signals that a reader has closed their end of the pipe. +func (p *Pipe) rClose() { + newReaders := atomic.AddInt32(&p.readers, -1) + if newReaders < 0 { + panic(fmt.Sprintf("Refcounting bug, pipe has negative readers: %v", newReaders)) + } +} + +// wClose signals that a writer has closed their end of the pipe. 
+func (p *Pipe) wClose() { + newWriters := atomic.AddInt32(&p.writers, -1) + if newWriters < 0 { + panic(fmt.Sprintf("Refcounting bug, pipe has negative writers: %v.", newWriters)) + } +} + +// HasReaders returns whether the pipe has any active readers. +func (p *Pipe) HasReaders() bool { + return atomic.LoadInt32(&p.readers) > 0 +} + +// HasWriters returns whether the pipe has any active writers. +func (p *Pipe) HasWriters() bool { + return atomic.LoadInt32(&p.writers) > 0 +} + +func (p *Pipe) rReadinessLocked() waiter.EventMask { + ready := waiter.EventMask(0) + if p.HasReaders() && p.data.Front() != nil { + ready |= waiter.EventIn + } + if !p.HasWriters() && p.hadWriter { + // POLLHUP must be supressed until the pipe has had at least one writer + // at some point. Otherwise a reader thread may poll and immediately get + // a POLLHUP before the writer ever opens the pipe, which the reader may + // interpret as the writer opening then closing the pipe. + ready |= waiter.EventHUp + } + return ready +} + +// rReadiness returns a mask that states whether the read end of the pipe is +// ready for reading. +func (p *Pipe) rReadiness() waiter.EventMask { + p.mu.Lock() + defer p.mu.Unlock() + return p.rReadinessLocked() +} + +func (p *Pipe) wReadinessLocked() waiter.EventMask { + ready := waiter.EventMask(0) + if p.HasWriters() && p.size < p.max { + ready |= waiter.EventOut + } + if !p.HasReaders() { + ready |= waiter.EventErr + } + return ready +} + +// wReadiness returns a mask that states whether the write end of the pipe +// is ready for writing. +func (p *Pipe) wReadiness() waiter.EventMask { + p.mu.Lock() + defer p.mu.Unlock() + return p.wReadinessLocked() +} + +// rwReadiness returns a mask that states whether a read-write handle to the +// pipe is ready for IO. 
+func (p *Pipe) rwReadiness() waiter.EventMask { + p.mu.Lock() + defer p.mu.Unlock() + return p.rReadinessLocked() | p.wReadinessLocked() +} + +func (p *Pipe) queuedSize() int { + p.mu.Lock() + defer p.mu.Unlock() + return p.size +} diff --git a/pkg/sentry/kernel/pipe/pipe_test.go b/pkg/sentry/kernel/pipe/pipe_test.go new file mode 100644 index 000000000..49ef8c8ac --- /dev/null +++ b/pkg/sentry/kernel/pipe/pipe_test.go @@ -0,0 +1,138 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pipe + +import ( + "bytes" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +func TestPipeRW(t *testing.T) { + ctx := contexttest.Context(t) + r, w := NewConnectedPipe(ctx, 65536, 4096) + defer r.DecRef() + defer w.DecRef() + + msg := []byte("here's some bytes") + wantN := int64(len(msg)) + n, err := w.Writev(ctx, usermem.BytesIOSequence(msg)) + if n != wantN || err != nil { + t.Fatalf("Writev: got (%d, %v), wanted (%d, nil)", n, err, wantN) + } + + buf := make([]byte, len(msg)) + n, err = r.Readv(ctx, usermem.BytesIOSequence(buf)) + if n != wantN || err != nil || !bytes.Equal(buf, msg) { + t.Fatalf("Readv: got (%d, %v) %q, wanted (%d, nil) %q", n, err, buf, wantN, msg) + } +} + +func TestPipeReadBlock(t *testing.T) { + ctx := contexttest.Context(t) + r, w := NewConnectedPipe(ctx, 65536, 4096) + defer r.DecRef() + defer w.DecRef() + + n, err := r.Readv(ctx, usermem.BytesIOSequence(make([]byte, 1))) + if n != 0 || err != syserror.ErrWouldBlock { + t.Fatalf("Readv: got (%d, %v), wanted (0, %v)", n, err, syserror.ErrWouldBlock) + } +} + +func TestPipeWriteBlock(t *testing.T) { + const atomicIOBytes = 2 + + ctx := contexttest.Context(t) + r, w := NewConnectedPipe(ctx, 10, atomicIOBytes) + defer r.DecRef() + defer w.DecRef() + + msg := []byte("here's some bytes") + n, err := w.Writev(ctx, usermem.BytesIOSequence(msg)) + if wantN, wantErr := int64(atomicIOBytes), syserror.ErrWouldBlock; n != wantN || err != wantErr { + t.Fatalf("Writev: got (%d, %v), wanted (%d, %v)", n, err, wantN, wantErr) + } +} + +func TestPipeWriteUntilEnd(t *testing.T) { + const atomicIOBytes = 2 + + ctx := contexttest.Context(t) + r, w := NewConnectedPipe(ctx, atomicIOBytes, atomicIOBytes) + defer r.DecRef() + defer w.DecRef() + + msg := []byte("here's some bytes") + + wDone := make(chan struct{}, 0) + 
rDone := make(chan struct{}, 0) + defer func() { + // Signal the reader to stop and wait until it does so. + close(wDone) + <-rDone + }() + + go func() { + defer close(rDone) + // Read from r until done is closed. + ctx := contexttest.Context(t) + buf := make([]byte, len(msg)+1) + dst := usermem.BytesIOSequence(buf) + e, ch := waiter.NewChannelEntry(nil) + r.EventRegister(&e, waiter.EventIn) + defer r.EventUnregister(&e) + for { + n, err := r.Readv(ctx, dst) + dst = dst.DropFirst64(n) + if err == syserror.ErrWouldBlock { + select { + case <-ch: + continue + case <-wDone: + // We expect to have 1 byte left in dst since len(buf) == + // len(msg)+1. + if dst.NumBytes() != 1 || !bytes.Equal(buf[:len(msg)], msg) { + t.Errorf("Reader: got %q (%d bytes remaining), wanted %q", buf, dst.NumBytes(), msg) + } + return + } + } + if err != nil { + t.Fatalf("Readv: got unexpected error %v", err) + } + } + }() + + src := usermem.BytesIOSequence(msg) + e, ch := waiter.NewChannelEntry(nil) + w.EventRegister(&e, waiter.EventOut) + defer w.EventUnregister(&e) + for src.NumBytes() != 0 { + n, err := w.Writev(ctx, src) + src = src.DropFirst64(n) + if err == syserror.ErrWouldBlock { + <-ch + continue + } + if err != nil { + t.Fatalf("Writev: got (%d, %v)", n, err) + } + } +} diff --git a/pkg/sentry/kernel/pipe/reader.go b/pkg/sentry/kernel/pipe/reader.go new file mode 100644 index 000000000..40d5e4943 --- /dev/null +++ b/pkg/sentry/kernel/pipe/reader.go @@ -0,0 +1,37 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pipe + +import ( + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// Reader satisfies the fs.FileOperations interface for read-only pipes. +// Reader should be used with !fs.FileFlags.Write to reject writes. +type Reader struct { + ReaderWriter +} + +// Release implements fs.FileOperations.Release. +func (r *Reader) Release() { + r.Pipe.rClose() + // Wake up writers. + r.Pipe.Notify(waiter.EventOut) +} + +// Readiness returns the ready events in the underlying pipe. +func (r *Reader) Readiness(mask waiter.EventMask) waiter.EventMask { + return r.Pipe.rReadiness() & mask +} diff --git a/pkg/sentry/kernel/pipe/reader_writer.go b/pkg/sentry/kernel/pipe/reader_writer.go new file mode 100644 index 000000000..dc642a3a6 --- /dev/null +++ b/pkg/sentry/kernel/pipe/reader_writer.go @@ -0,0 +1,91 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pipe + +import ( + "fmt" + "math" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// ReaderWriter satisfies the FileOperations interface and services both +// read and write requests. 
This should only be used directly for named pipes. +// pipe(2) and pipe2(2) only support unidirectional pipes and should use +// either pipe.Reader or pipe.Writer. +type ReaderWriter struct { + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + *Pipe +} + +// Release implements fs.FileOperations.Release. +func (rw *ReaderWriter) Release() { + rw.Pipe.rClose() + rw.Pipe.wClose() + // Wake up readers and writers. + rw.Pipe.Notify(waiter.EventIn | waiter.EventOut) +} + +// Read implements fs.FileOperations.Read. +func (rw *ReaderWriter) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + n, err := rw.Pipe.read(ctx, dst) + if n > 0 { + rw.Pipe.Notify(waiter.EventOut) + } + return n, err +} + +// Write implements fs.FileOperations.Write. +func (rw *ReaderWriter) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + n, err := rw.Pipe.write(ctx, src) + if n > 0 { + rw.Pipe.Notify(waiter.EventIn) + } + return n, err +} + +// Readiness returns the ready events in the underlying pipe. +func (rw *ReaderWriter) Readiness(mask waiter.EventMask) waiter.EventMask { + return rw.Pipe.rwReadiness() & mask +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (rw *ReaderWriter) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + // Switch on ioctl request. + switch int(args[1].Int()) { + case syscall.TIOCINQ: + v := rw.queuedSize() + if v > math.MaxInt32 { + panic(fmt.Sprintf("Impossibly large pipe queued size: %d", v)) + } + // Copy result to user-space. 
+ _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(v), usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + default: + return 0, syscall.ENOTTY + } +} diff --git a/pkg/sentry/kernel/pipe/writer.go b/pkg/sentry/kernel/pipe/writer.go new file mode 100644 index 000000000..fd13008ac --- /dev/null +++ b/pkg/sentry/kernel/pipe/writer.go @@ -0,0 +1,37 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pipe + +import ( + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// Writer satisfies the fs.FileOperations interface for write-only pipes. +// Writer should be used with !fs.FileFlags.Read to reject reads. +type Writer struct { + ReaderWriter +} + +// Release implements fs.FileOperations.Release. +func (w *Writer) Release() { + w.Pipe.wClose() + // Wake up readers. + w.Pipe.Notify(waiter.EventHUp) +} + +// Readiness returns the ready events in the underlying pipe. +func (w *Writer) Readiness(mask waiter.EventMask) waiter.EventMask { + return w.Pipe.wReadiness() & mask +} diff --git a/pkg/sentry/kernel/ptrace.go b/pkg/sentry/kernel/ptrace.go new file mode 100644 index 000000000..20b1c4cd4 --- /dev/null +++ b/pkg/sentry/kernel/ptrace.go @@ -0,0 +1,1054 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "fmt" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// ptrace constants from Linux's include/uapi/linux/ptrace.h. +const ( + _PTRACE_EVENT_SECCOMP = 7 + PTRACE_SEIZE = 0x4206 + PTRACE_INTERRUPT = 0x4207 + PTRACE_LISTEN = 0x4208 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_GETSIGMASK = 0x420a + PTRACE_SETSIGMASK = 0x420b + _PTRACE_O_EXITKILL = 1 << 20 + _PTRACE_O_TRACESECCOMP = 1 << _PTRACE_EVENT_SECCOMP +) + +// ptraceOptions are the subset of options controlling a task's ptrace behavior +// that are set by ptrace(PTRACE_SETOPTIONS). +type ptraceOptions struct { + // ExitKill is true if the tracee should be sent SIGKILL when the tracer + // exits. + ExitKill bool + + // If SysGood is true, set bit 7 in the signal number for + // syscall-entry-stop and syscall-exit-stop traps delivered to this task's + // tracer. + SysGood bool + + // TraceClone is true if the tracer wants to receive PTRACE_EVENT_CLONE + // events. + TraceClone bool + + // TraceExec is true if the tracer wants to receive PTRACE_EVENT_EXEC + // events. + TraceExec bool + + // TraceExit is true if the tracer wants to receive PTRACE_EVENT_EXIT + // events. + TraceExit bool + + // TraceFork is true if the tracer wants to receive PTRACE_EVENT_FORK + // events. + TraceFork bool + + // TraceSeccomp is true if the tracer wants to receive PTRACE_EVENT_SECCOMP + // events. 
+ TraceSeccomp bool + + // TraceVfork is true if the tracer wants to receive PTRACE_EVENT_VFORK + // events. + TraceVfork bool + + // TraceVforkDone is true if the tracer wants to receive + // PTRACE_EVENT_VFORK_DONE events. + TraceVforkDone bool +} + +// ptraceSyscallMode controls the behavior of a ptraced task at syscall entry +// and exit. +type ptraceSyscallMode int + +const ( + // ptraceSyscallNone indicates that the task has never ptrace-stopped, or + // that it was resumed from its last ptrace-stop by PTRACE_CONT or + // PTRACE_DETACH. The task's syscalls will not be intercepted. + ptraceSyscallNone ptraceSyscallMode = iota + + // ptraceSyscallIntercept indicates that the task was resumed from its last + // ptrace-stop by PTRACE_SYSCALL. The next time the task enters or exits a + // syscall, a ptrace-stop will occur. + ptraceSyscallIntercept + + // ptraceSyscallEmu indicates that the task was resumed from its last + // ptrace-stop by PTRACE_SYSEMU or PTRACE_SYSEMU_SINGLESTEP. The next time + // the task enters a syscall, the syscall will be skipped, and a + // ptrace-stop will occur. + ptraceSyscallEmu +) + +// CanTrace checks that t is permitted to access target's state, as defined by +// ptrace(2), subsection "Ptrace access mode checking". If attach is true, it +// checks for access mode PTRACE_MODE_ATTACH; otherwise, it checks for access +// mode PTRACE_MODE_READ. +func (t *Task) CanTrace(target *Task, attach bool) bool { + // "1. If the calling thread and the target thread are in the same thread + // group, access is always allowed." - ptrace(2) + // + // Note: Strictly speaking, prior to 73af963f9f30 ("__ptrace_may_access() + // should not deny sub-threads", first released in Linux 3.12), the rule + // only applies if t and target are the same task. But, as that commit + // message puts it, "[any] security check is pointless when the tasks share + // the same ->mm." + if t.tg == target.tg { + return true + } + + // """ + // 2. 
If the access mode specifies PTRACE_MODE_FSCREDS (ED: snipped, + // doesn't exist until Linux 4.5). + // + // Otherwise, the access mode specifies PTRACE_MODE_REALCREDS, so use the + // caller's real UID and GID for the checks in the next step. (Most APIs + // that check the caller's UID and GID use the effective IDs. For + // historical reasons, the PTRACE_MODE_REALCREDS check uses the real IDs + // instead.) + // + // 3. Deny access if neither of the following is true: + // + // - The real, effective, and saved-set user IDs of the target match the + // caller's user ID, *and* the real, effective, and saved-set group IDs of + // the target match the caller's group ID. + // + // - The caller has the CAP_SYS_PTRACE capability in the user namespace of + // the target. + // + // 4. Deny access if the target process "dumpable" attribute has a value + // other than 1 (SUID_DUMP_USER; see the discussion of PR_SET_DUMPABLE in + // prctl(2)), and the caller does not have the CAP_SYS_PTRACE capability in + // the user namespace of the target process. + // + // 5. The kernel LSM security_ptrace_access_check() interface is invoked to + // see if ptrace access is permitted. The results depend on the LSM(s). The + // implementation of this interface in the commoncap LSM performs the + // following steps: + // + // a) If the access mode includes PTRACE_MODE_FSCREDS, then use the + // caller's effective capability set; otherwise (the access mode specifies + // PTRACE_MODE_REALCREDS, so) use the caller's permitted capability set. + // + // b) Deny access if neither of the following is true: + // + // - The caller and the target process are in the same user namespace, and + // the caller's capabilities are a proper superset of the target process's + // permitted capabilities. + // + // - The caller has the CAP_SYS_PTRACE capability in the target process's + // user namespace. 
+ // + // Note that the commoncap LSM does not distinguish between + // PTRACE_MODE_READ and PTRACE_MODE_ATTACH. (ED: From earlier in this + // section: "the commoncap LSM ... is always invoked".) + // """ + callerCreds := t.Credentials() + targetCreds := target.Credentials() + if callerCreds.HasCapabilityIn(linux.CAP_SYS_PTRACE, targetCreds.UserNamespace) { + return true + } + if cuid := callerCreds.RealKUID; cuid != targetCreds.RealKUID || cuid != targetCreds.EffectiveKUID || cuid != targetCreds.SavedKUID { + return false + } + if cgid := callerCreds.RealKGID; cgid != targetCreds.RealKGID || cgid != targetCreds.EffectiveKGID || cgid != targetCreds.SavedKGID { + return false + } + // TODO: dumpability check + if callerCreds.UserNamespace != targetCreds.UserNamespace { + return false + } + if targetCreds.PermittedCaps&^callerCreds.PermittedCaps != 0 { + return false + } + // TODO: Yama LSM + return true +} + +// Tracer returns t's ptrace Tracer. +func (t *Task) Tracer() *Task { + return t.ptraceTracer.Load().(*Task) +} + +// hasTracer returns true if t has a ptrace tracer attached. +func (t *Task) hasTracer() bool { + // This isn't just inlined into callers so that if Task.Tracer() turns out + // to be too expensive because of e.g. interface conversion, we can switch + // to having a separate atomic flag more easily. + return t.Tracer() != nil +} + +// ptraceStop is a TaskStop placed on tasks in a ptrace-stop. +type ptraceStop struct { + // If frozen is true, the stopped task's tracer is currently operating on + // it, so Task.Kill should not remove the stop. + frozen bool +} + +// Killable implements TaskStop.Killable. +func (s *ptraceStop) Killable() bool { + return !s.frozen +} + +// beginPtraceStopLocked initiates an unfrozen ptrace-stop on t. If t has been +// killed, the stop is skipped, and beginPtraceStopLocked returns false. +// +// beginPtraceStopLocked does not signal t's tracer or wake it if it is +// waiting. 
//
// Preconditions: The TaskSet mutex must be locked. The caller must be running
// on the task goroutine.
func (t *Task) beginPtraceStopLocked() bool {
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	// This is analogous to Linux's kernel/signal.c:ptrace_stop() => ... =>
	// kernel/sched/core.c:__schedule() => signal_pending_state() check, which
	// is what prevents tasks from entering ptrace-stops after being killed.
	// Note that if t was SIGKILLed and beginPtraceStopLocked is being called
	// for PTRACE_EVENT_EXIT, the task will have dequeued the signal before
	// entering the exit path, so t.killable() will no longer return true. This
	// is consistent with Linux: "Bugs: ... A SIGKILL signal may still cause a
	// PTRACE_EVENT_EXIT stop before actual signal death. This may be changed
	// in the future; SIGKILL is meant to always immediately kill tasks even
	// under ptrace. Last confirmed on Linux 3.13." - ptrace(2)
	if t.killedLocked() {
		return false
	}
	t.beginInternalStopLocked(&ptraceStop{})
	return true
}

// ptraceTrapLocked initiates a ptrace-stop that reports SIGTRAP with the
// given si_code to t's tracer (used for syscall-stops and PTRACE_EVENT
// stops), and notifies the tracer's event queue.
//
// Preconditions: The TaskSet mutex must be locked.
func (t *Task) ptraceTrapLocked(code int32) {
	t.ptraceCode = code
	t.ptraceSiginfo = &arch.SignalInfo{
		Signo: int32(linux.SIGTRAP),
		Code:  code,
	}
	// The reported siginfo identifies t by its TID in its own PID namespace
	// and its real UID mapped into its own user namespace.
	t.ptraceSiginfo.SetPid(int32(t.tg.pidns.tids[t]))
	t.ptraceSiginfo.SetUid(int32(t.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
	if t.beginPtraceStopLocked() {
		tracer := t.Tracer()
		tracer.signalStop(t, arch.CLD_TRAPPED, int32(linux.SIGTRAP))
		tracer.tg.eventQueue.Notify(EventTraceeStop)
	}
}

// ptraceFreeze checks if t is in a ptraceStop. If so, it freezes the
// ptraceStop, temporarily preventing it from being removed by a concurrent
// Task.Kill, and returns true. Otherwise it returns false.
//
// Preconditions: The TaskSet mutex must be locked. The caller must be running
// on the task goroutine of t's tracer.
func (t *Task) ptraceFreeze() bool {
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	if t.stop == nil {
		return false
	}
	s, ok := t.stop.(*ptraceStop)
	if !ok {
		return false
	}
	s.frozen = true
	return true
}

// ptraceUnfreeze ends the effect of a previous successful call to
// ptraceFreeze.
//
// Preconditions: t must be in a frozen ptraceStop.
func (t *Task) ptraceUnfreeze() {
	// t.tg.signalHandlers is stable because t is in a frozen ptrace-stop,
	// preventing its thread group from completing execve.
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	// Do this even if the task has been killed to ensure a panic if t.stop is
	// nil or not a ptraceStop.
	t.stop.(*ptraceStop).frozen = false
	// If a kill arrived while the stop was frozen (and so could not remove
	// it), complete the stop removal now.
	if t.killedLocked() {
		t.endInternalStopLocked()
	}
}

// ptraceUnstop implements ptrace request PTRACE_CONT, PTRACE_SYSCALL,
// PTRACE_SINGLESTEP, PTRACE_SYSEMU, or PTRACE_SYSEMU_SINGLESTEP depending on
// mode and singlestep.
//
// Preconditions: t must be in a frozen ptrace stop.
//
// Postconditions: If ptraceUnstop returns nil, t will no longer be in a ptrace
// stop.
func (t *Task) ptraceUnstop(mode ptraceSyscallMode, singlestep bool, sig linux.Signal) error {
	if sig != 0 && !sig.IsValid() {
		return syserror.EIO
	}
	t.tg.pidns.owner.mu.Lock()
	defer t.tg.pidns.owner.mu.Unlock()
	t.ptraceCode = int32(sig)
	t.ptraceSyscallMode = mode
	t.ptraceSinglestep = singlestep
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	t.endInternalStopLocked()
	return nil
}

// ptraceTraceme implements ptrace(PTRACE_TRACEME). t is the caller; on
// success, t's parent becomes its tracer.
func (t *Task) ptraceTraceme() error {
	t.tg.pidns.owner.mu.Lock()
	defer t.tg.pidns.owner.mu.Unlock()
	if t.hasTracer() {
		return syserror.EPERM
	}
	if t.parent == nil {
		// In Linux, only init can not have a parent, and init is assumed never
		// to invoke PTRACE_TRACEME. In the sentry, TGID 1 is an arbitrary user
		// application that may invoke PTRACE_TRACEME; having no parent can
		// also occur if all tasks in the parent thread group have exited, and
		// failed to find a living thread group to reparent to. The former case
		// is treated as if TGID 1 has an exited parent in an invisible
		// ancestor PID namespace that is an owner of the root user namespace
		// (and consequently has CAP_SYS_PTRACE), and the latter case is a
		// special form of the exited parent case below. In either case,
		// returning nil here is correct.
		return nil
	}
	if !t.parent.CanTrace(t, true) {
		return syserror.EPERM
	}
	if t.parent.exitState != TaskExitNone {
		// Fail silently, as if we were successfully attached but then
		// immediately detached. This is consistent with Linux.
		return nil
	}
	t.ptraceTracer.Store(t.parent)
	t.parent.ptraceTracees[t] = struct{}{}
	return nil
}

// ptraceAttach implements ptrace(PTRACE_ATTACH, target). t is the caller.
func (t *Task) ptraceAttach(target *Task) error {
	if t.tg == target.tg {
		return syserror.EPERM
	}
	if !t.CanTrace(target, true) {
		return syserror.EPERM
	}
	t.tg.pidns.owner.mu.Lock()
	defer t.tg.pidns.owner.mu.Unlock()
	if target.hasTracer() {
		return syserror.EPERM
	}
	// Attaching to zombies and dead tasks is not permitted; the exit
	// notification logic relies on this. Linux allows attaching to PF_EXITING
	// tasks, though.
	if target.exitState >= TaskExitZombie {
		return syserror.EPERM
	}
	target.ptraceTracer.Store(t)
	t.ptraceTracees[target] = struct{}{}
	target.tg.signalHandlers.mu.Lock()
	target.sendSignalLocked(&arch.SignalInfo{
		Signo: int32(linux.SIGSTOP),
		Code:  arch.SignalInfoUser,
	}, false /* group */)
	// Undocumented Linux feature: If the tracee is already group-stopped (and
	// consequently will not report the SIGSTOP just sent), force it to leave
	// and re-enter the stop so that it will switch to a ptrace-stop.
	// (A group stop is stored as a typed-nil (*groupStop) in t.stop, hence
	// this comparison.)
	if target.stop == (*groupStop)(nil) {
		target.groupStopRequired = true
		target.endInternalStopLocked()
	}
	target.tg.signalHandlers.mu.Unlock()
	return nil
}

// ptraceDetach implements ptrace(PTRACE_DETACH, target, 0, sig). t is the
// caller.
//
// Preconditions: target must be a tracee of t in a frozen ptrace stop.
//
// Postconditions: If ptraceDetach returns nil, target will no longer be in a
// ptrace stop.
func (t *Task) ptraceDetach(target *Task, sig linux.Signal) error {
	if sig != 0 && !sig.IsValid() {
		return syserror.EIO
	}
	t.tg.pidns.owner.mu.Lock()
	defer t.tg.pidns.owner.mu.Unlock()
	target.ptraceCode = int32(sig)
	target.forgetTracerLocked()
	delete(t.ptraceTracees, target)
	return nil
}

// exitPtrace is called in the exit path to detach all of t's tracees.
func (t *Task) exitPtrace() {
	t.tg.pidns.owner.mu.Lock()
	defer t.tg.pidns.owner.mu.Unlock()
	for target := range t.ptraceTracees {
		if target.ptraceOpts.ExitKill {
			target.tg.signalHandlers.mu.Lock()
			target.sendSignalLocked(&arch.SignalInfo{
				Signo: int32(linux.SIGKILL),
			}, false /* group */)
			target.tg.signalHandlers.mu.Unlock()
		}
		// Leave ptraceCode unchanged so that if the task is ptrace-stopped, it
		// observes the ptraceCode it set before it entered the stop. I believe
		// this is consistent with Linux.
		target.forgetTracerLocked()
	}
	// Reset to an empty (rather than nil) map: "nil maps cannot be saved"
	t.ptraceTracees = make(map[*Task]struct{})
}

// forgetTracerLocked detaches t's tracer and ensures that t is no longer
// ptrace-stopped.
//
// Preconditions: The TaskSet mutex must be locked for writing.
func (t *Task) forgetTracerLocked() {
	t.ptraceOpts = ptraceOptions{}
	t.ptraceSyscallMode = ptraceSyscallNone
	t.ptraceSinglestep = false
	t.ptraceTracer.Store((*Task)(nil))
	// NOTE(review): this appears to self-acknowledge a pending exit
	// notification, since the now-detached tracer never will — confirm
	// against the task exit path.
	if t.exitTracerNotified && !t.exitTracerAcked {
		t.exitTracerAcked = true
		t.exitNotifyLocked(true)
	}
	// If t is ptrace-stopped, but its thread group is in a group stop and t is
	// eligible to participate, make it do so. This is essentially the reverse
	// of the special case in ptraceAttach, which converts a group stop to a
	// ptrace stop. ("Handling of restart from group-stop is currently buggy,
	// but the "as planned" behavior is to leave tracee stopped and waiting for
	// SIGCONT." - ptrace(2))
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	if t.stop == nil {
		return
	}
	if _, ok := t.stop.(*ptraceStop); ok {
		if t.exitState < TaskExitInitiated && t.tg.groupStopPhase >= groupStopInitiated {
			t.groupStopRequired = true
		}
		t.endInternalStopLocked()
	}
}

// ptraceSignalLocked is called after signal dequeueing to check if t should
// enter ptrace signal-delivery-stop.
//
// Preconditions: The signal mutex must be locked. The caller must be running
// on the task goroutine.
func (t *Task) ptraceSignalLocked(info *arch.SignalInfo) bool {
	// SIGKILL never causes a signal-delivery-stop.
	if linux.Signal(info.Signo) == linux.SIGKILL {
		return false
	}
	if !t.hasTracer() {
		return false
	}
	// The tracer might change this signal into a stop signal, in which case
	// any SIGCONT received after the signal was originally dequeued should
	// cancel it. This is consistent with Linux.
	if t.tg.groupStopPhase == groupStopNone {
		t.tg.groupStopPhase = groupStopDequeued
	}
	// Can't lock the TaskSet mutex while holding a signal mutex.
	t.tg.signalHandlers.mu.Unlock()
	defer t.tg.signalHandlers.mu.Lock()
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	// The signal mutex was dropped above, so the tracer may have detached in
	// the window; re-check under the TaskSet mutex.
	tracer := t.Tracer()
	if tracer == nil {
		return false
	}
	t.ptraceCode = info.Signo
	t.ptraceSiginfo = info
	t.Debugf("Entering signal-delivery-stop for signal %d", info.Signo)
	if t.beginPtraceStopLocked() {
		tracer.signalStop(t, arch.CLD_TRAPPED, info.Signo)
		tracer.tg.eventQueue.Notify(EventTraceeStop)
	}
	return true
}

// ptraceSeccomp is called when a seccomp-bpf filter returns action
// SECCOMP_RET_TRACE to check if t should enter PTRACE_EVENT_SECCOMP stop. data
// is the lower 16 bits of the filter's return value.
func (t *Task) ptraceSeccomp(data uint16) bool {
	if !t.hasTracer() {
		return false
	}
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	if !t.ptraceOpts.TraceSeccomp {
		return false
	}
	t.Debugf("Entering PTRACE_EVENT_SECCOMP stop")
	t.ptraceEventLocked(_PTRACE_EVENT_SECCOMP, uint64(data))
	return true
}

// ptraceSyscallEnter is called immediately before entering a syscall to check
// if t should enter ptrace syscall-enter-stop.
func (t *Task) ptraceSyscallEnter() (taskRunState, bool) {
	if !t.hasTracer() {
		return nil, false
	}
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	switch t.ptraceSyscallMode {
	case ptraceSyscallNone:
		return nil, false
	case ptraceSyscallIntercept:
		t.Debugf("Entering syscall-enter-stop from PTRACE_SYSCALL")
		t.ptraceSyscallStopLocked()
		return (*runSyscallAfterSyscallEnterStop)(nil), true
	case ptraceSyscallEmu:
		t.Debugf("Entering syscall-enter-stop from PTRACE_SYSEMU")
		t.ptraceSyscallStopLocked()
		return (*runSyscallAfterSysemuStop)(nil), true
	}
	panic(fmt.Sprintf("Unknown ptraceSyscallMode: %v", t.ptraceSyscallMode))
}

// ptraceSyscallExit is called immediately after leaving a syscall to check if
// t should enter ptrace syscall-exit-stop.
func (t *Task) ptraceSyscallExit() {
	if !t.hasTracer() {
		return
	}
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	// Only PTRACE_SYSCALL (intercept mode) gets a syscall-exit-stop;
	// PTRACE_SYSEMU stops only at syscall entry.
	if t.ptraceSyscallMode != ptraceSyscallIntercept {
		return
	}
	t.Debugf("Entering syscall-exit-stop")
	t.ptraceSyscallStopLocked()
}

// ptraceSyscallStopLocked initiates a syscall-enter/exit-stop, reporting
// SIGTRAP ORed with 0x80 if PTRACE_O_TRACESYSGOOD is in effect.
//
// Preconditions: The TaskSet mutex must be locked.
func (t *Task) ptraceSyscallStopLocked() {
	code := int32(linux.SIGTRAP)
	if t.ptraceOpts.SysGood {
		code |= 0x80
	}
	t.ptraceTrapLocked(code)
}

// ptraceCloneKind identifies the clone-family syscall variant that created a
// new task, determining which PTRACE_EVENT (if any) is reported to a tracer.
type ptraceCloneKind int32

const (
	// ptraceCloneKindClone represents a call to Task.Clone where
	// TerminationSignal is not SIGCHLD and Vfork is false.
	ptraceCloneKindClone ptraceCloneKind = iota

	// ptraceCloneKindFork represents a call to Task.Clone where
	// TerminationSignal is SIGCHLD and Vfork is false.
	ptraceCloneKindFork

	// ptraceCloneKindVfork represents a call to Task.Clone where Vfork is
	// true.
	ptraceCloneKindVfork
)

// ptraceClone is called at the end of a clone or fork syscall to check if t
// should enter PTRACE_EVENT_CLONE, PTRACE_EVENT_FORK, or PTRACE_EVENT_VFORK
// stop. child is the new task.
+func (t *Task) ptraceClone(kind ptraceCloneKind, child *Task, opts *CloneOptions) bool { + if !t.hasTracer() { + return false + } + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + event := false + if !opts.Untraced { + switch kind { + case ptraceCloneKindClone: + if t.ptraceOpts.TraceClone { + t.Debugf("Entering PTRACE_EVENT_CLONE stop") + t.ptraceEventLocked(syscall.PTRACE_EVENT_CLONE, uint64(t.tg.pidns.tids[child])) + event = true + } + case ptraceCloneKindFork: + if t.ptraceOpts.TraceFork { + t.Debugf("Entering PTRACE_EVENT_FORK stop") + t.ptraceEventLocked(syscall.PTRACE_EVENT_FORK, uint64(t.tg.pidns.tids[child])) + event = true + } + case ptraceCloneKindVfork: + if t.ptraceOpts.TraceVfork { + t.Debugf("Entering PTRACE_EVENT_VFORK stop") + t.ptraceEventLocked(syscall.PTRACE_EVENT_VFORK, uint64(t.tg.pidns.tids[child])) + event = true + } + default: + panic(fmt.Sprintf("Unknown ptraceCloneKind: %v", kind)) + } + } + // "If the PTRACE_O_TRACEFORK, PTRACE_O_TRACEVFORK, or PTRACE_O_TRACECLONE + // options are in effect, then children created by, respectively, vfork(2) + // or clone(2) with the CLONE_VFORK flag, fork(2) or clone(2) with the exit + // signal set to SIGCHLD, and other kinds of clone(2), are automatically + // attached to the same tracer which traced their parent. SIGSTOP is + // delivered to the children, causing them to enter signal-delivery-stop + // after they exit the system call which created them." - ptrace(2) + // + // clone(2)'s documentation of CLONE_UNTRACED and CLONE_PTRACE is + // confusingly wrong; see kernel/fork.c:_do_fork() => copy_process() => + // include/linux/ptrace.h:ptrace_init_task(). + if event || opts.InheritTracer { + tracer := t.Tracer() + if tracer != nil { + child.ptraceTracer.Store(tracer) + tracer.ptraceTracees[child] = struct{}{} + // "Flags are inherited by new tracees created and "auto-attached" + // via active PTRACE_O_TRACEFORK, PTRACE_O_TRACEVFORK, or + // PTRACE_O_TRACECLONE options." 
+ child.ptraceOpts = t.ptraceOpts + child.tg.signalHandlers.mu.Lock() + // If the child is PT_SEIZED (currently not possible in the sentry + // because PTRACE_SEIZE is unimplemented, but for future + // reference), Linux just sets JOBCTL_TRAP_STOP instead, so the + // child skips signal-delivery-stop and goes directly to + // group-stop. + // + // The child will self-t.interrupt() when its task goroutine starts + // running, so we don't have to. + child.pendingSignals.enqueue(&arch.SignalInfo{ + Signo: int32(linux.SIGSTOP), + }) + child.tg.signalHandlers.mu.Unlock() + } + } + return event +} + +// ptraceVforkDone is called after the end of a vfork stop to check if t should +// enter PTRACE_EVENT_VFORK_DONE stop. child is the new task's thread ID in t's +// PID namespace. +func (t *Task) ptraceVforkDone(child ThreadID) bool { + if !t.hasTracer() { + return false + } + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + if !t.ptraceOpts.TraceVforkDone { + return false + } + t.Debugf("Entering PTRACE_EVENT_VFORK_DONE stop") + t.ptraceEventLocked(syscall.PTRACE_EVENT_VFORK_DONE, uint64(child)) + return true +} + +// ptraceExec is called at the end of an execve syscall to check if t should +// enter PTRACE_EVENT_EXEC stop. oldTID is t's thread ID, in its *tracer's* PID +// namespace, prior to the execve. (If t did not have a tracer at the time +// oldTID was read, oldTID may be 0. This is consistent with Linux.) +func (t *Task) ptraceExec(oldTID ThreadID) { + if !t.hasTracer() { + return + } + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + // Recheck with the TaskSet mutex locked. Most ptrace points don't need to + // do this because detaching resets ptrace options, but PTRACE_EVENT_EXEC + // is special because both TraceExec and !TraceExec do something if a + // tracer is attached. 
+ if !t.hasTracer() { + return + } + if t.ptraceOpts.TraceExec { + t.Debugf("Entering PTRACE_EVENT_EXEC stop") + t.ptraceEventLocked(syscall.PTRACE_EVENT_EXEC, uint64(oldTID)) + return + } + // "If the PTRACE_O_TRACEEXEC option is not in effect for the execing + // tracee, and if the tracee was PTRACE_ATTACHed rather that [sic] + // PTRACE_SEIZEd, the kernel delivers an extra SIGTRAP to the tracee after + // execve(2) returns. This is an ordinary signal (similar to one which can + // be generated by `kill -TRAP`, not a special kind of ptrace-stop. + // Employing PTRACE_GETSIGINFO for this signal returns si_code set to 0 + // (SI_USER). This signal may be blocked by signal mask, and thus may be + // delivered (much) later." - ptrace(2) + t.tg.signalHandlers.mu.Lock() + defer t.tg.signalHandlers.mu.Unlock() + t.sendSignalLocked(&arch.SignalInfo{ + Signo: int32(linux.SIGTRAP), + Code: arch.SignalInfoUser, + }, false /* group */) +} + +// ptraceExit is called early in the task exit path to check if t should enter +// PTRACE_EVENT_EXIT stop. +func (t *Task) ptraceExit() { + if !t.hasTracer() { + return + } + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + if !t.ptraceOpts.TraceExit { + return + } + t.tg.signalHandlers.mu.Lock() + status := t.exitStatus.Status() + t.tg.signalHandlers.mu.Unlock() + t.Debugf("Entering PTRACE_EVENT_EXIT stop") + t.ptraceEventLocked(syscall.PTRACE_EVENT_EXIT, uint64(status)) +} + +// Preconditions: The TaskSet mutex must be locked. +func (t *Task) ptraceEventLocked(event int32, msg uint64) { + t.ptraceEventMsg = msg + // """ + // PTRACE_EVENT stops are observed by the tracer as waitpid(2) returning + // with WIFSTOPPED(status), and WSTOPSIG(status) returns SIGTRAP. An + // additional bit is set in the higher byte of the status word: the value + // status>>8 will be + // + // (SIGTRAP | PTRACE_EVENT_foo << 8). + // + // ... 
+ // + // """ - ptrace(2) + t.ptraceTrapLocked(int32(linux.SIGTRAP) | (event << 8)) +} + +// ptraceKill implements ptrace(PTRACE_KILL, target). t is the caller. +func (t *Task) ptraceKill(target *Task) error { + t.tg.pidns.owner.mu.Lock() + defer t.tg.pidns.owner.mu.Unlock() + if target.Tracer() != t { + return syserror.ESRCH + } + target.tg.signalHandlers.mu.Lock() + defer target.tg.signalHandlers.mu.Unlock() + // "This operation is deprecated; do not use it! Instead, send a SIGKILL + // directly using kill(2) or tgkill(2). The problem with PTRACE_KILL is + // that it requires the tracee to be in signal-delivery-stop, otherwise it + // may not work (i.e., may complete successfully but won't kill the + // tracee)." - ptrace(2) + if target.stop == nil { + return nil + } + if _, ok := target.stop.(*ptraceStop); !ok { + return nil + } + target.ptraceCode = int32(linux.SIGKILL) + target.endInternalStopLocked() + return nil +} + +// Ptrace implements the ptrace system call. +func (t *Task) Ptrace(req int64, pid ThreadID, addr, data usermem.Addr) error { + // PTRACE_TRACEME ignores all other arguments. + if req == syscall.PTRACE_TRACEME { + return t.ptraceTraceme() + } + // All other ptrace requests operate on a current or future tracee + // specified by pid. + target := t.tg.pidns.TaskWithID(pid) + if target == nil { + return syserror.ESRCH + } + + // PTRACE_ATTACH (and PTRACE_SEIZE, which is unimplemented) do not require + // that target is not already a tracee. + if req == syscall.PTRACE_ATTACH { + return t.ptraceAttach(target) + } + // PTRACE_KILL (and PTRACE_INTERRUPT, which is unimplemented) require that + // the target is a tracee, but does not require that it is ptrace-stopped. + if req == syscall.PTRACE_KILL { + return t.ptraceKill(target) + } + // All other ptrace requests require that the target is a ptrace-stopped + // tracee, and freeze the ptrace-stop so the tracee can be operated on. 
+ t.tg.pidns.owner.mu.RLock() + if target.Tracer() != t { + t.tg.pidns.owner.mu.RUnlock() + return syserror.ESRCH + } + if !target.ptraceFreeze() { + t.tg.pidns.owner.mu.RUnlock() + // "Most ptrace commands (all except PTRACE_ATTACH, PTRACE_SEIZE, + // PTRACE_TRACEME, PTRACE_INTERRUPT, and PTRACE_KILL) require the + // tracee to be in a ptrace-stop, otherwise they fail with ESRCH." - + // ptrace(2) + return syserror.ESRCH + } + t.tg.pidns.owner.mu.RUnlock() + // Even if the target has a ptrace-stop active, the tracee's task goroutine + // may not yet have reached Task.doStop; wait for it to do so. This is safe + // because there's no way for target to initiate a ptrace-stop and then + // block (by calling Task.block) before entering it. + // + // Caveat: If tasks were just restored, the tracee's first call to + // Task.Activate (in Task.run) occurs before its first call to Task.doStop, + // which may block if the tracer's address space is active. + t.UninterruptibleSleepStart(true) + target.waitGoroutineStoppedOrExited() + t.UninterruptibleSleepFinish(true) + + // Resuming commands end the ptrace stop, but only if successful. 
+ switch req { + case syscall.PTRACE_DETACH: + if err := t.ptraceDetach(target, linux.Signal(data)); err != nil { + target.ptraceUnfreeze() + return err + } + return nil + case syscall.PTRACE_CONT: + if err := target.ptraceUnstop(ptraceSyscallNone, false, linux.Signal(data)); err != nil { + target.ptraceUnfreeze() + return err + } + return nil + case syscall.PTRACE_SYSCALL: + if err := target.ptraceUnstop(ptraceSyscallIntercept, false, linux.Signal(data)); err != nil { + target.ptraceUnfreeze() + return err + } + return nil + case syscall.PTRACE_SINGLESTEP: + if err := target.ptraceUnstop(ptraceSyscallNone, true, linux.Signal(data)); err != nil { + target.ptraceUnfreeze() + return err + } + return nil + case syscall.PTRACE_SYSEMU: + if err := target.ptraceUnstop(ptraceSyscallEmu, false, linux.Signal(data)); err != nil { + target.ptraceUnfreeze() + return err + } + return nil + case syscall.PTRACE_SYSEMU_SINGLESTEP: + if err := target.ptraceUnstop(ptraceSyscallEmu, true, linux.Signal(data)); err != nil { + target.ptraceUnfreeze() + return err + } + return nil + } + // All other ptrace requests expect us to unfreeze the stop. + defer target.ptraceUnfreeze() + + switch req { + case syscall.PTRACE_PEEKTEXT, syscall.PTRACE_PEEKDATA: + // "At the system call level, the PTRACE_PEEKTEXT, PTRACE_PEEKDATA, and + // PTRACE_PEEKUSER requests have a different API: they store the result + // at the address specified by the data parameter, and the return value + // is the error flag." 
- ptrace(2) + word := t.Arch().Native(0) + if _, err := usermem.CopyObjectIn(t, target.MemoryManager(), addr, word, usermem.IOOpts{ + IgnorePermissions: true, + }); err != nil { + return err + } + _, err := t.CopyOut(data, word) + return err + + case syscall.PTRACE_POKETEXT, syscall.PTRACE_POKEDATA: + _, err := usermem.CopyObjectOut(t, target.MemoryManager(), addr, t.Arch().Native(uintptr(data)), usermem.IOOpts{ + IgnorePermissions: true, + }) + return err + + case syscall.PTRACE_PEEKUSR: // aka PTRACE_PEEKUSER + n, err := target.Arch().PtracePeekUser(uintptr(addr)) + if err != nil { + return err + } + _, err = t.CopyOut(data, n) + return err + + case syscall.PTRACE_POKEUSR: // aka PTRACE_POKEUSER + return target.Arch().PtracePokeUser(uintptr(addr), uintptr(data)) + + case syscall.PTRACE_GETREGS: + // "Copy the tracee's general-purpose ... registers ... to the address + // data in the tracer. ... (addr is ignored.) Note that SPARC systems + // have the meaning of data and addr reversed ..." + _, err := target.Arch().PtraceGetRegs(&usermem.IOReadWriter{ + Ctx: t, + IO: t.MemoryManager(), + Addr: data, + Opts: usermem.IOOpts{ + AddressSpaceActive: true, + }, + }) + return err + + case syscall.PTRACE_GETFPREGS: + _, err := target.Arch().PtraceGetFPRegs(&usermem.IOReadWriter{ + Ctx: t, + IO: t.MemoryManager(), + Addr: data, + Opts: usermem.IOOpts{ + AddressSpaceActive: true, + }, + }) + return err + + case syscall.PTRACE_GETREGSET: + // "Read the tracee's registers. addr specifies, in an + // architecture-dependent way, the type of registers to be read. ... + // data points to a struct iovec, which describes the destination + // buffer's location and length. On return, the kernel modifies iov.len + // to indicate the actual number of bytes returned." 
- ptrace(2) + ars, err := t.CopyInIovecs(data, 1) + if err != nil { + return err + } + ar := ars.Head() + n, err := target.Arch().PtraceGetRegSet(uintptr(addr), &usermem.IOReadWriter{ + Ctx: t, + IO: t.MemoryManager(), + Addr: ar.Start, + Opts: usermem.IOOpts{ + AddressSpaceActive: true, + }, + }, int(ar.Length())) + if err != nil { + return err + } + ar.End -= usermem.Addr(n) + return t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar)) + + case syscall.PTRACE_SETREGS: + _, err := target.Arch().PtraceSetRegs(&usermem.IOReadWriter{ + Ctx: t, + IO: t.MemoryManager(), + Addr: data, + Opts: usermem.IOOpts{ + AddressSpaceActive: true, + }, + }) + return err + + case syscall.PTRACE_SETFPREGS: + _, err := target.Arch().PtraceSetFPRegs(&usermem.IOReadWriter{ + Ctx: t, + IO: t.MemoryManager(), + Addr: data, + Opts: usermem.IOOpts{ + AddressSpaceActive: true, + }, + }) + return err + + case syscall.PTRACE_SETREGSET: + ars, err := t.CopyInIovecs(data, 1) + if err != nil { + return err + } + ar := ars.Head() + n, err := target.Arch().PtraceSetRegSet(uintptr(addr), &usermem.IOReadWriter{ + Ctx: t, + IO: t.MemoryManager(), + Addr: ar.Start, + Opts: usermem.IOOpts{ + AddressSpaceActive: true, + }, + }, int(ar.Length())) + if err != nil { + return err + } + ar.End -= usermem.Addr(n) + return t.CopyOutIovecs(data, usermem.AddrRangeSeqOf(ar)) + + case syscall.PTRACE_GETSIGINFO: + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + if target.ptraceSiginfo == nil { + return syserror.EINVAL + } + _, err := t.CopyOut(data, target.ptraceSiginfo) + return err + + case syscall.PTRACE_SETSIGINFO: + var info arch.SignalInfo + if _, err := t.CopyIn(data, &info); err != nil { + return err + } + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + if target.ptraceSiginfo == nil { + return syserror.EINVAL + } + target.ptraceSiginfo = &info + return nil + + case PTRACE_GETSIGMASK: + if addr != linux.SignalSetSize { + return syserror.EINVAL + } + target.mu.Lock() + 
defer target.mu.Unlock() + _, err := t.CopyOut(data, target.tr.SignalMask) + return err + + case PTRACE_SETSIGMASK: + if addr != linux.SignalSetSize { + return syserror.EINVAL + } + var mask linux.SignalSet + if _, err := t.CopyIn(data, &mask); err != nil { + return err + } + // The target's task goroutine is stopped, so this is safe: + target.SetSignalMask(mask &^ UnblockableSignals) + return nil + + case syscall.PTRACE_SETOPTIONS: + t.tg.pidns.owner.mu.Lock() + defer t.tg.pidns.owner.mu.Unlock() + validOpts := uintptr(_PTRACE_O_EXITKILL | syscall.PTRACE_O_TRACESYSGOOD | syscall.PTRACE_O_TRACECLONE | + syscall.PTRACE_O_TRACEEXEC | syscall.PTRACE_O_TRACEEXIT | syscall.PTRACE_O_TRACEFORK | + _PTRACE_O_TRACESECCOMP | syscall.PTRACE_O_TRACEVFORK | syscall.PTRACE_O_TRACEVFORKDONE) + if uintptr(data)&^validOpts != 0 { + return syserror.EINVAL + } + target.ptraceOpts = ptraceOptions{ + ExitKill: data&_PTRACE_O_EXITKILL != 0, + SysGood: data&syscall.PTRACE_O_TRACESYSGOOD != 0, + TraceClone: data&syscall.PTRACE_O_TRACECLONE != 0, + TraceExec: data&syscall.PTRACE_O_TRACEEXEC != 0, + TraceExit: data&syscall.PTRACE_O_TRACEEXIT != 0, + TraceFork: data&syscall.PTRACE_O_TRACEFORK != 0, + TraceSeccomp: data&_PTRACE_O_TRACESECCOMP != 0, + TraceVfork: data&syscall.PTRACE_O_TRACEVFORK != 0, + TraceVforkDone: data&syscall.PTRACE_O_TRACEVFORKDONE != 0, + } + return nil + + case syscall.PTRACE_GETEVENTMSG: + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + _, err := t.CopyOut(usermem.Addr(data), target.ptraceEventMsg) + return err + + default: + // PEEKSIGINFO is unimplemented but seems to have no users anywhere. + return syserror.EIO + } +} diff --git a/pkg/sentry/kernel/rseq.go b/pkg/sentry/kernel/rseq.go new file mode 100644 index 000000000..635372993 --- /dev/null +++ b/pkg/sentry/kernel/rseq.go @@ -0,0 +1,118 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/hostcpu" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// Restartable sequences, as described in https://lwn.net/Articles/650333/. + +// RSEQCriticalRegion describes a restartable sequence critical region. +type RSEQCriticalRegion struct { + // When a task in this thread group has its CPU preempted (as defined by + // platform.ErrContextCPUPreempted) or has a signal delivered to an + // application handler while its instruction pointer is in CriticalSection, + // set the instruction pointer to Restart and application register r10 (on + // amd64) to the former instruction pointer. + CriticalSection usermem.AddrRange + Restart usermem.Addr +} + +// RSEQAvailable returns true if t supports restartable sequences. +func (t *Task) RSEQAvailable() bool { + return t.k.useHostCores && t.k.Platform.DetectsCPUPreemption() +} + +// RSEQCriticalRegion returns a copy of t's thread group's current restartable +// sequence. +func (t *Task) RSEQCriticalRegion() RSEQCriticalRegion { + return *t.tg.rscr.Load().(*RSEQCriticalRegion) +} + +// SetRSEQCriticalRegion replaces t's thread group's restartable sequence. +// +// Preconditions: t.RSEQAvailable() == true. 
+func (t *Task) SetRSEQCriticalRegion(rscr RSEQCriticalRegion) error { + // These checks are somewhat more lenient than in Linux, which (bizarrely) + // requires rscr.CriticalSection to be non-empty and rscr.Restart to be + // outside of rscr.CriticalSection, even if rscr.CriticalSection.Start == 0 + // (which disables the critical region). + if rscr.CriticalSection.Start == 0 { + rscr.CriticalSection.End = 0 + rscr.Restart = 0 + t.tg.rscr.Store(&rscr) + return nil + } + if rscr.CriticalSection.Start >= rscr.CriticalSection.End { + return syserror.EINVAL + } + if rscr.CriticalSection.Contains(rscr.Restart) { + return syserror.EINVAL + } + // TODO: check that rscr.CriticalSection and rscr.Restart are in + // the application address range, for consistency with Linux + t.tg.rscr.Store(&rscr) + return nil +} + +// RSEQCPUAddr returns the address that RSEQ will keep updated with t's CPU +// number. +// +// Preconditions: The caller must be running on the task goroutine. +func (t *Task) RSEQCPUAddr() usermem.Addr { + return t.rseqCPUAddr +} + +// SetRSEQCPUAddr replaces the address that RSEQ will keep updated with t's CPU +// number. +// +// Preconditions: t.RSEQAvailable() == true. The caller must be running on the +// task goroutine. t's AddressSpace must be active. +func (t *Task) SetRSEQCPUAddr(addr usermem.Addr) error { + t.rseqCPUAddr = addr + if addr != 0 { + if err := t.rseqCopyOutCPU(); err != nil { + t.rseqCPUAddr = 0 + t.rseqCPU = -1 + return syserror.EINVAL // yes, EINVAL, not err or EFAULT + } + } else { + t.rseqCPU = -1 + } + return nil +} + +// Preconditions: The caller must be running on the task goroutine. t's +// AddressSpace must be active. +func (t *Task) rseqCopyOutCPU() error { + t.rseqCPU = int32(hostcpu.GetCPU()) + buf := t.CopyScratchBuffer(4) + usermem.ByteOrder.PutUint32(buf, uint32(t.rseqCPU)) + _, err := t.CopyOutBytes(t.rseqCPUAddr, buf) + return err +} + +// Preconditions: The caller must be running on the task goroutine. 
+func (t *Task) rseqInterrupt() { + rscr := t.tg.rscr.Load().(*RSEQCriticalRegion) + if ip := t.Arch().IP(); rscr.CriticalSection.Contains(usermem.Addr(ip)) { + t.Debugf("Interrupted RSEQ critical section at %#x; restarting at %#x", ip, rscr.Restart) + t.Arch().SetIP(uintptr(rscr.Restart)) + t.Arch().SetRSEQInterruptedIP(ip) + } +} diff --git a/pkg/sentry/kernel/sched/BUILD b/pkg/sentry/kernel/sched/BUILD new file mode 100644 index 000000000..b533c51c4 --- /dev/null +++ b/pkg/sentry/kernel/sched/BUILD @@ -0,0 +1,20 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "sched", + srcs = [ + "cpuset.go", + "sched.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/sched", + visibility = ["//pkg/sentry:internal"], +) + +go_test( + name = "sched_test", + size = "small", + srcs = ["cpuset_test.go"], + embed = [":sched"], +) diff --git a/pkg/sentry/kernel/sched/cpuset.go b/pkg/sentry/kernel/sched/cpuset.go new file mode 100644 index 000000000..0a97603f0 --- /dev/null +++ b/pkg/sentry/kernel/sched/cpuset.go @@ -0,0 +1,105 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sched + +import "math/bits" + +const ( + bitsPerByte = 8 + bytesPerLong = 8 // only for 64-bit architectures +) + +// CPUSet contains a bitmap to record CPU information. 
+// +// Note that this definition is only correct for little-endian architectures, +// since Linux's cpumask_t uses unsigned long. +type CPUSet []byte + +// CPUSetSize returns the size in bytes of a CPUSet that can contain num cpus. +func CPUSetSize(num uint) uint { + // NOTE: Applications may expect that the size of a CPUSet in + // bytes is always a multiple of sizeof(unsigned long), since this is true + // in Linux. Thus we always round up. + bytes := (num + bitsPerByte - 1) / bitsPerByte + longs := (bytes + bytesPerLong - 1) / bytesPerLong + return longs * bytesPerLong +} + +// NewCPUSet returns a CPUSet for the given number of CPUs which initially +// contains no CPUs. +func NewCPUSet(num uint) CPUSet { + return CPUSet(make([]byte, CPUSetSize(num))) +} + +// NewFullCPUSet returns a CPUSet for the given number of CPUs, all of which +// are present in the set. +func NewFullCPUSet(num uint) CPUSet { + c := NewCPUSet(num) + var i uint + for ; i < num/bitsPerByte; i++ { + c[i] = 0xff + } + if rem := num % bitsPerByte; rem != 0 { + c[i] = (1 << rem) - 1 + } + return c +} + +// Size returns the size of 'c' in bytes. +func (c CPUSet) Size() uint { + return uint(len(c)) +} + +// NumCPUs returns how many cpus are set in the CPUSet. +func (c CPUSet) NumCPUs() uint { + var n int + for _, b := range c { + n += bits.OnesCount8(b) + } + return uint(n) +} + +// Copy returns a copy of the CPUSet. +func (c CPUSet) Copy() CPUSet { + return append(CPUSet(nil), c...) +} + +// Set sets the bit corresponding to cpu. +func (c *CPUSet) Set(cpu uint) { + (*c)[cpu/bitsPerByte] |= 1 << (cpu % bitsPerByte) +} + +// ClearAbove clears bits corresponding to cpu and all higher cpus. +func (c *CPUSet) ClearAbove(cpu uint) { + i := cpu / bitsPerByte + if i >= c.Size() { + return + } + (*c)[i] &^= 0xff << (cpu % bitsPerByte) + for i++; i < c.Size(); i++ { + (*c)[i] = 0 + } +} + +// ForEachCPU iterates over the CPUSet and calls fn with the cpu index if +// it's set. 
+func (c CPUSet) ForEachCPU(fn func(uint)) { + for i := uint(0); i < c.Size()*bitsPerByte; i++ { + bit := uint(1) << (i & (bitsPerByte - 1)) + if uint(c[i/bitsPerByte])&bit == bit { + fn(i) + } + } +} diff --git a/pkg/sentry/kernel/sched/cpuset_test.go b/pkg/sentry/kernel/sched/cpuset_test.go new file mode 100644 index 000000000..8a6e12958 --- /dev/null +++ b/pkg/sentry/kernel/sched/cpuset_test.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sched + +import ( + "testing" +) + +func TestNumCPUs(t *testing.T) { + for i := uint(0); i < 1024; i++ { + c := NewCPUSet(i) + for j := uint(0); j < i; j++ { + c.Set(j) + } + n := c.NumCPUs() + if n != i { + t.Errorf("got wrong number of cpus %d, want %d", n, i) + } + } +} + +func TestClearAbove(t *testing.T) { + const n = 1024 + c := NewFullCPUSet(n) + for i := uint(0); i < n; i++ { + cpu := n - i + c.ClearAbove(cpu) + if got := c.NumCPUs(); got != cpu { + t.Errorf("iteration %d: got %d cpus, wanted %d", i, got, cpu) + } + } +} diff --git a/pkg/sentry/kernel/sched/sched.go b/pkg/sentry/kernel/sched/sched.go new file mode 100644 index 000000000..f1de1da60 --- /dev/null +++ b/pkg/sentry/kernel/sched/sched.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sched implements scheduler related features. +package sched diff --git a/pkg/sentry/kernel/seccomp.go b/pkg/sentry/kernel/seccomp.go new file mode 100644 index 000000000..b7c4a507f --- /dev/null +++ b/pkg/sentry/kernel/seccomp.go @@ -0,0 +1,205 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/bpf" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +const maxSyscallFilterInstructions = 1 << 15 + +type seccompResult int + +const ( + // seccompResultDeny indicates that a syscall should not be executed. + seccompResultDeny seccompResult = iota + + // seccompResultAllow indicates that a syscall should be executed. 
+ seccompResultAllow + + // seccompResultKill indicates that the task should be killed immediately, + // with the exit status indicating that the task was killed by SIGSYS. + seccompResultKill + + // seccompResultTrace indicates that a ptracer was successfully notified as + // a result of a SECCOMP_RET_TRACE. + seccompResultTrace +) + +// seccompData is equivalent to struct seccomp_data, which contains the data +// passed to seccomp-bpf filters. +type seccompData struct { + // nr is the system call number. + nr int32 + + // arch is an AUDIT_ARCH_* value indicating the system call convention. + arch uint32 + + // instructionPointer is the value of the instruction pointer at the time + // of the system call. + instructionPointer uint64 + + // args contains the first 6 system call arguments. + args [6]uint64 +} + +func (d *seccompData) asBPFInput() bpf.Input { + return bpf.InputBytes{binary.Marshal(nil, usermem.ByteOrder, d), usermem.ByteOrder} +} + +func seccompSiginfo(t *Task, errno, sysno int32, ip usermem.Addr) *arch.SignalInfo { + si := &arch.SignalInfo{ + Signo: int32(linux.SIGSYS), + Errno: errno, + Code: arch.SYS_SECCOMP, + } + si.SetCallAddr(uint64(ip)) + si.SetSyscall(sysno) + si.SetArch(t.SyscallTable().AuditNumber) + return si +} + +// checkSeccompSyscall applies the task's seccomp filters before the execution +// of syscall sysno at instruction pointer ip. (These parameters must be passed +// in because vsyscalls do not use the values in t.Arch().) +// +// Preconditions: The caller must be running on the task goroutine. +func (t *Task) checkSeccompSyscall(sysno int32, args arch.SyscallArguments, ip usermem.Addr) seccompResult { + result := t.evaluateSyscallFilters(sysno, args, ip) + switch result & linux.SECCOMP_RET_ACTION { + case linux.SECCOMP_RET_TRAP: + // "Results in the kernel sending a SIGSYS signal to the triggering + // task without executing the system call. ... The SECCOMP_RET_DATA + // portion of the return value will be passed as si_errno." 
- + // Documentation/prctl/seccomp_filter.txt + t.SendSignal(seccompSiginfo(t, int32(result&linux.SECCOMP_RET_DATA), sysno, ip)) + return seccompResultDeny + + case linux.SECCOMP_RET_ERRNO: + // "Results in the lower 16-bits of the return value being passed to + // userland as the errno without executing the system call." + t.Arch().SetReturn(-uintptr(result & linux.SECCOMP_RET_DATA)) + return seccompResultDeny + + case linux.SECCOMP_RET_TRACE: + // "When returned, this value will cause the kernel to attempt to + // notify a ptrace()-based tracer prior to executing the system call. + // If there is no tracer present, -ENOSYS is returned to userland and + // the system call is not executed." + if t.ptraceSeccomp(uint16(result & linux.SECCOMP_RET_DATA)) { + return seccompResultTrace + } + // This useless-looking temporary is needed because Go. + tmp := uintptr(syscall.ENOSYS) + t.Arch().SetReturn(-tmp) + return seccompResultDeny + + case linux.SECCOMP_RET_ALLOW: + // "Results in the system call being executed." + return seccompResultAllow + + case linux.SECCOMP_RET_KILL: + // "Results in the task exiting immediately without executing the + // system call. The exit status of the task will be SIGSYS, not + // SIGKILL." + fallthrough + default: // consistent with Linux + return seccompResultKill + } +} + +func (t *Task) evaluateSyscallFilters(sysno int32, args arch.SyscallArguments, ip usermem.Addr) uint32 { + data := seccompData{ + nr: sysno, + arch: t.tc.st.AuditNumber, + instructionPointer: uint64(ip), + } + // data.args is []uint64 and args is []arch.SyscallArgument (uintptr), so + // we can't do any slicing tricks or even use copy/append here. + for i, arg := range args { + if i >= len(data.args) { + break + } + data.args[i] = arg.Uint64() + } + input := data.asBPFInput() + + ret := uint32(linux.SECCOMP_RET_ALLOW) + // "Every filter successfully installed will be evaluated (in reverse + // order) for each system call the task makes." 
- kernel/seccomp.c + for i := len(t.syscallFilters) - 1; i >= 0; i-- { + thisRet, err := bpf.Exec(t.syscallFilters[i], input) + if err != nil { + t.Debugf("seccomp-bpf filter %d returned error: %v", i, err) + thisRet = linux.SECCOMP_RET_KILL + } + // "If multiple filters exist, the return value for the evaluation of a + // given system call will always use the highest precedent value." - + // Documentation/prctl/seccomp_filter.txt + // + // (Note that this contradicts prctl(2): "If the filters permit prctl() + // calls, then additional filters can be added; they are run in order + // until the first non-allow result is seen." prctl(2) is incorrect.) + // + // "The ordering ensures that a min_t() over composed return values + // always selects the least permissive choice." - + // include/uapi/linux/seccomp.h + if (thisRet & linux.SECCOMP_RET_ACTION) < (ret & linux.SECCOMP_RET_ACTION) { + ret = thisRet + } + } + + return ret +} + +// AppendSyscallFilter adds BPF program p as a system call filter. +// +// Preconditions: The caller must be running on the task goroutine. +func (t *Task) AppendSyscallFilter(p bpf.Program) error { + // Cap the combined length of all syscall filters (plus a penalty of 4 + // instructions per filter beyond the first) to + // maxSyscallFilterInstructions. (This restriction is inherited from + // Linux.) + totalLength := p.Length() + for _, f := range t.syscallFilters { + totalLength += f.Length() + 4 + } + if totalLength > maxSyscallFilterInstructions { + return syserror.ENOMEM + } + t.mu.Lock() + defer t.mu.Unlock() + t.syscallFilters = append(t.syscallFilters, p) + return nil +} + +// SeccompMode returns a SECCOMP_MODE_* constant indicating the task's current +// seccomp syscall filtering mode, appropriate for both prctl(PR_GET_SECCOMP) +// and /proc/[pid]/status. 
+func (t *Task) SeccompMode() int { + t.mu.Lock() + defer t.mu.Unlock() + if len(t.syscallFilters) > 0 { + return linux.SECCOMP_MODE_FILTER + } + return linux.SECCOMP_MODE_NONE +} diff --git a/pkg/sentry/kernel/semaphore/BUILD b/pkg/sentry/kernel/semaphore/BUILD new file mode 100644 index 000000000..1656ad126 --- /dev/null +++ b/pkg/sentry/kernel/semaphore/BUILD @@ -0,0 +1,62 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_template_instance( + name = "waiter_list", + out = "waiter_list.go", + package = "semaphore", + prefix = "waiter", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*waiter", + }, +) + +go_stateify( + name = "semaphore_state", + srcs = [ + "semaphore.go", + "waiter_list.go", + ], + out = "semaphore_autogen_state.go", + package = "semaphore", +) + +go_library( + name = "semaphore", + srcs = [ + "semaphore.go", + "semaphore_autogen_state.go", + "waiter_list.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/semaphore", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/log", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/time", + "//pkg/state", + "//pkg/state/statefile", + "//pkg/syserror", + ], +) + +go_test( + name = "semaphore_test", + size = "small", + srcs = ["semaphore_test.go"], + embed = [":semaphore"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/context", + "//pkg/sentry/context/contexttest", + "//pkg/sentry/kernel/auth", + "//pkg/syserror", + ], +) diff --git a/pkg/sentry/kernel/semaphore/semaphore.go b/pkg/sentry/kernel/semaphore/semaphore.go new file mode 100644 index 000000000..19ad5d537 --- /dev/null +++ b/pkg/sentry/kernel/semaphore/semaphore.go @@ -0,0 +1,473 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semaphore implements System V semaphores. +package semaphore + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +const ( + valueMax = 32767 // SEMVMX + + // semaphoresMax is "maximum number of semaphores per semaphore ID" (SEMMSL). + semaphoresMax = 32000 + + // setMax is "system-wide limit on the number of semaphore sets" (SEMMNI). + setsMax = 32000 + + // semaphoresTotalMax is "system-wide limit on the number of semaphores" + // (SEMMNS = SEMMNI*SEMMSL). + semaphoresTotalMax = 1024000000 +) + +// Registry maintains a set of semaphores that can be found by key or ID. +type Registry struct { + // mu protects all fields below. + mu sync.Mutex `state:"nosave"` + semaphores map[int32]*Set + lastIDUsed int32 +} + +// Set represents a set of semaphores that can be operated atomically. +type Set struct { + // Id is a handle that identifies the set. + ID int32 + + // key is an user provided key that can be shared between processes. + key int32 + + // creator is the user that created the set. Immutable. + creator fs.FileOwner + + // mu protects all fields below. 
+	mu         sync.Mutex `state:"nosave"`
+	owner      fs.FileOwner
+	perms      fs.FilePermissions
+	opTime     ktime.Time
+	changeTime ktime.Time
+	sems       []sem
+
+	// dead is set to true when the set is removed and can't be reached anymore.
+	// All waiters must wake up and fail when set is dead.
+	dead bool
+}
+
+// sem represents a single semaphore from a set.
+type sem struct {
+	value   int16
+	waiters waiterList `state:"zerovalue"`
+}
+
+// waiter represents a caller that is waiting for the semaphore value to
+// become positive or zero.
+type waiter struct {
+	waiterEntry
+
+	// value represents how much resource the waiter needs to wake up.
+	value int16
+	ch    chan struct{}
+}
+
+// NewRegistry creates a new semaphore set registry.
+func NewRegistry() *Registry {
+	return &Registry{semaphores: make(map[int32]*Set)}
+}
+
+// FindOrCreate searches for a semaphore set that matches 'key'. If not found,
+// it may create a new one if requested. If private is true, key is ignored and
+// a new set is always created. If create is false, it fails if a set cannot
+// be found. If exclusive is true, it fails if a set with the same key already
+// exists.
+func (r *Registry) FindOrCreate(ctx context.Context, key, nsems int32, mode linux.FileMode, private, create, exclusive bool) (*Set, error) {
+	if nsems < 0 || nsems > semaphoresMax {
+		return nil, syserror.EINVAL
+	}
+
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if !private {
+		// Look up an existing semaphore.
+		if set := r.findByKey(key); set != nil {
+			// Check that caller can access semaphore set.
+			creds := auth.CredentialsFromContext(ctx)
+			if !set.checkPerms(creds, fs.PermsFromMode(mode)) {
+				return nil, syserror.EACCES
+			}
+
+			// Validate parameters.
+			if nsems > int32(set.size()) {
+				return nil, syserror.EINVAL
+			}
+			if create && exclusive {
+				return nil, syserror.EEXIST
+			}
+			return set, nil
+		}
+
+		if !create {
+			// Semaphore not found and should not be created.
+ return nil, syserror.ENOENT + } + } + + // Zero is only valid if an existing set is found. + if nsems == 0 { + return nil, syserror.EINVAL + } + + // Apply system limits. + if len(r.semaphores) >= setsMax { + return nil, syserror.EINVAL + } + if r.totalSems() > int(semaphoresTotalMax-nsems) { + return nil, syserror.EINVAL + } + + // Finally create a new set. + owner := fs.FileOwnerFromContext(ctx) + perms := fs.FilePermsFromMode(mode) + return r.newSet(ctx, key, owner, owner, perms, nsems) +} + +// RemoveID removes set with give 'id' from the registry and marks the set as +// dead. All waiters will be awakened and fail. +func (r *Registry) RemoveID(id int32, creds *auth.Credentials) error { + r.mu.Lock() + defer r.mu.Unlock() + + set := r.semaphores[id] + if set == nil { + return syserror.EINVAL + } + + // "The effective user ID of the calling process must match the creator or + // owner of the semaphore set, or the caller must be privileged." + if !set.checkCredentials(creds) && !set.checkCapability(creds) { + return syserror.EACCES + } + + delete(r.semaphores, set.ID) + set.destroy() + return nil +} + +func (r *Registry) newSet(ctx context.Context, key int32, owner, creator fs.FileOwner, perms fs.FilePermissions, nsems int32) (*Set, error) { + set := &Set{ + key: key, + owner: owner, + creator: owner, + perms: perms, + changeTime: ktime.NowFromContext(ctx), + sems: make([]sem, nsems), + } + + // Find the next available ID. + for id := r.lastIDUsed + 1; id != r.lastIDUsed; id++ { + // Handle wrap around. + if id < 0 { + id = 0 + continue + } + if r.semaphores[id] == nil { + r.lastIDUsed = id + r.semaphores[id] = set + set.ID = id + return set, nil + } + } + + log.Warningf("Semaphore map is full, they must be leaking") + return nil, syserror.ENOMEM +} + +// FindByID looks up a set given an ID. 
+func (r *Registry) FindByID(id int32) *Set { + r.mu.Lock() + defer r.mu.Unlock() + return r.semaphores[id] +} + +func (r *Registry) findByKey(key int32) *Set { + for _, v := range r.semaphores { + if v.key == key { + return v + } + } + return nil +} + +func (r *Registry) totalSems() int { + totalSems := 0 + for _, v := range r.semaphores { + totalSems += v.size() + } + return totalSems +} + +func (s *Set) findSem(num int32) *sem { + if num < 0 || int(num) >= s.size() { + return nil + } + return &s.sems[num] +} + +func (s *Set) size() int { + return len(s.sems) +} + +// Change changes some fields from the set atomically. +func (s *Set) Change(ctx context.Context, creds *auth.Credentials, owner fs.FileOwner, perms fs.FilePermissions) error { + s.mu.Lock() + defer s.mu.Unlock() + + // "The effective UID of the calling process must match the owner or creator + // of the semaphore set, or the caller must be privileged." + if !s.checkCredentials(creds) && !s.checkCapability(creds) { + return syserror.EACCES + } + + s.owner = owner + s.perms = perms + s.changeTime = ktime.NowFromContext(ctx) + return nil +} + +// SetVal overrides a semaphore value, waking up waiters as needed. +func (s *Set) SetVal(ctx context.Context, num int32, val int16, creds *auth.Credentials) error { + if val < 0 || val > valueMax { + return syserror.ERANGE + } + + s.mu.Lock() + defer s.mu.Unlock() + + // "The calling process must have alter permission on the semaphore set." + if !s.checkPerms(creds, fs.PermMask{Write: true}) { + return syserror.EACCES + } + + sem := s.findSem(num) + if sem == nil { + return syserror.ERANGE + } + + // TODO: Clear undo entries in all processes + sem.value = val + s.changeTime = ktime.NowFromContext(ctx) + sem.wakeWaiters() + return nil +} + +// GetVal returns a semaphore value. +func (s *Set) GetVal(num int32, creds *auth.Credentials) (int16, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // "The calling process must have read permission on the semaphore set." 
+	if !s.checkPerms(creds, fs.PermMask{Read: true}) {
+		return 0, syserror.EACCES
+	}
+
+	sem := s.findSem(num)
+	if sem == nil {
+		return 0, syserror.ERANGE
+	}
+	return sem.value, nil
+}
+
+// ExecuteOps attempts to execute a list of operations to the set. It only
+// succeeds when all operations can be applied. No changes are made if it fails.
+//
+// On failure, it may return an error (retries are hopeless) or it may return
+// a channel that can be waited on before attempting again.
+func (s *Set) ExecuteOps(ctx context.Context, ops []linux.Sembuf, creds *auth.Credentials) (chan struct{}, int32, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// Did it race with a removal operation?
+	if s.dead {
+		return nil, 0, syserror.EIDRM
+	}
+
+	// Validate the operations.
+	readOnly := true
+	for _, op := range ops {
+		if s.findSem(int32(op.SemNum)) == nil {
+			return nil, 0, syserror.EFBIG
+		}
+		if op.SemOp != 0 {
+			readOnly = false
+		}
+	}
+
+	if !s.checkPerms(creds, fs.PermMask{Read: readOnly, Write: !readOnly}) {
+		return nil, 0, syserror.EACCES
+	}
+
+	ch, num, err := s.executeOps(ctx, ops)
+	if err != nil {
+		return nil, 0, err
+	}
+	return ch, num, nil
+}
+
+func (s *Set) executeOps(ctx context.Context, ops []linux.Sembuf) (chan struct{}, int32, error) {
+	// Changes to semaphores go to this slice temporarily until they all succeed.
+	tmpVals := make([]int16, len(s.sems))
+	for i := range s.sems {
+		tmpVals[i] = s.sems[i].value
+	}
+
+	for _, op := range ops {
+		sem := &s.sems[op.SemNum]
+		if op.SemOp == 0 {
+			// Handle 'wait for zero' operation.
+			if tmpVals[op.SemNum] != 0 {
+				// Semaphore isn't 0, must wait.
+				if op.SemFlg&linux.IPC_NOWAIT != 0 {
+					return nil, 0, syserror.ErrWouldBlock
+				}
+
+				w := newWaiter(op.SemOp)
+				sem.waiters.PushBack(w)
+				return w.ch, int32(op.SemNum), nil
+			}
+		} else {
+			if op.SemOp < 0 {
+				// Handle 'wait' operation.
+ if -op.SemOp > valueMax { + return nil, 0, syserror.ERANGE + } + if -op.SemOp > tmpVals[op.SemNum] { + // Not enough resources, must wait. + if op.SemFlg&linux.IPC_NOWAIT != 0 { + return nil, 0, syserror.ErrWouldBlock + } + + w := newWaiter(op.SemOp) + sem.waiters.PushBack(w) + return w.ch, int32(op.SemNum), nil + } + } else { + // op.SemOp > 0: Handle 'signal' operation. + if tmpVals[op.SemNum] > valueMax-op.SemOp { + return nil, 0, syserror.ERANGE + } + } + + tmpVals[op.SemNum] += op.SemOp + } + } + + // All operations succeeded, apply them. + // TODO: handle undo operations. + for i, v := range tmpVals { + s.sems[i].value = v + s.sems[i].wakeWaiters() + } + s.opTime = ktime.NowFromContext(ctx) + return nil, 0, nil +} + +// AbortWait notifies that a waiter is giving up and will not wait on the +// channel anymore. +func (s *Set) AbortWait(num int32, ch chan struct{}) { + s.mu.Lock() + defer s.mu.Unlock() + + sem := &s.sems[num] + for w := sem.waiters.Front(); w != nil; w = w.Next() { + if w.ch == ch { + sem.waiters.Remove(w) + return + } + } + // Waiter may not be found in case it raced with wakeWaiters(). +} + +func (s *Set) checkCredentials(creds *auth.Credentials) bool { + return s.owner.UID == creds.EffectiveKUID || + s.owner.GID == creds.EffectiveKGID || + s.creator.UID == creds.EffectiveKUID || + s.creator.GID == creds.EffectiveKGID +} + +func (s *Set) checkCapability(creds *auth.Credentials) bool { + return creds.HasCapability(linux.CAP_IPC_OWNER) && creds.UserNamespace.MapFromKUID(s.owner.UID).Ok() +} + +func (s *Set) checkPerms(creds *auth.Credentials, reqPerms fs.PermMask) bool { + // Are we owner, or in group, or other? + p := s.perms.Other + if s.owner.UID == creds.EffectiveKUID { + p = s.perms.User + } else if creds.InGroup(s.owner.GID) { + p = s.perms.Group + } + + // Are permissions satisfied without capability checks? 
+	if p.SupersetOf(reqPerms) {
+		return true
+	}
+
+	return s.checkCapability(creds)
+}
+
+func (s *Set) destroy() {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// Notify all waiters. They will fail on the next attempt to execute
+	// operations and return error.
+	s.dead = true
+	for _, s := range s.sems {
+		for w := s.waiters.Front(); w != nil; w = w.Next() {
+			w.ch <- struct{}{}
+		}
+		s.waiters.Reset()
+	}
+}
+
+// wakeWaiters goes over all waiters and checks which of them can be notified.
+func (s *sem) wakeWaiters() {
+	// Note that this will release all waiters waiting for 0 too.
+	for w := s.waiters.Front(); w != nil; {
+		if s.value < w.value {
+			// Still blocked, skip it.
+			continue
+		}
+		w.ch <- struct{}{}
+		old := w
+		w = w.Next()
+		s.waiters.Remove(old)
+	}
+}
+
+func newWaiter(val int16) *waiter {
+	return &waiter{
+		value: val,
+		ch:    make(chan struct{}, 1),
+	}
+}
diff --git a/pkg/sentry/kernel/semaphore/semaphore_test.go b/pkg/sentry/kernel/semaphore/semaphore_test.go
new file mode 100644
index 000000000..0386586ab
--- /dev/null
+++ b/pkg/sentry/kernel/semaphore/semaphore_test.go
@@ -0,0 +1,172 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package semaphore + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +func executeOps(ctx context.Context, t *testing.T, set *Set, ops []linux.Sembuf, block bool) chan struct{} { + ch, _, err := set.executeOps(ctx, ops) + if err != nil { + t.Fatalf("ExecuteOps(ops) failed, err: %v, ops: %+v", err, ops) + } + if block { + if ch == nil { + t.Fatalf("ExecuteOps(ops) got: nil, expected: !nil, ops: %+v", ops) + } + if signalled(ch) { + t.Fatalf("ExecuteOps(ops) channel should not have been signalled, ops: %+v", ops) + } + } else { + if ch != nil { + t.Fatalf("ExecuteOps(ops) got: %v, expected: nil, ops: %+v", ch, ops) + } + } + return ch +} + +func signalled(ch chan struct{}) bool { + select { + case <-ch: + return true + default: + return false + } +} + +func TestBasic(t *testing.T) { + ctx := contexttest.Context(t) + set := &Set{ID: 123, sems: make([]sem, 1)} + ops := []linux.Sembuf{ + linux.Sembuf{SemOp: 1}, + } + executeOps(ctx, t, set, ops, false) + + ops[0].SemOp = -1 + executeOps(ctx, t, set, ops, false) + + ops[0].SemOp = -1 + ch1 := executeOps(ctx, t, set, ops, true) + + ops[0].SemOp = 1 + executeOps(ctx, t, set, ops, false) + if !signalled(ch1) { + t.Fatalf("ExecuteOps(ops) channel should not have been signalled, ops: %+v", ops) + } +} + +func TestWaitForZero(t *testing.T) { + ctx := contexttest.Context(t) + set := &Set{ID: 123, sems: make([]sem, 1)} + ops := []linux.Sembuf{ + linux.Sembuf{SemOp: 0}, + } + executeOps(ctx, t, set, ops, false) + + ops[0].SemOp = -2 + ch1 := executeOps(ctx, t, set, ops, true) + + ops[0].SemOp = 0 + executeOps(ctx, t, set, ops, false) + + ops[0].SemOp = 1 + executeOps(ctx, t, set, ops, false) + + ops[0].SemOp = 0 + chZero1 := executeOps(ctx, t, set, ops, true) + + ops[0].SemOp 
= 0 + chZero2 := executeOps(ctx, t, set, ops, true) + + ops[0].SemOp = 1 + executeOps(ctx, t, set, ops, false) + if !signalled(ch1) { + t.Fatalf("ExecuteOps(ops) channel should have been signalled, ops: %+v, set: %+v", ops, set) + } + + ops[0].SemOp = -2 + executeOps(ctx, t, set, ops, false) + if !signalled(chZero1) { + t.Fatalf("ExecuteOps(ops) channel zero 1 should have been signalled, ops: %+v, set: %+v", ops, set) + } + if !signalled(chZero2) { + t.Fatalf("ExecuteOps(ops) channel zero 2 should have been signalled, ops: %+v, set: %+v", ops, set) + } +} + +func TestNoWait(t *testing.T) { + ctx := contexttest.Context(t) + set := &Set{ID: 123, sems: make([]sem, 1)} + ops := []linux.Sembuf{ + linux.Sembuf{SemOp: 1}, + } + executeOps(ctx, t, set, ops, false) + + ops[0].SemOp = -2 + ops[0].SemFlg = linux.IPC_NOWAIT + if _, _, err := set.executeOps(ctx, ops); err != syserror.ErrWouldBlock { + t.Fatalf("ExecuteOps(ops) wrong result, got: %v, expected: %v", err, syserror.ErrWouldBlock) + } + + ops[0].SemOp = 0 + ops[0].SemFlg = linux.IPC_NOWAIT + if _, _, err := set.executeOps(ctx, ops); err != syserror.ErrWouldBlock { + t.Fatalf("ExecuteOps(ops) wrong result, got: %v, expected: %v", err, syserror.ErrWouldBlock) + } +} + +func TestUnregister(t *testing.T) { + ctx := contexttest.Context(t) + r := NewRegistry() + set, err := r.FindOrCreate(ctx, 123, 2, linux.FileMode(0x600), true, true, true) + if err != nil { + t.Fatalf("FindOrCreate() failed, err: %v", err) + } + if got := r.FindByID(set.ID); got.ID != set.ID { + t.Fatalf("FindById(%d) failed, got: %+v, expected: %+v", set.ID, got, set) + } + + ops := []linux.Sembuf{ + linux.Sembuf{SemOp: -1}, + } + chs := make([]chan struct{}, 0, 5) + for i := 0; i < 5; i++ { + ch := executeOps(ctx, t, set, ops, true) + chs = append(chs, ch) + } + + creds := auth.CredentialsFromContext(ctx) + if err := r.RemoveID(set.ID, creds); err != nil { + t.Fatalf("RemoveID(%d) failed, err: %v", set.ID, err) + } + if !set.dead { + t.Fatalf("set is 
not dead: %+v", set) + } + if got := r.FindByID(set.ID); got != nil { + t.Fatalf("FindById(%d) failed, got: %+v, expected: nil", set.ID, got) + } + for i, ch := range chs { + if !signalled(ch) { + t.Fatalf("channel %d should have been signalled", i) + } + } +} diff --git a/pkg/sentry/kernel/sessions.go b/pkg/sentry/kernel/sessions.go new file mode 100644 index 000000000..53d8fb844 --- /dev/null +++ b/pkg/sentry/kernel/sessions.go @@ -0,0 +1,462 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// SessionID is the public identifier. +type SessionID ThreadID + +// ProcessGroupID is the public identifier. +type ProcessGroupID ThreadID + +// Session contains a leader threadgroup and a list of ProcessGroups. +type Session struct { + refs refs.AtomicRefCount + + // leader is the originator of the Session. + // + // Note that this may no longer be running (and may be reaped), so the + // ID is cached upon initial creation. The leader is still required + // however, since its PIDNamespace defines the scope of the Session. + // + // The leader is immutable. + leader *ThreadGroup + + // id is the cached identifier in the leader's namespace. + // + // The id is immutable. + id SessionID + + // ProcessGroups is a list of process groups in this Session. 
This is + // protected by TaskSet.mu. + processGroups processGroupList + + // sessionEntry is the embed for TaskSet.sessions. This is protected by + // TaskSet.mu. + sessionEntry +} + +// incRef grabs a reference. +func (s *Session) incRef() { + s.refs.IncRef() +} + +// decRef drops a reference. +// +// Precondition: callers must hold TaskSet.mu for writing. +func (s *Session) decRef() { + s.refs.DecRefWithDestructor(func() { + // Remove translations from the leader. + for ns := s.leader.pidns; ns != nil; ns = ns.parent { + id := ns.sids[s] + delete(ns.sids, s) + delete(ns.sessions, id) + } + + // Remove from the list of global Sessions. + s.leader.pidns.owner.sessions.Remove(s) + }) +} + +// ProcessGroup contains an originator threadgroup and a parent Session. +type ProcessGroup struct { + refs refs.AtomicRefCount // not exported. + + // originator is the originator of the group. + // + // See note re: leader in Session. The same applies here. + // + // The originator is immutable. + originator *ThreadGroup + + // id is the cached identifier in the originator's namespace. + // + // The id is immutable. + id ProcessGroupID + + // Session is the parent Session. + // + // The session is immutable. + session *Session + + // ancestors is the number of thread groups in this process group whose + // parent is in a different process group in the same session. + // + // The name is derived from the fact that process groups where + // ancestors is zero are considered "orphans". + // + // ancestors is protected by TaskSet.mu. + ancestors uint32 + + // processGroupEntry is the embedded entry for Sessions.groups. This is + // protected by TaskSet.mu. + processGroupEntry +} + +// incRefWithParent grabs a reference. +// +// This function is called when this ProcessGroup is being associated with some +// new ThreadGroup, tg. parentPG is the ProcessGroup of tg's parent +// ThreadGroup. If tg is init, then parentPG may be nil. 
+// +// Precondition: callers must hold TaskSet.mu for writing. +func (pg *ProcessGroup) incRefWithParent(parentPG *ProcessGroup) { + // We acquire an "ancestor" reference in the case of a nil parent. + // This is because the process being associated is init, and init can + // never be orphaned (we count it as always having an ancestor). + if pg != parentPG && (parentPG == nil || pg.session == parentPG.session) { + pg.ancestors++ + } + + pg.refs.IncRef() +} + +// decRefWithParent drops a reference. +// +// parentPG is per incRefWithParent. +// +// Precondition: callers must hold TaskSet.mu for writing. +func (pg *ProcessGroup) decRefWithParent(parentPG *ProcessGroup) { + // See incRefWithParent regarding parent == nil. + if pg != parentPG && (parentPG == nil || pg.session == parentPG.session) { + pg.ancestors-- + } + + alive := true + pg.refs.DecRefWithDestructor(func() { + alive = false // don't bother with handleOrphan. + + // Remove translations from the originator. + for ns := pg.originator.pidns; ns != nil; ns = ns.parent { + id := ns.pgids[pg] + delete(ns.pgids, pg) + delete(ns.processGroups, id) + } + + // Remove the list of process groups. + pg.session.processGroups.Remove(pg) + pg.session.decRef() + }) + if alive { + pg.handleOrphan() + } +} + +// parentPG returns the parent process group. +// +// Precondition: callers must hold TaskSet.mu. +func (tg *ThreadGroup) parentPG() *ProcessGroup { + if tg.leader.parent != nil { + return tg.leader.parent.tg.processGroup + } + return nil +} + +// handleOrphan checks whether the process group is an orphan and has any +// stopped jobs. If yes, then appropriate signals are delivered to each thread +// group within the process group. +// +// Precondition: callers must hold TaskSet.mu for writing. +func (pg *ProcessGroup) handleOrphan() { + // Check if this process is an orphan. + if pg.ancestors != 0 { + return + } + + // See if there are any stopped jobs. 
+ hasStopped := false + pg.originator.pidns.owner.forEachThreadGroupLocked(func(tg *ThreadGroup) { + if tg.processGroup != pg { + return + } + tg.signalHandlers.mu.Lock() + if tg.groupStopPhase == groupStopComplete { + hasStopped = true + } + tg.signalHandlers.mu.Unlock() + }) + if !hasStopped { + return + } + + // Deliver appropriate signals to all thread groups. + pg.originator.pidns.owner.forEachThreadGroupLocked(func(tg *ThreadGroup) { + if tg.processGroup != pg { + return + } + tg.signalHandlers.mu.Lock() + tg.leader.sendSignalLocked(sigPriv(linux.SIGHUP), true /* group */) + tg.leader.sendSignalLocked(sigPriv(linux.SIGCONT), true /* group */) + tg.signalHandlers.mu.Unlock() + }) + + return +} + +// CreateSession creates a new Session, with the ThreadGroup as the leader. +// +// EPERM may be returned if either the given ThreadGroup is already a Session +// leader, or a ProcessGroup already exists for the ThreadGroup's ID. +func (tg *ThreadGroup) CreateSession() error { + tg.pidns.owner.mu.Lock() + defer tg.pidns.owner.mu.Unlock() + return tg.createSession() +} + +// createSession creates a new session for a threadgroup. +// +// Precondition: callers must hold TaskSet.mu for writing. +func (tg *ThreadGroup) createSession() error { + // Get the ID for this thread in the current namespace. + id := tg.pidns.tids[tg.leader] + + // Check if this ThreadGroup already leads a Session, or + // if the proposed group is already taken. + for s := tg.pidns.owner.sessions.Front(); s != nil; s = s.Next() { + if s.leader.pidns != tg.pidns { + continue + } + if s.leader == tg { + return syserror.EPERM + } + if s.id == SessionID(id) { + return syserror.EPERM + } + for pg := s.processGroups.Front(); pg != nil; pg = pg.Next() { + if pg.id == ProcessGroupID(id) { + return syserror.EPERM + } + } + } + + // Create a new Session, with a single reference. + s := &Session{ + id: SessionID(id), + leader: tg, + } + + // Create a new ProcessGroup, belonging to that Session. 
+ // This also has a single reference (assigned below). + // + // Note that since this is a new session and a new process group, there + // will be zero ancestors for this process group. (It is an orphan at + // this point.) + pg := &ProcessGroup{ + id: ProcessGroupID(id), + originator: tg, + session: s, + ancestors: 0, + } + + // Tie them and return the result. + s.processGroups.PushBack(pg) + tg.pidns.owner.sessions.PushBack(s) + + // Leave the current group, and assign the new one. + if tg.processGroup != nil { + oldParentPG := tg.parentPG() + tg.forEachChildThreadGroupLocked(func(childTG *ThreadGroup) { + childTG.processGroup.incRefWithParent(pg) + childTG.processGroup.decRefWithParent(oldParentPG) + }) + tg.processGroup.decRefWithParent(oldParentPG) + tg.processGroup = pg + } else { + // The current process group may be nil only in the case of an + // unparented thread group (i.e. the init process). This would + // not normally occur, but we allow it for the convenience of + // CreateSession working from that point. There will be no + // child processes. We always say that the very first group + // created has ancestors (avoids checks elsewhere). + // + // Note that this mirrors the parent == nil logic in + // incRef/decRef/reparent, which counts nil as an ancestor. + tg.processGroup = pg + tg.processGroup.ancestors++ + } + + // Ensure a translation is added to all namespaces. + for ns := tg.pidns; ns != nil; ns = ns.parent { + local := ns.tids[tg.leader] + ns.sids[s] = SessionID(local) + ns.sessions[SessionID(local)] = s + ns.pgids[pg] = ProcessGroupID(local) + ns.processGroups[ProcessGroupID(local)] = pg + } + + return nil +} + +// CreateProcessGroup creates a new process group. +// +// An EPERM error will be returned if the ThreadGroup belongs to a different +// Session, is a Session leader or the group already exists. 
+func (tg *ThreadGroup) CreateProcessGroup() error { + tg.pidns.owner.mu.Lock() + defer tg.pidns.owner.mu.Unlock() + + // Get the ID for this thread in the current namespace. + id := tg.pidns.tids[tg.leader] + + // Per above, check for a Session leader or existing group. + for s := tg.pidns.owner.sessions.Front(); s != nil; s = s.Next() { + if s.leader.pidns != tg.pidns { + continue + } + if s.leader == tg { + return syserror.EPERM + } + for pg := s.processGroups.Front(); pg != nil; pg = pg.Next() { + if pg.id == ProcessGroupID(id) { + return syserror.EPERM + } + } + } + + // Create a new ProcessGroup, belonging to the current Session. + // + // We manually adjust the ancestors if the parent is in the same + // session. + tg.processGroup.session.incRef() + pg := &ProcessGroup{ + id: ProcessGroupID(id), + originator: tg, + session: tg.processGroup.session, + } + if tg.leader.parent != nil && tg.leader.parent.tg.processGroup.session == pg.session { + pg.ancestors++ + } + + // Assign the new process group; adjust children. + oldParentPG := tg.parentPG() + tg.forEachChildThreadGroupLocked(func(childTG *ThreadGroup) { + childTG.processGroup.incRefWithParent(pg) + childTG.processGroup.decRefWithParent(oldParentPG) + }) + tg.processGroup.decRefWithParent(oldParentPG) + tg.processGroup = pg + + // Ensure this translation is added to all namespaces. + for ns := tg.pidns; ns != nil; ns = ns.parent { + local := ns.tids[tg.leader] + ns.pgids[pg] = ProcessGroupID(local) + ns.processGroups[ProcessGroupID(local)] = pg + } + + return nil +} + +// JoinProcessGroup joins an existing process group. +// +// This function will return EACCES if an exec has been performed since fork +// by the given ThreadGroup, and EPERM if the Sessions are not the same or the +// group does not exist. +// +// If checkExec is set, then the join is not permitted after the process has +// executed exec at least once. 
+func (tg *ThreadGroup) JoinProcessGroup(pidns *PIDNamespace, pgid ProcessGroupID, checkExec bool) error { + pidns.owner.mu.Lock() + defer pidns.owner.mu.Unlock() + + // Lookup the ProcessGroup. + pg := pidns.processGroups[pgid] + if pg == nil { + return syserror.EPERM + } + + // Disallow the join if an execve has performed, per POSIX. + if checkExec && tg.execed { + return syserror.EACCES + } + + // See if it's in the same session as ours. + if pg.session != tg.processGroup.session { + return syserror.EPERM + } + + // Join the group; adjust children. + parentPG := tg.parentPG() + pg.incRefWithParent(parentPG) + tg.forEachChildThreadGroupLocked(func(childTG *ThreadGroup) { + childTG.processGroup.incRefWithParent(pg) + childTG.processGroup.decRefWithParent(tg.processGroup) + }) + tg.processGroup.decRefWithParent(parentPG) + tg.processGroup = pg + + return nil +} + +// Session returns the ThreadGroup's Session. +// +// A reference is not taken on the session. +func (tg *ThreadGroup) Session() *Session { + tg.pidns.owner.mu.RLock() + defer tg.pidns.owner.mu.RUnlock() + return tg.processGroup.session +} + +// IDOfSession returns the Session assigned to s in PID namespace ns. +// +// If this group isn't visible in this namespace, zero will be returned. It is +// the callers responsibility to check that before using this function. +func (pidns *PIDNamespace) IDOfSession(s *Session) SessionID { + pidns.owner.mu.RLock() + defer pidns.owner.mu.RUnlock() + return pidns.sids[s] +} + +// SessionWithID returns the Session with the given ID in the PID namespace ns, +// or nil if that given ID is not defined in this namespace. +// +// A reference is not taken on the session. +func (pidns *PIDNamespace) SessionWithID(id SessionID) *Session { + pidns.owner.mu.RLock() + defer pidns.owner.mu.RUnlock() + return pidns.sessions[id] +} + +// ProcessGroup returns the ThreadGroup's ProcessGroup. +// +// A reference is not taken on the process group. 
+func (tg *ThreadGroup) ProcessGroup() *ProcessGroup { + tg.pidns.owner.mu.RLock() + defer tg.pidns.owner.mu.RUnlock() + return tg.processGroup +} + +// IDOfProcessGroup returns the process group assigned to pg in PID namespace ns. +// +// The same constraints apply as IDOfSession. +func (pidns *PIDNamespace) IDOfProcessGroup(pg *ProcessGroup) ProcessGroupID { + pidns.owner.mu.RLock() + defer pidns.owner.mu.RUnlock() + return pidns.pgids[pg] +} + +// ProcessGroupWithID returns the ProcessGroup with the given ID in the PID +// namespace ns, or nil if that given ID is not defined in this namespace. +// +// A reference is not taken on the process group. +func (pidns *PIDNamespace) ProcessGroupWithID(id ProcessGroupID) *ProcessGroup { + pidns.owner.mu.RLock() + defer pidns.owner.mu.RUnlock() + return pidns.processGroups[id] +} diff --git a/pkg/sentry/kernel/signal.go b/pkg/sentry/kernel/signal.go new file mode 100644 index 000000000..8edd05cdf --- /dev/null +++ b/pkg/sentry/kernel/signal.go @@ -0,0 +1,69 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" +) + +// SignalPanic is used to panic the running threads. 
It is a signal which +// cannot be used by the application: it must be caught and ignored by the +// runtime (in order to catch possible races). +const SignalPanic = linux.SIGUSR2 + +// sendExternalSignal is called when an asynchronous signal is sent to the +// sentry ("in sentry context"). On some platforms, it may also be called when +// an asynchronous signal is sent to sandboxed application threads ("in +// application context"). +// +// context is used only for debugging to differentiate these cases. +// +// Returns false if signal could not be sent because the Kernel is not fully +// initialized yet. +func (k *Kernel) sendExternalSignal(info *arch.SignalInfo, context string) bool { + switch linux.Signal(info.Signo) { + case platform.SignalInterrupt: + // Assume that a call to platform.Context.Interrupt() misfired. + return true + + case SignalPanic: + // SignalPanic is also specially handled in sentry setup to ensure that + // it causes a panic even after tasks exit, but SignalPanic may also + // be sent here if it is received while in app context. + panic("Signal-induced panic") + + default: + log.Infof("Received external signal %d in %s context", info.Signo, context) + if k.globalInit == nil { + log.Warningf("Received external signal %d before init created", info.Signo) + return false + } + k.globalInit.SendSignal(info) + } + + return true +} + +// sigPriv returns a SignalInfo representing a signal sent by the sentry. (The +// name reflects its equivalence to Linux's SEND_SIG_PRIV.) +func sigPriv(sig linux.Signal) *arch.SignalInfo { + return &arch.SignalInfo{ + Signo: int32(sig), + Code: arch.SignalInfoKernel, + } +} diff --git a/pkg/sentry/kernel/signal_handlers.go b/pkg/sentry/kernel/signal_handlers.go new file mode 100644 index 000000000..21ba4ee70 --- /dev/null +++ b/pkg/sentry/kernel/signal_handlers.go @@ -0,0 +1,79 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" +) + +// SignalHandlers holds information about signal actions. +type SignalHandlers struct { + // mu protects actions, as well as the signal state of all tasks and thread + // groups using this SignalHandlers object. (See comment on + // ThreadGroup.signalHandlers.) + mu sync.Mutex `state:"nosave"` + + // actions is the action to be taken upon receiving each signal. + actions map[linux.Signal]arch.SignalAct +} + +// NewSignalHandlers returns a new SignalHandlers specifying all default +// actions. +func NewSignalHandlers() *SignalHandlers { + return &SignalHandlers{ + actions: make(map[linux.Signal]arch.SignalAct), + } +} + +// Fork returns a copy of sh for a new thread group. +func (sh *SignalHandlers) Fork() *SignalHandlers { + sh2 := NewSignalHandlers() + sh.mu.Lock() + defer sh.mu.Unlock() + for sig, act := range sh.actions { + sh2.actions[sig] = act + } + return sh2 +} + +// CopyForExec returns a copy of sh for a thread group that is undergoing an +// execve. (See comments in Task.finishExec.) 
+func (sh *SignalHandlers) CopyForExec() *SignalHandlers { + sh2 := NewSignalHandlers() + sh.mu.Lock() + defer sh.mu.Unlock() + for sig, act := range sh.actions { + if act.Handler == arch.SignalActIgnore { + sh2.actions[sig] = arch.SignalAct{ + Handler: arch.SignalActIgnore, + } + } + } + return sh2 +} + +// dequeueActionLocked returns the SignalAct that should be used to handle sig. +// +// Preconditions: sh.mu must be locked. +func (sh *SignalHandlers) dequeueAction(sig linux.Signal) arch.SignalAct { + act := sh.actions[sig] + if act.IsResetHandler() { + delete(sh.actions, sig) + } + return act +} diff --git a/pkg/sentry/kernel/syscalls.go b/pkg/sentry/kernel/syscalls.go new file mode 100644 index 000000000..e20fa3eb6 --- /dev/null +++ b/pkg/sentry/kernel/syscalls.go @@ -0,0 +1,305 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "fmt" + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/bits" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// maxSyscallNum is the highest supported syscall number. +// +// The types below create fast lookup slices for all syscalls. This maximum +// serves as a sanity check that we don't allocate huge slices for a very large +// syscall. +const maxSyscallNum = 2000 + +// SyscallFn is a syscall implementation. 
+type SyscallFn func(t *Task, args arch.SyscallArguments) (uintptr, *SyscallControl, error) + +// MissingFn is a syscall to be called when an implementation is missing. +type MissingFn func(t *Task, sysno uintptr, args arch.SyscallArguments) (uintptr, error) + +// Possible flags for SyscallFlagsTable.enable. +const ( + // syscallPresent indicates that this is not a missing syscall. + // + // This flag is used internally in SyscallFlagsTable. + syscallPresent = 1 << iota + + // StraceEnableLog enables syscall log tracing. + StraceEnableLog + + // StraceEnableEvent enables syscall event tracing. + StraceEnableEvent + + // ExternalBeforeEnable enables the external hook before syscall execution. + ExternalBeforeEnable + + // ExternalAfterEnable enables the external hook after syscall execution. + ExternalAfterEnable +) + +// StraceEnableBits combines both strace log and event flags. +const StraceEnableBits = StraceEnableLog | StraceEnableEvent + +// SyscallFlagsTable manages a set of enable/disable bit fields on a per-syscall +// basis. +type SyscallFlagsTable struct { + // mu protects writes to the fields below. + // + // Atomic loads are always allowed. Atomic stores are allowed only + // while mu is held. + mu sync.Mutex + + // enable contains the enable bits for each syscall. + // + // missing syscalls have the same value in enable as missingEnable to + // avoid an extra branch in Word. + enable []uint32 + + // missingEnable contains the enable bits for missing syscalls. + missingEnable uint32 +} + +// Init initializes the struct, with all syscalls in table set to enable. +// +// max is the largest syscall number in table. +func (e *SyscallFlagsTable) init(table map[uintptr]SyscallFn, max uintptr) { + e.enable = make([]uint32, max+1) + for num := range table { + e.enable[num] = syscallPresent + } +} + +// Word returns the enable bitfield for sysno. 
+func (e *SyscallFlagsTable) Word(sysno uintptr) uint32 { + if sysno < uintptr(len(e.enable)) { + return atomic.LoadUint32(&e.enable[sysno]) + } + + return atomic.LoadUint32(&e.missingEnable) +} + +// Enable sets enable bit bit for all syscalls based on s. +// +// Syscalls missing from s are disabled. +// +// Syscalls missing from the initial table passed to Init cannot be added as +// individual syscalls. If present in s they will be ignored. +// +// Callers to Word may see either the old or new value while this function +// is executing. +func (e *SyscallFlagsTable) Enable(bit uint32, s map[uintptr]bool, missingEnable bool) { + e.mu.Lock() + defer e.mu.Unlock() + + missingVal := atomic.LoadUint32(&e.missingEnable) + if missingEnable { + missingVal |= bit + } else { + missingVal &^= bit + } + atomic.StoreUint32(&e.missingEnable, missingVal) + + for num := range e.enable { + val := atomic.LoadUint32(&e.enable[num]) + if !bits.IsOn32(val, syscallPresent) { + // Missing. + atomic.StoreUint32(&e.enable[num], missingVal) + continue + } + + if s[uintptr(num)] { + val |= bit + } else { + val &^= bit + } + atomic.StoreUint32(&e.enable[num], val) + } +} + +// EnableAll sets enable bit bit for all syscalls, present and missing. +func (e *SyscallFlagsTable) EnableAll(bit uint32) { + e.mu.Lock() + defer e.mu.Unlock() + + missingVal := atomic.LoadUint32(&e.missingEnable) + missingVal |= bit + atomic.StoreUint32(&e.missingEnable, missingVal) + + for num := range e.enable { + val := atomic.LoadUint32(&e.enable[num]) + if !bits.IsOn32(val, syscallPresent) { + // Missing. + atomic.StoreUint32(&e.enable[num], missingVal) + continue + } + + val |= bit + atomic.StoreUint32(&e.enable[num], val) + } +} + +// Stracer traces syscall execution. +type Stracer interface { + // SyscallEnter is called on syscall entry. + // + // The returned private data is passed to SyscallExit. + // + // TODO: remove kernel imports from the strace package so + // that the type can be used directly. 
+ SyscallEnter(t *Task, sysno uintptr, args arch.SyscallArguments, flags uint32) interface{} + + // SyscallExit is called on syscall exit. + SyscallExit(context interface{}, t *Task, sysno, rval uintptr, err error) +} + +// SyscallTable is a lookup table of system calls. Critically, a SyscallTable +// is *immutable*. In order to make supporting suspend and resume sane, they +// must be uniquely registered and may not change during operation. +type SyscallTable struct { + // OS is the operating system that this syscall table implements. + OS abi.OS `state:"wait"` + + // Arch is the architecture that this syscall table targets. + Arch arch.Arch `state:"wait"` + + // The OS version that this syscall table implements. + Version Version `state:"manual"` + + // AuditNumber is a numeric constant that represents the syscall table. If + // non-zero, auditNumber must be one of the AUDIT_ARCH_* values defined by + // linux/audit.h. + AuditNumber uint32 `state:"manual"` + + // Table is the collection of functions. + Table map[uintptr]SyscallFn `state:"manual"` + + // lookup is a fixed-size array that holds the syscalls (indexed by + // their numbers). It is used for fast look ups. + lookup []SyscallFn `state:"manual"` + + // Emulate is a collection of instruction addresses to emulate. The + // keys are addresses, and the values are system call numbers. + Emulate map[usermem.Addr]uintptr `state:"manual"` + + // The function to call in case of a missing system call. + Missing MissingFn `state:"manual"` + + // Stracer traces this syscall table. + Stracer Stracer `state:"manual"` + + // External is used to handle an external callback. + External func(*Kernel) `state:"manual"` + + // ExternalFilterBefore is called before External is called before the syscall is executed. + // External is not called if it returns false. 
+ ExternalFilterBefore func(*Task, uintptr, arch.SyscallArguments) bool `state:"manual"` + + // ExternalFilterAfter is called before External is called after the syscall is executed. + // External is not called if it returns false. + ExternalFilterAfter func(*Task, uintptr, arch.SyscallArguments) bool `state:"manual"` + + // FeatureEnable stores the strace and one-shot enable bits. + FeatureEnable SyscallFlagsTable `state:"manual"` +} + +// allSyscallTables contains all known tables. +var allSyscallTables []*SyscallTable + +// SyscallTables returns a read-only slice of registered SyscallTables. +func SyscallTables() []*SyscallTable { + return allSyscallTables +} + +// LookupSyscallTable returns the SyscallCall table for the OS/Arch combination. +func LookupSyscallTable(os abi.OS, a arch.Arch) (*SyscallTable, bool) { + for _, s := range allSyscallTables { + if s.OS == os && s.Arch == a { + return s, true + } + } + return nil, false +} + +// RegisterSyscallTable registers a new syscall table for use by a Kernel. +func RegisterSyscallTable(s *SyscallTable) { + if s.Table == nil { + // Ensure non-nil lookup table. + s.Table = make(map[uintptr]SyscallFn) + } + if s.Emulate == nil { + // Ensure non-nil emulate table. + s.Emulate = make(map[usermem.Addr]uintptr) + } + + var max uintptr + for num := range s.Table { + if num > max { + max = num + } + } + + if max > maxSyscallNum { + panic(fmt.Sprintf("SyscallTable %+v contains too large syscall number %d", s, max)) + } + + s.lookup = make([]SyscallFn, max+1) + + // Initialize the fast-lookup table. + for num, fn := range s.Table { + s.lookup[num] = fn + } + + s.FeatureEnable.init(s.Table, max) + + if _, ok := LookupSyscallTable(s.OS, s.Arch); ok { + panic(fmt.Sprintf("Duplicate SyscallTable registered for OS %v Arch %v", s.OS, s.Arch)) + } + + // Save a reference to this table. + // + // This is required for a Kernel to find the table and for save/restore + // operations below. 
+ allSyscallTables = append(allSyscallTables, s) +} + +// Lookup returns the syscall implementation, if one exists. +func (s *SyscallTable) Lookup(sysno uintptr) SyscallFn { + if sysno < uintptr(len(s.lookup)) { + return s.lookup[sysno] + } + + return nil +} + +// LookupEmulate looks up an emulation syscall number. +func (s *SyscallTable) LookupEmulate(addr usermem.Addr) (uintptr, bool) { + sysno, ok := s.Emulate[addr] + return sysno, ok +} + +// mapLookup is similar to Lookup, except that it only uses the syscall table, +// that is, it skips the fast look array. This is available for benchmarking. +func (s *SyscallTable) mapLookup(sysno uintptr) SyscallFn { + return s.Table[sysno] +} diff --git a/pkg/sentry/kernel/syscalls_state.go b/pkg/sentry/kernel/syscalls_state.go new file mode 100644 index 000000000..826809a70 --- /dev/null +++ b/pkg/sentry/kernel/syscalls_state.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import "fmt" + +// afterLoad is invoked by stateify. +func (s *SyscallTable) afterLoad() { + otherTable, ok := LookupSyscallTable(s.OS, s.Arch) + if !ok { + // Couldn't find a reference? + panic(fmt.Sprintf("syscall table not found for OS %v Arch %v", s.OS, s.Arch)) + } + + // Copy the table. 
+ *s = *otherTable +} diff --git a/pkg/sentry/kernel/syslog.go b/pkg/sentry/kernel/syslog.go new file mode 100644 index 000000000..31541749e --- /dev/null +++ b/pkg/sentry/kernel/syslog.go @@ -0,0 +1,100 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "fmt" + "math/rand" + "sync" +) + +// syslog represents a sentry-global kernel log. +// +// Currently, it contains only fun messages for a dmesg easter egg. +type syslog struct { + // mu protects the below. + mu sync.Mutex `state:"nosave"` + + // msg is the syslog message buffer. It is lazily initialized. + msg []byte +} + +// Log returns a copy of the syslog. +func (s *syslog) Log() []byte { + s.mu.Lock() + defer s.mu.Unlock() + + if s.msg != nil { + // Already initialized, just return a copy. + o := make([]byte, len(s.msg)) + copy(o, s.msg) + return o + } + + // Not initialized, create message. 
+ allMessages := []string{ + "Synthesizing system calls...", + "Mounting deweydecimalfs...", + "Moving files to filing cabinet...", + "Digging up root...", + "Constructing home...", + "Segmenting fault lines...", + "Creating bureaucratic processes...", + "Searching for needles in stacks...", + "Preparing for the zombie uprising...", + "Feeding the init monster...", + "Creating cloned children...", + "Daemonizing children...", + "Waiting for children...", + "Gathering forks...", + "Committing treasure map to memory...", + "Reading process obituaries...", + "Searching for socket adapter...", + "Creating process schedule...", + "Generating random numbers by fair dice roll...", + "Rewriting operating system in Javascript...", + "Consulting tar man page...", + "Forking spaghetti code...", + "Checking naughty and nice process list...", + "Checking naughty and nice process list...", // Check it up to twice. + "Granting licence to kill(2)...", // British spelling for British movie. + "Letting the watchdogs out...", + } + + selectMessage := func() string { + i := rand.Intn(len(allMessages)) + m := allMessages[i] + + // Delete the selected message. + allMessages[i] = allMessages[len(allMessages)-1] + allMessages = allMessages[:len(allMessages)-1] + + return m + } + + time := 0.0 + for i := 0; i < 10; i++ { + time += rand.Float64() / 2 + s.msg = append(s.msg, []byte(fmt.Sprintf("<6>[%11.6f] %s\n", time, selectMessage()))...) + } + + time += rand.Float64() / 2 + s.msg = append(s.msg, []byte(fmt.Sprintf("<6>[%11.6f] Ready!\n", time))...) + + // Return a copy. + o := make([]byte, len(s.msg)) + copy(o, s.msg) + return o +} diff --git a/pkg/sentry/kernel/table_test.go b/pkg/sentry/kernel/table_test.go new file mode 100644 index 000000000..71ca75555 --- /dev/null +++ b/pkg/sentry/kernel/table_test.go @@ -0,0 +1,108 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" +) + +const ( + maxTestSyscall = 1000 +) + +func createSyscallTable() *SyscallTable { + m := make(map[uintptr]SyscallFn) + for i := uintptr(0); i <= maxTestSyscall; i++ { + j := i + m[i] = func(*Task, arch.SyscallArguments) (uintptr, *SyscallControl, error) { + return j, nil, nil + } + } + + s := &SyscallTable{ + OS: abi.Linux, + Arch: arch.AMD64, + Table: m, + } + + RegisterSyscallTable(s) + return s +} + +func TestTable(t *testing.T) { + table := createSyscallTable() + defer func() { + // Cleanup registered tables to keep tests separate. + allSyscallTables = []*SyscallTable{} + }() + + // Go through all functions and check that they return the right value. + for i := uintptr(0); i < maxTestSyscall; i++ { + fn := table.Lookup(i) + if fn == nil { + t.Errorf("Syscall %v is set to nil", i) + continue + } + + v, _, _ := fn(nil, arch.SyscallArguments{}) + if v != i { + t.Errorf("Wrong return value for syscall %v: expected %v, got %v", i, i, v) + } + } + + // Check that values outside the range return nil. 
+ for i := uintptr(maxTestSyscall + 1); i < maxTestSyscall+100; i++ { + fn := table.Lookup(i) + if fn != nil { + t.Errorf("Syscall %v is not nil: %v", i, fn) + continue + } + } +} + +func BenchmarkTableLookup(b *testing.B) { + table := createSyscallTable() + + b.ResetTimer() + + j := uintptr(0) + for i := 0; i < b.N; i++ { + table.Lookup(j) + j = (j + 1) % 310 + } + + b.StopTimer() + // Cleanup registered tables to keep tests separate. + allSyscallTables = []*SyscallTable{} +} + +func BenchmarkTableMapLookup(b *testing.B) { + table := createSyscallTable() + + b.ResetTimer() + + j := uintptr(0) + for i := 0; i < b.N; i++ { + table.mapLookup(j) + j = (j + 1) % 310 + } + + b.StopTimer() + // Cleanup registered tables to keep tests separate. + allSyscallTables = []*SyscallTable{} +} diff --git a/pkg/sentry/kernel/task.go b/pkg/sentry/kernel/task.go new file mode 100644 index 000000000..3d2e035e9 --- /dev/null +++ b/pkg/sentry/kernel/task.go @@ -0,0 +1,606 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kernel + +import ( + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/bpf" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/futex" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/sched" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/uniqueid" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + ssync "gvisor.googlesource.com/gvisor/pkg/sync" +) + +// Task represents a thread of execution in the untrusted app. It +// includes registers and any thread-specific state that you would +// normally expect. +// +// Each task is associated with a goroutine, called the task goroutine, that +// executes code (application code, system calls, etc.) on behalf of that task. +// See Task.run (task_run.go). +// +// All fields that are "owned by the task goroutine" can only be mutated by the +// task goroutine while it is running. The task goroutine does not require +// synchronization to read these fields, although it still requires +// synchronization as described for those fields to mutate them. +// +// All fields that are "exclusive to the task goroutine" can only be accessed +// by the task goroutine while it is running. The task goroutine does not +// require synchronization to read or write these fields. +type Task struct { + taskNode + + // runState is what the task goroutine is executing if it is not stopped. + // If runState is nil, the task goroutine should exit or has exited. + // runState is exclusive to the task goroutine. 
+ runState taskRunState + + // haveSyscallReturn is true if tc.Arch().Return() represents a value + // returned by a syscall (or set by ptrace after a syscall). + // + // haveSyscallReturn is exclusive to the task goroutine. + haveSyscallReturn bool + + // interruptChan is notified whenever the task goroutine is interrupted + // (usually by a pending signal). interruptChan is effectively a condition + // variable that can be used in select statements. + // + // interruptChan is not saved; because saving interrupts all tasks, + // interruptChan is always notified after restore (see Task.run). + interruptChan chan struct{} `state:"nosave"` + + // gosched contains the current scheduling state of the task goroutine. + // + // gosched is protected by goschedSeq. gosched is owned by the task + // goroutine. + goschedSeq ssync.SeqCount `state:"nosave"` + gosched TaskGoroutineSchedInfo + + // yieldCount is the number of times the task goroutine has called + // Task.InterruptibleSleepStart, Task.UninterruptibleSleepStart, or + // Task.Yield(), voluntarily ceasing execution. + // + // yieldCount is accessed using atomic memory operations. yieldCount is + // owned by the task goroutine. + yieldCount uint64 + + // pendingSignals is the set of pending signals that may be handled only by + // this task. + // + // pendingSignals is protected by (taskNode.)tg.signalHandlers.mu + // (hereafter "the signal mutex"); see comment on + // ThreadGroup.signalHandlers. + pendingSignals pendingSignals + + // If haveSavedSignalMask is true, savedSignalMask is the signal mask that + // should be applied after the task has either delivered one signal to a + // user handler or is about to resume execution in the untrusted + // application. + // + // Both haveSavedSignalMask and savedSignalMask are exclusive to the task + // goroutine. 
+ haveSavedSignalMask bool + savedSignalMask linux.SignalSet + + // signalStack is the alternate signal stack used by signal handlers for + // which the SA_ONSTACK flag is set. + // + // signalStack is exclusive to the task goroutine. + signalStack arch.SignalStack + + // If groupStopRequired is true, the task should enter a group stop in the + // interrupt path. groupStopRequired is not redundant with + // tg.groupStopPhase != groupStopNone, because ptrace allows tracers to + // resume individual tasks from a group stop without ending the group stop + // as a whole. + // + // groupStopRequired is analogous to JOBCTL_TRAP_STOP in Linux, except that + // Linux only uses that flag for ptraced tasks. + // + // groupStopRequired is protected by the signal mutex. + groupStopRequired bool + + // If groupStopAcknowledged is true, the task has already acknowledged that + // it is entering the most recent group stop that has been initiated on its + // thread group. groupStopAcknowledged is only meaningful if + // tg.groupStopPhase == groupStopInitiated. + // + // groupStopAcknowledged is analogous to !JOBCTL_STOP_CONSUME in Linux. + // + // groupStopAcknowledged is protected by the signal mutex. + groupStopAcknowledged bool + + // If stop is not nil, it is the internally-initiated condition that + // currently prevents the task goroutine from running. + // + // stop is protected by the signal mutex. + stop TaskStop + + // stopCount is the number of active external stops (calls to + // Task.BeginExternalStop that have not been paired with a call to + // Task.EndExternalStop), plus 1 if stop is not nil. Hence stopCount is + // non-zero if the task goroutine should stop. + // + // Mutating stopCount requires both locking the signal mutex and using + // atomic memory operations. Reading stopCount requires either locking the + // signal mutex or using atomic memory operations. 
This allows Task.doStop + // to require only a single atomic read in the common case where stopCount + // is 0. + // + // stopCount is not saved, because external stops cannot be retained across + // a save/restore cycle. (Suppose a sentryctl command issues an external + // stop; after a save/restore cycle, the restored sentry has no knowledge + // of the pre-save sentryctl command, and the stopped task would remain + // stopped forever.) + stopCount int32 `state:"nosave"` + + // endStopCond is signaled when stopCount transitions to 0. The combination + // of stopCount and endStopCond effectively form a sync.WaitGroup, but + // WaitGroup provides no way to read its counter value. + // + // Invariant: endStopCond.L is the signal mutex. (This is not racy because + // sync.Cond.Wait is the only user of sync.Cond.L; only the task goroutine + // calls sync.Cond.Wait; and only the task goroutine can change the + // identity of the signal mutex, in Task.finishExec.) + endStopCond sync.Cond `state:"nosave"` + + // exitStatus is the task's exit status. + // + // exitStatus is protected by the signal mutex. + exitStatus ExitStatus + + // syscallRestartBlock represents a custom restart function to run in + // restart_syscall(2) to resume an interrupted syscall. + // + // syscallRestartBlock is exclusive to the task goroutine. + syscallRestartBlock SyscallRestartBlock + + // mu protects some of the following fields. + mu sync.Mutex `state:"nosave"` + + // tc and tr form the majority of the task's data. + // + // tc and tr are protected by mu. tc and tr are owned by the task + // goroutine. tr.signalMask is protected by the signal mutex and must be + // written using atomic memory operations (such that reading tr.signalMask + // is safe if the signal mutex is locked or if atomic memory operations are + // used), but is also owned by the task goroutine. + tc TaskContext + tr TaskResources + + // p provides the mechanism by which the task runs code in userspace. 
The p + // interface object is immutable. + p platform.Context `state:"nosave"` + + // k is the Kernel that this task belongs to. The k pointer is immutable. + k *Kernel + + // If vforkParent is not nil, it is the task that created this task with + // vfork() or clone(CLONE_VFORK), and should have its vforkStop ended when + // this TaskContext is released. + // + // vforkParent is protected by the TaskSet mutex. + vforkParent *Task + + // exitState is the task's progress through the exit path. + // + // exitState is protected by the TaskSet mutex. exitState is owned by the + // task goroutine. + exitState TaskExitState + + // exitTracerNotified is true if the exit path has either signaled the + // task's tracer to indicate the exit, or determined that no such signal is + // needed. exitTracerNotified can only be true if exitState is + // TaskExitZombie or TaskExitDead. + // + // exitTracerNotified is protected by the TaskSet mutex. + exitTracerNotified bool + + // exitTracerAcked is true if exitTracerNotified is true and either the + // task's tracer has acknowledged the exit notification, or the exit path + // has determined that no such notification is needed. + // + // exitTracerAcked is protected by the TaskSet mutex. + exitTracerAcked bool + + // exitParentNotified is true if the exit path has either signaled the + // task's parent to indicate the exit, or determined that no such signal is + // needed. exitParentNotified can only be true if exitState is + // TaskExitZombie or TaskExitDead. + // + // exitParentNotified is protected by the TaskSet mutex. + exitParentNotified bool + + // exitParentAcked is true if exitParentNotified is true and either the + // task's parent has acknowledged the exit notification, or the exit path + // has determined that no such acknowledgment is needed. + // + // exitParentAcked is protected by the TaskSet mutex. 
+ exitParentAcked bool + + // goroutineStopped is a WaitGroup whose counter value is 1 when the task + // goroutine is running and 0 when the task goroutine is stopped or has + // exited. + goroutineStopped sync.WaitGroup `state:"nosave"` + + // ptraceTracer is the task that is ptrace-attached to this one. If + // ptraceTracer is nil, this task is not being traced. Note that due to + // atomic.Value limitations (atomic.Value.Store(nil) panics), a nil + // ptraceTracer is always represented as a typed nil (i.e. (*Task)(nil)). + // + // ptraceTracer is protected by the TaskSet mutex, and accessed with atomic + // operations. This allows paths that wouldn't otherwise lock the TaskSet + // mutex, notably the syscall path, to check if ptraceTracer is nil without + // additional synchronization. + ptraceTracer atomic.Value `state:".(*Task)"` + + // ptraceTracees is the set of tasks that this task is ptrace-attached to. + // + // ptraceTracees is protected by the TaskSet mutex. + ptraceTracees map[*Task]struct{} + + // ptraceOpts contains ptrace options explicitly set by the tracer. If + // ptraceTracer is nil, ptraceOpts is expected to be the zero value. + // + // ptraceOpts is protected by the TaskSet mutex. + ptraceOpts ptraceOptions + + // ptraceSyscallMode controls ptrace behavior around syscall entry and + // exit. + // + // ptraceSyscallMode is protected by the TaskSet mutex. + ptraceSyscallMode ptraceSyscallMode + + // If ptraceSinglestep is true, the next time the task executes application + // code, single-stepping should be enabled. ptraceSinglestep is stored + // independently of the architecture-specific trap flag because tracer + // detaching (which can happen concurrently with the tracee's execution if + // the tracer exits) must disable single-stepping, and the task's + // architectural state is implicitly exclusive to the task goroutine (no + // synchronization occurs before passing registers to SwitchToApp). 
+ // + // ptraceSinglestep is analogous to Linux's TIF_SINGLESTEP. + // + // ptraceSinglestep is protected by the TaskSet mutex. + ptraceSinglestep bool + + // If t is ptrace-stopped, ptraceCode is a ptrace-defined value set at the + // time that t entered the ptrace stop, reset to 0 when the tracer + // acknowledges the stop with a wait*() syscall. Otherwise, it is the + // signal number passed to the ptrace operation that ended the last ptrace + // stop on this task. In the latter case, the effect of ptraceCode depends + // on the nature of the ptrace stop; signal-delivery-stop uses it to + // conditionally override ptraceSiginfo, syscall-entry/exit-stops send the + // signal to the task after leaving the stop, and PTRACE_EVENT stops and + // traced group stops ignore it entirely. + // + // Linux contextually stores the equivalent of ptraceCode in + // task_struct::exit_code. + // + // ptraceCode is protected by the TaskSet mutex. + ptraceCode int32 + + // ptraceSiginfo is the value returned to the tracer by + // ptrace(PTRACE_GETSIGINFO) and modified by ptrace(PTRACE_SETSIGINFO). + // (Despite the name, PTRACE_PEEKSIGINFO is completely unrelated.) + // ptraceSiginfo is nil if the task is in a ptraced group-stop (this is + // required for PTRACE_GETSIGINFO to return EINVAL during such stops, which + // is in turn required to distinguish group stops from other ptrace stops, + // per subsection "Group-stop" in ptrace(2)). + // + // ptraceSiginfo is analogous to Linux's task_struct::last_siginfo. + // + // ptraceSiginfo is protected by the TaskSet mutex. + ptraceSiginfo *arch.SignalInfo + + // ptraceEventMsg is the value set by PTRACE_EVENT stops and returned to + // the tracer by ptrace(PTRACE_GETEVENTMSG). + // + // ptraceEventMsg is protected by the TaskSet mutex. + ptraceEventMsg uint64 + + // The struct that holds the IO-related usage. The ioUsage pointer is + // immutable. 
+ ioUsage *usage.IO + + // logPrefix is a string containing the task's thread ID in the root PID + // namespace, and is prepended to log messages emitted by Task.Infof etc. + logPrefix atomic.Value `state:".(string)"` + + // creds is the task's credentials. + // + // creds is protected by mu. + creds *auth.Credentials + + // utsns is the task's UTS namespace. + // + // utsns is protected by mu. + utsns *UTSNamespace + + // ipcns is the task's IPC namespace. + // + // ipcns is protected by mu. + ipcns *IPCNamespace + + // parentDeathSignal is sent to this task's thread group when its parent exits. + // + // parentDeathSignal is protected by mu. + parentDeathSignal linux.Signal + + // syscallFilters is all seccomp-bpf syscall filters applicable to the + // task, in the order in which they were installed. + // + // syscallFilters is protected by mu. syscallFilters is owned by the task + // goroutine. + syscallFilters []bpf.Program + + // If cleartid is non-zero, treat it as a pointer to a ThreadID in the + // task's virtual address space; when the task exits, set the pointed-to + // ThreadID to 0, and wake any futex waiters. + // + // cleartid is exclusive to the task goroutine. + cleartid usermem.Addr + + // This is mostly a fake cpumask just for sched_set/getaffinity as we + // don't really control the affinity. + // + // Invariant: allowedCPUMask.Size() == + // sched.CPUMaskSize(Kernel.applicationCores). + // + // allowedCPUMask is protected by mu. + allowedCPUMask sched.CPUSet + + // cpu is the fake cpu number returned by getcpu(2). cpu is ignored + // entirely if Kernel.useHostCores is true. + // + // cpu is accessed using atomic memory operations. + cpu int32 + + // This is used to keep track of changes made to a process' priority/niceness. + // It is mostly used to provide some reasonable return value from + // getpriority(2) after a call to setpriority(2) has been made. + // We currently do not actually modify a process' scheduling priority. 
+ // NOTE: This represents the userspace view of priority (nice). + // This means that the value should be in the range [-20, 19]. + // + // niceness is protected by mu. + niceness int + + // This is used to track the numa policy for the current thread. This can be + // modified through a set_mempolicy(2) syscall. Since we always report a + // single numa node, all policies are no-ops. We only track this information + // so that we can return reasonable values if the application calls + // get_mempolicy(2) after setting a non-default policy. Note that in the + // real syscall, nodemask can be longer than 4 bytes, but we always report a + // single node so never need to save more than a single bit. + // + // numaPolicy and numaNodeMask are protected by mu. + numaPolicy int32 + numaNodeMask uint32 + + // If netns is true, the task is in a non-root network namespace. Network + // namespaces aren't currently implemented in full; being in a network + // namespace simply prevents the task from observing any network devices + // (including loopback) or using abstract socket addresses (see unix(7)). + // + // netns is protected by mu. netns is owned by the task goroutine. + netns bool + + // If rseqPreempted is true, before the next call to p.Switch(), interrupt + // RSEQ critical regions as defined by tg.rseq and write the task + // goroutine's CPU number to rseqCPUAddr. rseqCPU is the last CPU number + // written to rseqCPUAddr. + // + // If rseqCPUAddr is 0, rseqCPU is -1. + // + // rseqCPUAddr, rseqCPU, and rseqPreempted are exclusive to the task + // goroutine. + rseqPreempted bool `state:"nosave"` + rseqCPUAddr usermem.Addr + rseqCPU int32 + + // copyScratchBuffer is a buffer available to CopyIn/CopyOut + // implementations that require an intermediate buffer to copy data + // into/out of. It prevents these buffers from being allocated/zeroed in + // each syscall and eventually garbage collected. + // + // copyScratchBuffer is exclusive to the task goroutine. 
+ copyScratchBuffer [copyScratchBufferLen]byte `state:"nosave"` + + // blockingTimer is used for blocking timeouts. blockingTimerChan is the + // channel that is sent to when blockingTimer fires. + // + // blockingTimer is exclusive to the task goroutine. + blockingTimer *ktime.Timer `state:"nosave"` + blockingTimerChan <-chan struct{} `state:"nosave"` + + // futexWaiter is used for futex(FUTEX_WAIT) syscalls. + // + // futexWaiter is exclusive to the task goroutine. + futexWaiter *futex.Waiter `state:"nosave"` + + // startTime is the real time at which the task started. It is set when + // a Task is created or invokes execve(2). + // + // startTime is protected by mu. + startTime ktime.Time +} + +func (t *Task) savePtraceTracer() *Task { + return t.ptraceTracer.Load().(*Task) +} + +func (t *Task) loadPtraceTracer(tracer *Task) { + t.ptraceTracer.Store(tracer) +} + +func (t *Task) saveLogPrefix() string { + return t.logPrefix.Load().(string) +} + +func (t *Task) loadLogPrefix(prefix string) { + t.logPrefix.Store(prefix) +} + +// afterLoad is invoked by stateify. +func (t *Task) afterLoad() { + t.interruptChan = make(chan struct{}, 1) + t.gosched.State = TaskGoroutineNonexistent + if t.stop != nil { + t.stopCount = 1 + } + t.endStopCond.L = &t.tg.signalHandlers.mu + t.p = t.k.Platform.NewContext() + t.rseqPreempted = true + t.futexWaiter = futex.NewWaiter() +} + +// copyScratchBufferLen is the length of the copyScratchBuffer field of the Task +// struct. +const copyScratchBufferLen = 52 + +// TaskMaybe is the interface for extracting Tasks out of things which may be +// or contain Task objects. +type TaskMaybe interface { + // ExtractTask returns the Task. + ExtractTask() *Task +} + +// CopyScratchBuffer returns a scratch buffer to be used in CopyIn/CopyOut +// functions. It must only be used within those functions and can only be used +// by the task goroutine; it exists to improve performance and thus +// intentionally lacks any synchronization. 
+// +// Callers should pass a constant value as an argument, which will allow the +// compiler to inline and optimize out the if statement below. +func (t *Task) CopyScratchBuffer(size int) []byte { + if size > copyScratchBufferLen { + return make([]byte, size) + } + return t.copyScratchBuffer[:size] +} + +// FutexWaiter returns the Task's futex.Waiter. +func (t *Task) FutexWaiter() *futex.Waiter { + return t.futexWaiter +} + +// ExtractTask implements TaskMaybe.ExtractTask. +func (t *Task) ExtractTask() *Task { + return t +} + +// TaskContext returns t's TaskContext. +// +// Precondition: The caller must be running on the task goroutine, or t.mu must +// be locked. +func (t *Task) TaskContext() *TaskContext { + return &t.tc +} + +// TaskResources returns t's TaskResources. +// +// Precondition: The caller must be running on the task goroutine, or t.mu must +// be locked. +func (t *Task) TaskResources() *TaskResources { + return &t.tr +} + +// WithMuLocked executes f with t.mu locked. +func (t *Task) WithMuLocked(f func(*Task)) { + t.mu.Lock() + defer t.mu.Unlock() + f(t) +} + +// Kernel returns the Kernel containing t. +func (t *Task) Kernel() *Kernel { + return t.k +} + +// Value implements context.Context.Value. +func (t *Task) Value(key interface{}) interface{} { + switch key { + case CtxCanTrace: + return t.CanTrace + case CtxKernel: + return t.k + case CtxPIDNamespace: + return t.tg.pidns + case CtxUTSNamespace: + return t.utsns + case CtxIPCNamespace: + return t.ipcns + case CtxTask: + return t + case auth.CtxCredentials: + return t.creds + case fs.CtxRoot: + return t.FSContext().RootDirectory() + case ktime.CtxRealtimeClock: + return t.k.RealtimeClock() + case limits.CtxLimits: + return t.tg.limits + case platform.CtxPlatform: + return t.k + case uniqueid.CtxGlobalUniqueID: + return t.k.UniqueID() + case uniqueid.CtxInotifyCookie: + return t.k.GenerateInotifyCookie() + default: + return nil + } +} + +// SetClearTID sets t's cleartid. 
+// +// Preconditions: The caller must be running on the task goroutine. +func (t *Task) SetClearTID(addr usermem.Addr) { + t.cleartid = addr +} + +// SetSyscallRestartBlock sets the restart block for use in +// restart_syscall(2). After registering a restart block, a syscall should +// return ERESTART_RESTARTBLOCK to request a restart using the block. +// +// Precondition: The caller must be running on the task goroutine. +func (t *Task) SetSyscallRestartBlock(r SyscallRestartBlock) { + t.syscallRestartBlock = r +} + +// SyscallRestartBlock returns the currently registered restart block for use in +// restart_syscall(2). This function is *not* idempotent and may be called once +// per syscall. This function must not be called if a restart block has not been +// registered for the current syscall. +// +// Precondition: The caller must be running on the task goroutine. +func (t *Task) SyscallRestartBlock() SyscallRestartBlock { + r := t.syscallRestartBlock + // Explicitly set the restart block to nil so that a future syscall can't + // accidentally reuse it. + t.syscallRestartBlock = nil + return r +} diff --git a/pkg/sentry/kernel/task_acct.go b/pkg/sentry/kernel/task_acct.go new file mode 100644 index 000000000..ce12cdb64 --- /dev/null +++ b/pkg/sentry/kernel/task_acct.go @@ -0,0 +1,111 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +// Accounting, limits, timers. 
+
+import (
+	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
+	ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/limits"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
+)
+
+// IOUsage returns the io usage of the thread.
+func (t *Task) IOUsage() *usage.IO {
+	return t.ioUsage
+}
+
+// IOUsage returns the total io usage of all dead and live threads in the group.
+func (tg *ThreadGroup) IOUsage() *usage.IO {
+	tg.pidns.owner.mu.RLock()
+	defer tg.pidns.owner.mu.RUnlock()
+
+	// Snapshot the group's accumulated usage (dead threads), then add each
+	// live task's contribution. The copy keeps the returned value stable.
+	io := *tg.ioUsage
+	// Account for active tasks.
+	for t := tg.tasks.Front(); t != nil; t = t.Next() {
+		io.Accumulate(t.IOUsage())
+	}
+	return &io
+}
+
+// Name returns t's name.
+func (t *Task) Name() string {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.tc.Name
+}
+
+// SetName changes t's name.
+func (t *Task) SetName(name string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.tc.Name = name
+	t.Debugf("Set thread name to %q", name)
+}
+
+// SetCPUTimer is used by setrlimit(RLIMIT_CPU) to enforce the hard and soft
+// limits on CPU time used by this process.
+func (tg *ThreadGroup) SetCPUTimer(l *limits.Limit) {
+	tg.Timer().applyCPULimits(*l)
+}
+
+// Limits implements context.Context.Limits.
+func (t *Task) Limits() *limits.LimitSet {
+	return t.ThreadGroup().Limits()
+}
+
+// StartTime returns t's start time.
+func (t *Task) StartTime() ktime.Time {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.startTime
+}
+
+// MaxRSS returns the maximum resident set size of the task in bytes. The
+// which parameter should be one of RUSAGE_SELF, RUSAGE_CHILDREN,
+// RUSAGE_THREAD, or RUSAGE_BOTH. See getrusage(2) for documentation on the
+// behavior of these flags. An invalid which yields 0.
+func (t *Task) MaxRSS(which int32) uint64 {
+	t.tg.pidns.owner.mu.RLock()
+	defer t.tg.pidns.owner.mu.RUnlock()
+
+	switch which {
+	case linux.RUSAGE_SELF, linux.RUSAGE_THREAD:
+		// If there's an active mm we can use its value.
+ if mm := t.MemoryManager(); mm != nil { + if mmMaxRSS := mm.MaxResidentSetSize(); mmMaxRSS > t.tg.maxRSS { + return mmMaxRSS + } + } + return t.tg.maxRSS + case linux.RUSAGE_CHILDREN: + return t.tg.childMaxRSS + case linux.RUSAGE_BOTH: + maxRSS := t.tg.maxRSS + if maxRSS < t.tg.childMaxRSS { + maxRSS = t.tg.childMaxRSS + } + if mm := t.MemoryManager(); mm != nil { + if mmMaxRSS := mm.MaxResidentSetSize(); mmMaxRSS > maxRSS { + return mmMaxRSS + } + } + return maxRSS + default: + // We'll only get here if which is invalid. + return 0 + } +} diff --git a/pkg/sentry/kernel/task_block.go b/pkg/sentry/kernel/task_block.go new file mode 100644 index 000000000..9fd24f134 --- /dev/null +++ b/pkg/sentry/kernel/task_block.go @@ -0,0 +1,207 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "time" + + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// BlockWithTimeout blocks t until an event is received from C, the application +// monotonic clock indicates that timeout has elapsed (only if haveTimeout is true), +// or t is interrupted. It returns: +// +// - The remaining timeout, which is guaranteed to be 0 if the timeout expired, +// and is unspecified if haveTimeout is false. +// +// - An error which is nil if an event is received from C, ETIMEDOUT if the timeout +// expired, and syserror.ErrInterrupted if t is interrupted. 
+func (t *Task) BlockWithTimeout(C chan struct{}, haveTimeout bool, timeout time.Duration) (time.Duration, error) { + if !haveTimeout { + return timeout, t.block(C, nil) + } + + start := t.Kernel().MonotonicClock().Now() + deadline := start.Add(timeout) + err := t.BlockWithDeadline(C, true, deadline) + + // Timeout, explicitly return a remaining duration of 0. + if err == syserror.ETIMEDOUT { + return 0, err + } + + // Compute the remaining timeout. Note that even if block() above didn't + // return due to a timeout, we may have used up any of the remaining time + // since then. We cap the remaining timeout to 0 to make it easier to + // directly use the returned duration. + end := t.Kernel().MonotonicClock().Now() + remainingTimeout := timeout - end.Sub(start) + if remainingTimeout < 0 { + remainingTimeout = 0 + } + + return remainingTimeout, err +} + +// BlockWithDeadline blocks t until an event is received from C, the +// application monotonic clock indicates a time of deadline (only if +// haveDeadline is true), or t is interrupted. It returns nil if an event is +// received from C, ETIMEDOUT if the deadline expired, and +// syserror.ErrInterrupted if t is interrupted. +// +// Preconditions: The caller must be running on the task goroutine. +func (t *Task) BlockWithDeadline(C chan struct{}, haveDeadline bool, deadline ktime.Time) error { + if !haveDeadline { + return t.block(C, nil) + } + + // Start the timeout timer. + t.blockingTimer.Swap(ktime.Setting{ + Enabled: true, + Next: deadline, + }) + + err := t.block(C, t.blockingTimerChan) + + // Stop the timeout timer and drain the channel. + t.blockingTimer.Swap(ktime.Setting{}) + select { + case <-t.blockingTimerChan: + default: + } + + return err +} + +// BlockWithTimer blocks t until an event is received from C or tchan, or t is +// interrupted. It returns nil if an event is received from C, ETIMEDOUT if an +// event is received from tchan, and syserror.ErrInterrupted if t is +// interrupted. 
+//
+// Most clients should use BlockWithDeadline or BlockWithTimeout instead.
+//
+// Preconditions: The caller must be running on the task goroutine.
+func (t *Task) BlockWithTimer(C chan struct{}, tchan <-chan struct{}) error {
+	return t.block(C, tchan)
+}
+
+// Block blocks t until an event is received from C or t is interrupted. It
+// returns nil if an event is received from C and syserror.ErrInterrupted if t
+// is interrupted.
+//
+// Preconditions: The caller must be running on the task goroutine.
+func (t *Task) Block(C chan struct{}) error {
+	return t.block(C, nil)
+}
+
+// block blocks a task on one of many events: C, timerChan, or interruption.
+// N.B. defer is too expensive to be used here.
+func (t *Task) block(C chan struct{}, timerChan <-chan struct{}) error {
+	// Fast path if the request is already done.
+	select {
+	case <-C:
+		return nil
+	default:
+	}
+
+	// Deactivate our address space; we don't need it while sleeping.
+	interrupt := t.SleepStart()
+
+	select {
+	case <-C:
+		t.SleepFinish(true)
+		return nil
+
+	case <-interrupt:
+		t.SleepFinish(false)
+		// Return the indicated error on interrupt.
+		return syserror.ErrInterrupted
+
+	case <-timerChan:
+		// We've timed out. (A nil timerChan never fires: receiving from a
+		// nil channel blocks forever, so untimed blocks skip this case.)
+		t.SleepFinish(true)
+		return syserror.ETIMEDOUT
+	}
+}
+
+// SleepStart implements amutex.Sleeper.SleepStart.
+func (t *Task) SleepStart() <-chan struct{} {
+	t.Deactivate()
+	t.accountTaskGoroutineEnter(TaskGoroutineBlockedInterruptible)
+	return t.interruptChan
+}
+
+// SleepFinish implements amutex.Sleeper.SleepFinish.
+func (t *Task) SleepFinish(success bool) {
+	if !success {
+		// The interrupted notification is consumed only at the top-level
+		// (Run). Therefore we attempt to reset the pending notification.
+		// This will also elide our next entry back into the task, so we
+		// will process signals, state changes, etc.
+		t.interruptSelf()
+	}
+	t.accountTaskGoroutineLeave(TaskGoroutineBlockedInterruptible)
+	t.Activate()
+}
+
+// UninterruptibleSleepStart implements context.Context.UninterruptibleSleepStart.
+func (t *Task) UninterruptibleSleepStart(deactivate bool) {
+	if deactivate {
+		t.Deactivate()
+	}
+	t.accountTaskGoroutineEnter(TaskGoroutineBlockedUninterruptible)
+}
+
+// UninterruptibleSleepFinish implements context.Context.UninterruptibleSleepFinish.
+func (t *Task) UninterruptibleSleepFinish(activate bool) {
+	t.accountTaskGoroutineLeave(TaskGoroutineBlockedUninterruptible)
+	if activate {
+		t.Activate()
+	}
+}
+
+// interrupted returns true if interrupt or interruptSelf has been called at
+// least once since the last call to interrupted.
+func (t *Task) interrupted() bool {
+	select {
+	case <-t.interruptChan:
+		return true
+	default:
+		return false
+	}
+}
+
+// interrupt unblocks the task and interrupts it if it's currently running in
+// userspace.
+func (t *Task) interrupt() {
+	t.interruptSelf()
+	t.p.Interrupt()
+}
+
+// interruptSelf is like interrupt, but can only be called by the task
+// goroutine.
+func (t *Task) interruptSelf() {
+	select {
+	case t.interruptChan <- struct{}{}:
+		t.Debugf("Interrupt queued")
+	default:
+		t.Debugf("Dropping duplicate interrupt")
+	}
+	// platform.Context.Interrupt() is unnecessary since a task goroutine
+	// calling interruptSelf() cannot also be blocked in
+	// platform.Context.Switch().
+}
diff --git a/pkg/sentry/kernel/task_clone.go b/pkg/sentry/kernel/task_clone.go
new file mode 100644
index 000000000..3a74abdfb
--- /dev/null
+++ b/pkg/sentry/kernel/task_clone.go
@@ -0,0 +1,475 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/bpf" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// SharingOptions controls what resources are shared by a new task created by +// Task.Clone, or an existing task affected by Task.Unshare. +type SharingOptions struct { + // If NewAddressSpace is true, the task should have an independent virtual + // address space. + NewAddressSpace bool + + // If NewSignalHandlers is true, the task should use an independent set of + // signal handlers. + NewSignalHandlers bool + + // If NewThreadGroup is true, the task should be the leader of its own + // thread group. TerminationSignal is the signal that the thread group + // will send to its parent when it exits. If NewThreadGroup is false, + // TerminationSignal is ignored. + NewThreadGroup bool + TerminationSignal linux.Signal + + // If NewPIDNamespace is true: + // + // - In the context of Task.Clone, the new task should be the init task + // (TID 1) in a new PID namespace. + // + // - In the context of Task.Unshare, the task should create a new PID + // namespace, and all subsequent clones of the task should be members of + // the new PID namespace. + NewPIDNamespace bool + + // If NewUserNamespace is true, the task should have an independent user + // namespace. + NewUserNamespace bool + + // If NewNetworkNamespace is true, the task should have an independent + // network namespace. (Note that network namespaces are not really + // implemented; see comment on Task.netns for details.) + NewNetworkNamespace bool + + // If NewFiles is true, the task should use an independent file descriptor + // table. 
+ NewFiles bool + + // If NewFSContext is true, the task should have an independent FSContext. + NewFSContext bool + + // If NewUTSNamespace is true, the task should have an independent UTS + // namespace. + NewUTSNamespace bool + + // If NewIPCNamespace is true, the task should have an independent IPC + // namespace. + NewIPCNamespace bool +} + +// CloneOptions controls the behavior of Task.Clone. +type CloneOptions struct { + // SharingOptions defines the set of resources that the new task will share + // with its parent. + SharingOptions + + // Stack is the initial stack pointer of the new task. If Stack is 0, the + // new task will start with the same stack pointer as its parent. + Stack usermem.Addr + + // If SetTLS is true, set the new task's TLS (thread-local storage) + // descriptor to TLS. If SetTLS is false, TLS is ignored. + SetTLS bool + TLS usermem.Addr + + // If ChildClearTID is true, when the child exits, 0 is written to the + // address ChildTID in the child's memory, and if the write is successful a + // futex wake on the same address is performed. + // + // If ChildSetTID is true, the child's thread ID (in the child's PID + // namespace) is written to address ChildTID in the child's memory. (As in + // Linux, failed writes are silently ignored.) + ChildClearTID bool + ChildSetTID bool + ChildTID usermem.Addr + + // If ParentSetTID is true, the child's thread ID (in the parent's PID + // namespace) is written to address ParentTID in the parent's memory. (As + // in Linux, failed writes are silently ignored.) + // + // Older versions of the clone(2) man page state that CLONE_PARENT_SETTID + // causes the child's thread ID to be written to ptid in both the parent + // and child's memory, but this is a documentation error fixed by + // 87ab04792ced ("clone.2: Fix description of CLONE_PARENT_SETTID"). + ParentSetTID bool + ParentTID usermem.Addr + + // If Vfork is true, place the parent in vforkStop until the cloned task + // releases its TaskContext. 
+ Vfork bool + + // If Untraced is true, do not report PTRACE_EVENT_CLONE/FORK/VFORK for + // this clone(), and do not ptrace-attach the caller's tracer to the new + // task. (PTRACE_EVENT_VFORK_DONE will still be reported if appropriate). + Untraced bool + + // If InheritTracer is true, ptrace-attach the caller's tracer to the new + // task, even if no PTRACE_EVENT_CLONE/FORK/VFORK event would be reported + // for it. If both Untraced and InheritTracer are true, no event will be + // reported, but tracer inheritance will still occur. + InheritTracer bool +} + +// Clone implements the clone(2) syscall and returns the thread ID of the new +// task in t's PID namespace. Clone may return both a non-zero thread ID and a +// non-nil error. +// +// Preconditions: The caller must be running Task.doSyscallInvoke on the task +// goroutine. +func (t *Task) Clone(opts *CloneOptions) (ThreadID, *SyscallControl, error) { + // Since signal actions may refer to application signal handlers by virtual + // address, any set of signal handlers must refer to the same address + // space. + if !opts.NewSignalHandlers && opts.NewAddressSpace { + return 0, nil, syserror.EINVAL + } + // In order for the behavior of thread-group-directed signals to be sane, + // all tasks in a thread group must share signal handlers. + if !opts.NewThreadGroup && opts.NewSignalHandlers { + return 0, nil, syserror.EINVAL + } + // All tasks in a thread group must be in the same PID namespace. + if !opts.NewThreadGroup && (opts.NewPIDNamespace || t.childPIDNamespace != nil) { + return 0, nil, syserror.EINVAL + } + // The two different ways of specifying a new PID namespace are + // incompatible. + if opts.NewPIDNamespace && t.childPIDNamespace != nil { + return 0, nil, syserror.EINVAL + } + // Thread groups and FS contexts cannot span user namespaces. 
+ if opts.NewUserNamespace && (!opts.NewThreadGroup || !opts.NewFSContext) { + return 0, nil, syserror.EINVAL + } + + // "If CLONE_NEWUSER is specified along with other CLONE_NEW* flags in a + // single clone(2) or unshare(2) call, the user namespace is guaranteed to + // be created first, giving the child (clone(2)) or caller (unshare(2)) + // privileges over the remaining namespaces created by the call." - + // user_namespaces(7) + creds := t.Credentials() + var userns *auth.UserNamespace + if opts.NewUserNamespace { + var err error + // "EPERM (since Linux 3.9): CLONE_NEWUSER was specified in flags and + // the caller is in a chroot environment (i.e., the caller's root + // directory does not match the root directory of the mount namespace + // in which it resides)." - clone(2). Neither chroot(2) nor + // user_namespaces(7) document this. + if t.IsChrooted() { + return 0, nil, syserror.EPERM + } + userns, err = creds.NewChildUserNamespace() + if err != nil { + return 0, nil, err + } + } + if (opts.NewPIDNamespace || opts.NewNetworkNamespace || opts.NewUTSNamespace) && !creds.HasCapability(linux.CAP_SYS_ADMIN) { + return 0, nil, syserror.EPERM + } + + utsns := t.UTSNamespace() + if opts.NewUTSNamespace { + // Note that this must happen after NewUserNamespace so we get + // the new userns if there is one. + utsns = t.UTSNamespace().Clone(userns) + } + + ipcns := t.IPCNamespace() + if opts.NewIPCNamespace { + // Note that "If CLONE_NEWIPC is set, then create the process in a new IPC + // namespace" + ipcns = NewIPCNamespace() + } + + tc, err := t.tc.Fork(t, !opts.NewAddressSpace) + if err != nil { + return 0, nil, err + } + // clone() returns 0 in the child. 
+ tc.Arch.SetReturn(0) + if opts.Stack != 0 { + tc.Arch.SetStack(uintptr(opts.Stack)) + } + if opts.SetTLS { + tc.Arch.StateData().Regs.Fs_base = uint64(opts.TLS) + } + + pidns := t.tg.pidns + if t.childPIDNamespace != nil { + pidns = t.childPIDNamespace + } else if opts.NewPIDNamespace { + pidns = pidns.NewChild(userns) + } + tg := t.tg + parent := t.parent + if opts.NewThreadGroup { + sh := t.tg.signalHandlers + if opts.NewSignalHandlers { + sh = sh.Fork() + } + tg = NewThreadGroup(pidns, sh, opts.TerminationSignal, tg.limits.GetCopy(), t.k.monotonicClock) + parent = t + } + cfg := &TaskConfig{ + Kernel: t.k, + Parent: parent, + ThreadGroup: tg, + TaskContext: tc, + TaskResources: t.tr.Fork(!opts.NewFiles, !opts.NewFSContext), + Niceness: t.Niceness(), + Credentials: creds.Fork(), + NetworkNamespaced: t.netns, + AllowedCPUMask: t.CPUMask(), + UTSNamespace: utsns, + IPCNamespace: ipcns, + } + if opts.NewNetworkNamespace { + cfg.NetworkNamespaced = true + } + nt, err := t.tg.pidns.owner.NewTask(cfg) + if err != nil { + if opts.NewThreadGroup { + tg.release() + } + return 0, nil, err + } + + // "A child process created via fork(2) inherits a copy of its parent's + // alternate signal stack settings" - sigaltstack(2). + // + // However kernel/fork.c:copy_process() adds a limitation to this: + // "sigaltstack should be cleared when sharing the same VM". + if opts.NewAddressSpace || opts.Vfork { + nt.SetSignalStack(t.SignalStack()) + } + + if userns != nil { + if err := nt.SetUserNamespace(userns); err != nil { + // This shouldn't be possible: userns was created from nt.creds, so + // nt should have CAP_SYS_ADMIN in userns. + panic("Task.Clone: SetUserNamespace failed: " + err.Error()) + } + } + + // This has to happen last, because e.g. ptraceClone may send a SIGSTOP to + // nt that it must receive before its task goroutine starts running. 
+ tid := nt.k.tasks.Root.IDOfTask(nt) + defer nt.Start(tid) + + // "If fork/clone and execve are allowed by @prog, any child processes will + // be constrained to the same filters and system call ABI as the parent." - + // Documentation/prctl/seccomp_filter.txt + nt.syscallFilters = append([]bpf.Program(nil), t.syscallFilters...) + if opts.Vfork { + nt.vforkParent = t + } + + if opts.ChildClearTID { + nt.SetClearTID(opts.ChildTID) + } + if opts.ChildSetTID { + // Can't use Task.CopyOut, which assumes AddressSpaceActive. + usermem.CopyObjectOut(t, nt.MemoryManager(), opts.ChildTID, nt.ThreadID(), usermem.IOOpts{}) + } + ntid := t.tg.pidns.IDOfTask(nt) + if opts.ParentSetTID { + t.CopyOut(opts.ParentTID, ntid) + } + + kind := ptraceCloneKindClone + if opts.Vfork { + kind = ptraceCloneKindVfork + } else if opts.TerminationSignal == linux.SIGCHLD { + kind = ptraceCloneKindFork + } + if t.ptraceClone(kind, nt, opts) { + if opts.Vfork { + return ntid, &SyscallControl{next: &runSyscallAfterPtraceEventClone{vforkChild: nt, vforkChildTID: ntid}}, nil + } + return ntid, &SyscallControl{next: &runSyscallAfterPtraceEventClone{}}, nil + } + if opts.Vfork { + t.maybeBeginVforkStop(nt) + return ntid, &SyscallControl{next: &runSyscallAfterVforkStop{childTID: ntid}}, nil + } + return ntid, nil, nil +} + +// maybeBeginVforkStop checks if a previously-started vfork child is still +// running and has not yet released its MM, such that its parent t should enter +// a vforkStop. +// +// Preconditions: The caller must be running on t's task goroutine. 
+func (t *Task) maybeBeginVforkStop(child *Task) { + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + t.tg.signalHandlers.mu.Lock() + defer t.tg.signalHandlers.mu.Unlock() + if t.killedLocked() { + child.vforkParent = nil + return + } + if child.vforkParent == t { + t.beginInternalStopLocked((*vforkStop)(nil)) + } +} + +func (t *Task) unstopVforkParent() { + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + if p := t.vforkParent; p != nil { + p.tg.signalHandlers.mu.Lock() + defer p.tg.signalHandlers.mu.Unlock() + if _, ok := p.stop.(*vforkStop); ok { + p.endInternalStopLocked() + } + // Parent no longer needs to be unstopped. + t.vforkParent = nil + } +} + +type runSyscallAfterPtraceEventClone struct { + vforkChild *Task + + // If vforkChild is not nil, vforkChildTID is its thread ID in the parent's + // PID namespace. vforkChildTID must be stored since the child may exit and + // release its TID before the PTRACE_EVENT stop ends. + vforkChildTID ThreadID +} + +func (r *runSyscallAfterPtraceEventClone) execute(t *Task) taskRunState { + if r.vforkChild != nil { + t.maybeBeginVforkStop(r.vforkChild) + return &runSyscallAfterVforkStop{r.vforkChildTID} + } + return (*runSyscallExit)(nil) +} + +type runSyscallAfterVforkStop struct { + // childTID has the same meaning as + // runSyscallAfterPtraceEventClone.vforkChildTID. + childTID ThreadID +} + +func (r *runSyscallAfterVforkStop) execute(t *Task) taskRunState { + t.ptraceVforkDone(r.childTID) + return (*runSyscallExit)(nil) +} + +// Unshare changes the set of resources t shares with other tasks, as specified +// by opts. +// +// Preconditions: The caller must be running on the task goroutine. +func (t *Task) Unshare(opts *SharingOptions) error { + // In Linux unshare(2), NewThreadGroup implies NewSignalHandlers and + // NewSignalHandlers implies NewAddressSpace. 
All three flags are no-ops if + // t is the only task using its MM, which due to clone(2)'s rules imply + // that it is also the only task using its signal handlers / in its thread + // group, and cause EINVAL to be returned otherwise. + // + // Since we don't count the number of tasks using each address space or set + // of signal handlers, we reject NewSignalHandlers and NewAddressSpace + // altogether, and interpret NewThreadGroup as requiring that t be the only + // member of its thread group. This seems to be logically coherent, in the + // sense that clone(2) allows a task to share signal handlers and address + // spaces with tasks in other thread groups. + if opts.NewAddressSpace || opts.NewSignalHandlers { + return syserror.EINVAL + } + if opts.NewThreadGroup { + t.tg.signalHandlers.mu.Lock() + if t.tg.tasksCount != 1 { + t.tg.signalHandlers.mu.Unlock() + return syserror.EINVAL + } + t.tg.signalHandlers.mu.Unlock() + // This isn't racy because we're the only living task, and therefore + // the only task capable of creating new ones, in our thread group. + } + if opts.NewUserNamespace { + if t.IsChrooted() { + return syserror.EPERM + } + // This temporary is needed because Go. + creds := t.Credentials() + newUserNS, err := creds.NewChildUserNamespace() + if err != nil { + return err + } + err = t.SetUserNamespace(newUserNS) + if err != nil { + return err + } + } + haveCapSysAdmin := t.HasCapability(linux.CAP_SYS_ADMIN) + if opts.NewPIDNamespace { + if !haveCapSysAdmin { + return syserror.EPERM + } + t.childPIDNamespace = t.tg.pidns.NewChild(t.UserNamespace()) + } + t.mu.Lock() + defer t.mu.Unlock() + if opts.NewNetworkNamespace { + if !haveCapSysAdmin { + return syserror.EPERM + } + t.netns = true + } + if opts.NewUTSNamespace { + if !haveCapSysAdmin { + return syserror.EPERM + } + // Note that this must happen after NewUserNamespace, so the + // new user namespace is used if there is one. 
+ t.utsns = t.utsns.Clone(t.creds.UserNamespace) + } + if opts.NewIPCNamespace { + if !haveCapSysAdmin { + return syserror.EPERM + } + // Note that "If CLONE_NEWIPC is set, then create the process in a new IPC + // namespace" + t.ipcns = NewIPCNamespace() + } + if opts.NewFiles { + oldFDMap := t.tr.FDMap + t.tr.FDMap = oldFDMap.Fork() + oldFDMap.DecRef() + } + if opts.NewFSContext { + oldFS := t.tr.FSContext + t.tr.FSContext = oldFS.Fork() + oldFS.DecRef() + } + return nil +} + +// vforkStop is a TaskStop imposed on a task that creates a child with +// CLONE_VFORK or vfork(2), that ends when the child task ceases to use its +// current MM. (Normally, CLONE_VFORK is used in conjunction with CLONE_VM, so +// that the child and parent share mappings until the child execve()s into a +// new process image or exits.) +type vforkStop struct{} + +// StopIgnoresKill implements TaskStop.Killable. +func (*vforkStop) Killable() bool { return true } diff --git a/pkg/sentry/kernel/task_context.go b/pkg/sentry/kernel/task_context.go new file mode 100644 index 000000000..5c563ba08 --- /dev/null +++ b/pkg/sentry/kernel/task_context.go @@ -0,0 +1,179 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kernel + +import ( + "errors" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/cpuid" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/futex" + "gvisor.googlesource.com/gvisor/pkg/sentry/loader" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// ErrNoSyscalls is returned if there is no syscall table. +var ErrNoSyscalls = errors.New("no syscall table found") + +// Auxmap contains miscellaneous data for the task. +type Auxmap map[string]interface{} + +// TaskContext is the subset of a task's data that is provided by the loader. +type TaskContext struct { + // Name is the thread name set by the prctl(PR_SET_NAME) system call. + Name string + + // Arch is the architecture-specific context (registers, etc.) + Arch arch.Context + + // MemoryManager is the task's address space. + MemoryManager *mm.MemoryManager + + // fu implements futexes in the address space. + fu *futex.Manager + + // st is the task's syscall table. + st *SyscallTable +} + +// release releases all resources held by the TaskContext. release is called by +// the task when it execs into a new TaskContext or exits. +func (tc *TaskContext) release() { + // Nil out pointers so that if the task is saved after release, it doesn't + // follow the pointers to possibly now-invalid objects. + if tc.MemoryManager != nil { + // TODO + tc.MemoryManager.DecUsers(context.Background()) + tc.MemoryManager = nil + } + tc.fu = nil +} + +// Fork returns a duplicate of tc. The copied TaskContext always has an +// independent arch.Context. If shareAddressSpace is true, the copied +// TaskContext shares an address space with the original; otherwise, the copied +// TaskContext has an independent address space that is initially a duplicate +// of the original's. 
+func (tc *TaskContext) Fork(ctx context.Context, shareAddressSpace bool) (*TaskContext, error) {
+	newTC := &TaskContext{
+		Arch: tc.Arch.Fork(),
+		st:   tc.st,
+	}
+	if shareAddressSpace {
+		newTC.MemoryManager = tc.MemoryManager
+		if newTC.MemoryManager != nil {
+			if !newTC.MemoryManager.IncUsers() {
+				// Shouldn't be possible since tc.MemoryManager should be a
+				// counted user. NOTE(review): fmt.Sprintf below has no format verbs; a plain string would do (staticcheck S1039).
+				panic(fmt.Sprintf("TaskContext.Fork called with userless TaskContext.MemoryManager"))
+			}
+		}
+		newTC.fu = tc.fu
+	} else {
+		newMM, err := tc.MemoryManager.Fork(ctx)
+		if err != nil {
+			return nil, err
+		}
+		newTC.MemoryManager = newMM
+		// TODO: revisit when shmem is supported.
+		newTC.fu = futex.NewManager()
+	}
+	return newTC, nil
+}
+
+// Arch returns t's arch.Context.
+//
+// Preconditions: The caller must be running on the task goroutine, or t.mu
+// must be locked.
+func (t *Task) Arch() arch.Context {
+	return t.tc.Arch
+}
+
+// MemoryManager returns t's MemoryManager. MemoryManager does not take an
+// additional reference on the returned MM.
+//
+// Preconditions: The caller must be running on the task goroutine, or t.mu
+// must be locked.
+func (t *Task) MemoryManager() *mm.MemoryManager {
+	return t.tc.MemoryManager
+}
+
+// Futex returns t's futex manager.
+//
+// Preconditions: The caller must be running on the task goroutine, or t.mu
+// must be locked.
+func (t *Task) Futex() *futex.Manager {
+	return t.tc.fu
+}
+
+// SyscallTable returns t's syscall table.
+//
+// Preconditions: The caller must be running on the task goroutine, or t.mu
+// must be locked.
+func (t *Task) SyscallTable() *SyscallTable {
+	return t.tc.st
+}
+
+// Stack returns the userspace stack. NOTE(review): the arch.Stack literal below is unkeyed (go vet composites); keyed fields would be safer.
+//
+// Preconditions: The caller must be running on the task goroutine, or t.mu
+// must be locked.
+func (t *Task) Stack() *arch.Stack {
+	return &arch.Stack{t.Arch(), t.MemoryManager(), usermem.Addr(t.Arch().Stack())}
+}
+
+// LoadTaskImage loads filename into a new TaskContext.
+// +// It takes several arguments: +// * mounts: MountNamespace to lookup filename in +// * root: Root to lookup filename under +// * wd: Working directory to lookup filename under +// * maxTraversals: maximum number of symlinks to follow +// * filename: path to binary to load +// * argv: Binary argv +// * envv: Binary envv +// * fs: Binary FeatureSet +func (k *Kernel) LoadTaskImage(ctx context.Context, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals uint, filename string, argv, envv []string, fs *cpuid.FeatureSet) (*TaskContext, error) { + // Prepare a new user address space to load into. + m := mm.NewMemoryManager(k) + defer m.DecUsers(ctx) + + os, ac, name, err := loader.Load(ctx, m, mounts, root, wd, maxTraversals, fs, filename, argv, envv, k.extraAuxv, k.vdso) + if err != nil { + return nil, err + } + + // Lookup our new syscall table. + st, ok := LookupSyscallTable(os, ac.Arch()) + if !ok { + // No syscall table found. Yikes. + return nil, ErrNoSyscalls + } + + if !m.IncUsers() { + panic("Failed to increment users count on new MM") + } + return &TaskContext{ + Name: name, + Arch: ac, + MemoryManager: m, + fu: futex.NewManager(), + st: st, + }, nil +} diff --git a/pkg/sentry/kernel/task_exec.go b/pkg/sentry/kernel/task_exec.go new file mode 100644 index 000000000..2285847a2 --- /dev/null +++ b/pkg/sentry/kernel/task_exec.go @@ -0,0 +1,240 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package kernel
+
+// This file implements the machinery behind the execve() syscall. In brief, a
+// thread executes an execve() by killing all other threads in its thread
+// group, assuming the leader's identity, and then switching process images.
+//
+// This design is effectively mandated by Linux. From ptrace(2):
+//
+// """
+// execve(2) under ptrace
+// When one thread in a multithreaded process calls execve(2), the
+// kernel destroys all other threads in the process, and resets the
+// thread ID of the execing thread to the thread group ID (process ID).
+// (Or, to put things another way, when a multithreaded process does an
+// execve(2), at completion of the call, it appears as though the
+// execve(2) occurred in the thread group leader, regardless of which
+// thread did the execve(2).) This resetting of the thread ID looks
+// very confusing to tracers:
+//
+// * All other threads stop in PTRACE_EVENT_EXIT stop, if the
+// PTRACE_O_TRACEEXIT option was turned on. Then all other threads
+// except the thread group leader report death as if they exited via
+// _exit(2) with exit code 0.
+//
+// * The execing tracee changes its thread ID while it is in the
+// execve(2). (Remember, under ptrace, the "pid" returned from
+// waitpid(2), or fed into ptrace calls, is the tracee's thread ID.)
+// That is, the tracee's thread ID is reset to be the same as its
+// process ID, which is the same as the thread group leader's thread
+// ID.
+//
+// * Then a PTRACE_EVENT_EXEC stop happens, if the PTRACE_O_TRACEEXEC
+// option was turned on.
+//
+// * If the thread group leader has reported its PTRACE_EVENT_EXIT stop
+// by this time, it appears to the tracer that the dead thread leader
+// "reappears from nowhere". (Note: the thread group leader does not
+// report death via WIFEXITED(status) until there is at least one
+// other live thread. This eliminates the possibility that the
+// tracer will see it dying and then reappearing.) If the thread
+// group leader was still alive, for the tracer this may look as if
+// thread group leader returns from a different system call than it
+// entered, or even "returned from a system call even though it was
+// not in any system call". If the thread group leader was not
+// traced (or was traced by a different tracer), then during
+// execve(2) it will appear as if it has become a tracee of the
+// tracer of the execing tracee.
+//
+// All of the above effects are the artifacts of the thread ID change in
+// the tracee.
+// """
+
+import (
+ "gvisor.googlesource.com/gvisor/pkg/abi/linux"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/arch"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+ "gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+// execStop is a TaskStop that a task sets on itself when it wants to execve
+// and is waiting for the other tasks in its thread group to exit first.
+type execStop struct{}
+
+// Killable implements TaskStop.Killable.
+func (*execStop) Killable() bool { return true }
+
+// Execve implements the execve(2) syscall by killing all other tasks in its
+// thread group and switching to newTC. Execve always takes ownership of newTC.
+//
+// Preconditions: The caller must be running Task.doSyscallInvoke on the task
+// goroutine.
+func (t *Task) Execve(newTC *TaskContext) (*SyscallControl, error) {
+ t.tg.pidns.owner.mu.Lock()
+ defer t.tg.pidns.owner.mu.Unlock()
+ t.tg.signalHandlers.mu.Lock()
+ defer t.tg.signalHandlers.mu.Unlock()
+
+ if t.tg.exiting || t.tg.execing != nil {
+ // We lost to a racing group-exit, kill, or exec from another thread
+ // and should just exit.
+ newTC.release()
+ return nil, syserror.EINTR
+ }
+
+ // Cancel any racing group stops.
+ t.tg.endGroupStopLocked(false)
+
+ // If the task has any siblings, they have to exit before the exec can
+ // continue.
+ t.tg.execing = t
+ if t.tg.tasks.Front() != t.tg.tasks.Back() {
+ // "[All] other threads except the thread group leader report death as
+ // if they exited via _exit(2) with exit code 0." - ptrace(2)
+ for sibling := t.tg.tasks.Front(); sibling != nil; sibling = sibling.Next() {
+ if t != sibling {
+ sibling.killLocked()
+ }
+ }
+ // The last sibling to exit will wake t.
+ t.beginInternalStopLocked((*execStop)(nil))
+ }
+
+ return &SyscallControl{next: &runSyscallAfterExecStop{newTC}, ignoreReturn: true}, nil
+}
+
+// The runSyscallAfterExecStop state continues execve(2) after all siblings of
+// a thread in the execve syscall have exited.
+type runSyscallAfterExecStop struct {
+ tc *TaskContext
+}
+
+// execute implements taskRunState.execute.
+func (r *runSyscallAfterExecStop) execute(t *Task) taskRunState {
+ t.tg.pidns.owner.mu.Lock()
+ t.tg.execing = nil
+ if t.killed() {
+ t.tg.pidns.owner.mu.Unlock()
+ r.tc.release()
+ return (*runInterrupt)(nil)
+ }
+ // We are the thread group leader now. Save our old thread ID for
+ // PTRACE_EVENT_EXEC. This is racy in that if a tracer attaches after this
+ // point it will get a PID of 0, but this is consistent with Linux.
+ oldTID := ThreadID(0)
+ if tracer := t.Tracer(); tracer != nil {
+ oldTID = tracer.tg.pidns.tids[t]
+ }
+ t.promoteLocked()
+ // "During an execve(2), the dispositions of handled signals are reset to
+ // the default; the dispositions of ignored signals are left unchanged. ...
+ // [The] signal mask is preserved across execve(2). ... [The] pending
+ // signal set is preserved across an execve(2)." - signal(7)
+ //
+ // Details:
+ //
+ // - If the thread group is sharing its signal handlers with another thread
+ // group via CLONE_SIGHAND, execve forces the signal handlers to be copied
+ // (see Linux's fs/exec.c:de_thread). We're not reference-counting signal
+ // handlers, so we always make a copy.
+ //
+ // - "Disposition" only means sigaction::sa_handler/sa_sigaction; flags,
+ // restorer (if present), and mask are always reset. (See Linux's
+ // fs/exec.c:setup_new_exec => kernel/signal.c:flush_signal_handlers.)
+ t.tg.signalHandlers = t.tg.signalHandlers.CopyForExec()
+ t.endStopCond.L = &t.tg.signalHandlers.mu
+ // "Any alternate signal stack is not preserved (sigaltstack(2))." - execve(2)
+ t.signalStack = arch.SignalStack{Flags: arch.SignalStackFlagDisable}
+ // "The termination signal is reset to SIGCHLD (see clone(2))."
+ t.tg.terminationSignal = linux.SIGCHLD
+ // execed indicates that the process can no longer join a process group
+ // in some scenarios (namely, the parent calls setpgid(2) on the child).
+ // See the JoinProcessGroup function in sessions.go for more context.
+ t.tg.execed = true
+ // Maximum RSS is preserved across execve(2).
+ t.updateRSSLocked()
+ // Restartable sequence state is discarded.
+ t.rseqPreempted = false
+ t.rseqCPUAddr = 0
+ t.rseqCPU = -1
+ t.tg.rscr.Store(&RSEQCriticalRegion{})
+ t.tg.pidns.owner.mu.Unlock()
+
+ // Remove FDs with the CloseOnExec flag set.
+ t.FDMap().RemoveIf(func(file *fs.File, flags FDFlags) bool {
+ return flags.CloseOnExec
+ })
+
+ // Switch to the new process.
+ t.MemoryManager().Deactivate()
+ t.mu.Lock()
+ // Update credentials to reflect the execve. This should precede switching
+ // MMs to ensure that dumpability has been reset first, if needed.
+ t.updateCredsForExecLocked()
+ t.tc.release()
+ t.tc = *r.tc
+ t.mu.Unlock()
+ t.unstopVforkParent()
+ // NOTE: All locks must be dropped prior to calling Activate.
+ t.MemoryManager().Activate()
+
+ t.ptraceExec(oldTID)
+ return (*runSyscallExit)(nil)
+}
+
+// promoteLocked makes t the leader of its thread group. If t is already the
+// thread group leader, promoteLocked is a no-op.
+//
+// Preconditions: All other tasks in t's thread group, including the existing
+// leader (if it is not t), have reached TaskExitZombie. The TaskSet mutex must
+// be locked for writing.
+func (t *Task) promoteLocked() {
+ oldLeader := t.tg.leader
+ if t == oldLeader {
+ return
+ }
+ // Swap the leader's TIDs with the execing task's. The latter will be
+ // released when the old leader is reaped below.
+ for ns := t.tg.pidns; ns != nil; ns = ns.parent {
+ oldTID, leaderTID := ns.tids[t], ns.tids[oldLeader]
+ ns.tids[oldLeader] = oldTID
+ ns.tids[t] = leaderTID
+ ns.tasks[oldTID] = oldLeader
+ ns.tasks[leaderTID] = t
+ }
+
+ // Inherit the old leader's start time.
+ oldStartTime := oldLeader.StartTime()
+ t.mu.Lock()
+ t.startTime = oldStartTime
+ t.mu.Unlock()
+
+ t.tg.leader = t
+ t.Infof("Becoming TID %d (in root PID namespace)", t.tg.pidns.owner.Root.tids[t])
+ t.updateLogPrefixLocked()
+ // Reap the original leader. If it has a tracer, detach it instead of
+ // waiting for it to acknowledge the original leader's death.
+ oldLeader.exitParentNotified = true
+ oldLeader.exitParentAcked = true
+ if tracer := oldLeader.Tracer(); tracer != nil {
+ delete(tracer.ptraceTracees, oldLeader)
+ oldLeader.forgetTracerLocked()
+ // Notify the tracer that it will no longer be receiving these events
+ // from the tracee.
+ tracer.tg.eventQueue.Notify(EventExit | EventTraceeStop | EventGroupContinue)
+ }
+ oldLeader.exitNotifyLocked(false)
+}
diff --git a/pkg/sentry/kernel/task_exit.go b/pkg/sentry/kernel/task_exit.go
new file mode 100644
index 000000000..3d49ae350
--- /dev/null
+++ b/pkg/sentry/kernel/task_exit.go
@@ -0,0 +1,1139 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kernel
+
+// This file implements the task exit cycle:
+//
+// - Tasks are asynchronously requested to exit with Task.Kill.
+//
+// - When able, the task goroutine enters the exit path starting from state
+// runExit.
+//
+// - Other tasks observe completed exits with Task.Wait (which implements the
+// wait*() family of syscalls).
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+
+ "gvisor.googlesource.com/gvisor/pkg/abi/linux"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/arch"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
+ "gvisor.googlesource.com/gvisor/pkg/syserror"
+ "gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+// An ExitStatus is a value communicated from an exiting task or thread group
+// to the party that reaps it.
+type ExitStatus struct {
+ // Code is the numeric value passed to the call to exit or exit_group that
+ // caused the exit. If the exit was not caused by such a call, Code is 0.
+ Code int
+
+ // Signo is the signal that caused the exit. If the exit was not caused by
+ // a signal, Signo is 0.
+ Signo int
+}
+
+// Signaled returns true if the ExitStatus indicates that the exiting task or
+// thread group was killed by a signal.
+func (es ExitStatus) Signaled() bool {
+ return es.Signo != 0
+}
+
+// Status returns the numeric representation of the ExitStatus returned by e.g.
+// the wait4() system call. The encoding is (Code << 8) | Signo, with both
+// fields truncated to 8 bits.
+func (es ExitStatus) Status() uint32 {
+ return ((uint32(es.Code) & 0xff) << 8) | (uint32(es.Signo) & 0xff)
+}
+
+// ShellExitCode returns the numeric exit code that Bash would return for an
+// exit status of es. (Fatal signals are reported as 128 + signal number.)
+func (es ExitStatus) ShellExitCode() int {
+ if es.Signaled() {
+ return 128 + es.Signo
+ }
+ return es.Code
+}
+
+// TaskExitState represents a step in the task exit path.
+//
+// "Exiting" and "exited" are often ambiguous; prefer to name specific states.
+type TaskExitState int
+
+const (
+ // TaskExitNone indicates that the task has not begun exiting.
+ TaskExitNone TaskExitState = iota
+
+ // TaskExitInitiated indicates that the task goroutine has entered the exit
+ // path, and the task is no longer eligible to participate in group stops
+ // or group signal handling. TaskExitInitiated is analogous to Linux's
+ // PF_EXITING.
+ TaskExitInitiated
+
+ // TaskExitZombie indicates that the task has released its resources, and
+ // the task no longer prevents a sibling thread from completing execve.
+ TaskExitZombie
+
+ // TaskExitDead indicates that the task's thread IDs have been released,
+ // and the task no longer prevents its thread group leader from being
+ // reaped. ("Reaping" refers to the transitioning of a task from
+ // TaskExitZombie to TaskExitDead.)
+ TaskExitDead
+)
+
+// String implements fmt.Stringer.
+func (t TaskExitState) String() string {
+ switch t {
+ case TaskExitNone:
+ return "TaskExitNone"
+ case TaskExitInitiated:
+ return "TaskExitInitiated"
+ case TaskExitZombie:
+ return "TaskExitZombie"
+ case TaskExitDead:
+ return "TaskExitDead"
+ default:
+ return strconv.Itoa(int(t))
+ }
+}
+
+// killLocked marks t as killed by enqueueing a SIGKILL, without causing the
+// thread-group-affecting side effects SIGKILL usually has.
+//
+// Preconditions: The signal mutex must be locked.
+func (t *Task) killLocked() {
+ // Clear killable stops.
+ if t.stop != nil && t.stop.Killable() {
+ t.endInternalStopLocked()
+ }
+ t.groupStopRequired = false
+ t.pendingSignals.enqueue(&arch.SignalInfo{
+ Signo: int32(linux.SIGKILL),
+ // Linux just sets SIGKILL in the pending signal bitmask without
+ // enqueueing an actual siginfo, such that
+ // kernel/signal.c:collect_signal() initializes si_code to SI_USER.
+ Code: arch.SignalInfoUser,
+ })
+ t.interrupt()
+}
+
+// killed returns true if t has a SIGKILL pending. killed is analogous to
+// Linux's fatal_signal_pending().
+//
+// Preconditions: The caller must be running on the task goroutine.
+func (t *Task) killed() bool {
+ t.tg.signalHandlers.mu.Lock()
+ defer t.tg.signalHandlers.mu.Unlock()
+ return t.killedLocked()
+}
+
+// killedLocked returns true if t has a SIGKILL pending.
+//
+// Preconditions: The signal mutex must be locked.
+func (t *Task) killedLocked() bool {
+ return t.pendingSignals.pendingSet&linux.SignalSetOf(linux.SIGKILL) != 0
+}
+
+// PrepareExit indicates an exit with status es.
+//
+// Preconditions: The caller must be running on the task goroutine.
+func (t *Task) PrepareExit(es ExitStatus) {
+ t.tg.signalHandlers.mu.Lock()
+ defer t.tg.signalHandlers.mu.Unlock()
+ t.exitStatus = es
+}
+
+// PrepareGroupExit indicates a group exit with status es to t's thread group.
+//
+// PrepareGroupExit is analogous to Linux's do_group_exit(), except that it
+// does not tail-call do_exit(), except that it *does* set Task.exitStatus.
+// (Linux does not do so until within do_exit(), since it reuses exit_code for
+// ptrace.)
+//
+// Preconditions: The caller must be running on the task goroutine.
+func (t *Task) PrepareGroupExit(es ExitStatus) {
+ t.tg.signalHandlers.mu.Lock()
+ defer t.tg.signalHandlers.mu.Unlock()
+ if t.tg.exiting || t.tg.execing != nil {
+ // Note that if t.tg.exiting is false but t.tg.execing is not nil, i.e.
+ // this "group exit" is being executed by the killed sibling of an
+ // execing task, then Task.Execve never set t.tg.exitStatus, so it's
+ // still the zero value. This is consistent with Linux, both in intent
+ // ("all other threads ... report death as if they exited via _exit(2)
+ // with exit code 0" - ptrace(2), "execve under ptrace") and in
+ // implementation (compare fs/exec.c:de_thread() =>
+ // kernel/signal.c:zap_other_threads() and
+ // kernel/exit.c:do_group_exit() =>
+ // include/linux/sched.h:signal_group_exit()).
+ t.exitStatus = t.tg.exitStatus
+ return
+ }
+ t.tg.exiting = true
+ t.tg.exitStatus = es
+ t.exitStatus = es
+ for sibling := t.tg.tasks.Front(); sibling != nil; sibling = sibling.Next() {
+ if sibling != t {
+ sibling.killLocked()
+ }
+ }
+}
+
+// Kill requests that all tasks in ts exit as if group exiting with status es.
+// Kill does not wait for tasks to exit.
+//
+// Kill has no analogue in Linux; it's provided for save/restore only.
+func (ts *TaskSet) Kill(es ExitStatus) {
+ ts.mu.Lock()
+ defer ts.mu.Unlock()
+ ts.Root.exiting = true
+ for t := range ts.Root.tids {
+ t.tg.signalHandlers.mu.Lock()
+ if !t.tg.exiting {
+ t.tg.exiting = true
+ t.tg.exitStatus = es
+ }
+ t.killLocked()
+ t.tg.signalHandlers.mu.Unlock()
+ }
+}
+
+// advanceExitStateLocked checks that t's current exit state is oldExit, then
+// sets it to newExit. If t's current exit state is not oldExit,
+// advanceExitStateLocked panics.
+//
+// Preconditions: The TaskSet mutex must be locked.
+func (t *Task) advanceExitStateLocked(oldExit, newExit TaskExitState) {
+ if t.exitState != oldExit {
+ panic(fmt.Sprintf("Transitioning from exit state %v to %v: unexpected preceding state %v", oldExit, newExit, t.exitState))
+ }
+ t.Debugf("Transitioning from exit state %v to %v", oldExit, newExit)
+ t.exitState = newExit
+}
+
+// runExit is the entry point into the task exit path.
+type runExit struct{}
+
+// execute implements taskRunState.execute.
+func (*runExit) execute(t *Task) taskRunState {
+ t.ptraceExit()
+ return (*runExitMain)(nil)
+}
+
+// runExitMain performs the bulk of the exit work: releasing the task's
+// resources, detaching tracees, and reparenting children.
+type runExitMain struct{}
+
+// execute implements taskRunState.execute.
+func (*runExitMain) execute(t *Task) taskRunState {
+ lastExiter := t.exitThreadGroup()
+
+ // If the task has a cleartid, and the thread group wasn't killed by a
+ // signal, handle that before releasing the MM.
+ if t.cleartid != 0 {
+ t.tg.signalHandlers.mu.Lock()
+ signaled := t.tg.exiting && t.tg.exitStatus.Signaled()
+ t.tg.signalHandlers.mu.Unlock()
+ if !signaled {
+ if _, err := t.CopyOut(t.cleartid, ThreadID(0)); err == nil {
+ t.Futex().Wake(uintptr(t.cleartid), ^uint32(0), 1)
+ }
+ // If the CopyOut fails, there's nothing we can do.
+ }
+ }
+
+ // Deactivate the address space before releasing the MM.
+ t.Deactivate()
+
+ // Update the max resident set size before releasing t.tc.mm.
+ t.tg.pidns.owner.mu.Lock()
+ t.updateRSSLocked()
+ t.tg.pidns.owner.mu.Unlock()
+
+ // Release all of the task's resources.
+ t.mu.Lock()
+ t.tc.release()
+ t.tr.release()
+ t.mu.Unlock()
+ t.unstopVforkParent()
+
+ // If this is the last task to exit from the thread group, release the
+ // thread group's resources.
+ if lastExiter {
+ t.tg.release()
+ }
+
+ // Detach tracees.
+ t.exitPtrace()
+
+ // Reparent the task's children.
+ t.exitChildren()
+
+ // Don't tail-call runExitNotify, as exitChildren may have initiated a stop
+ // to wait for a PID namespace to die.
+ return (*runExitNotify)(nil)
+}
+
+// exitThreadGroup transitions t to TaskExitInitiated, indicating to t's thread
+// group that it is no longer eligible to participate in group activities. It
+// returns true if t is the last task in its thread group to call
+// exitThreadGroup.
+func (t *Task) exitThreadGroup() bool {
+ t.tg.pidns.owner.mu.Lock()
+ defer t.tg.pidns.owner.mu.Unlock()
+ t.tg.signalHandlers.mu.Lock()
+ // Can't defer unlock: see below.
+
+ t.advanceExitStateLocked(TaskExitNone, TaskExitInitiated)
+ t.tg.activeTasks--
+ last := t.tg.activeTasks == 0
+
+ // Ensure that someone will handle the signals we can't.
+ t.setSignalMaskLocked(^linux.SignalSet(0))
+
+ // Check if this task's exit interacts with an initiated group stop.
+ if t.tg.groupStopPhase != groupStopInitiated {
+ t.tg.signalHandlers.mu.Unlock()
+ return last
+ }
+ if t.groupStopAcknowledged {
+ // Un-acknowledge the group stop.
+ t.tg.groupStopCount--
+ t.groupStopAcknowledged = false
+ // If the group stop wasn't complete before, then there is still at
+ // least one other task that hasn't acknowledged the group stop, so
+ // it is still not complete now.
+ t.tg.signalHandlers.mu.Unlock()
+ return last
+ }
+ if t.tg.groupStopCount != t.tg.activeTasks {
+ t.tg.signalHandlers.mu.Unlock()
+ return last
+ }
+ t.Debugf("Completing group stop")
+ t.tg.groupStopPhase = groupStopComplete
+ t.tg.groupStopWaitable = true
+ sig := t.tg.groupStopSignal
+ t.tg.groupContNotify = false
+ t.tg.groupContWaitable = false
+ // signalStop must be called with t's signal mutex unlocked.
+ t.tg.signalHandlers.mu.Unlock()
+ if t.tg.leader.parent != nil {
+ t.tg.leader.parent.signalStop(t, arch.CLD_STOPPED, int32(sig))
+ t.tg.leader.parent.tg.eventQueue.Notify(EventChildGroupStop)
+ }
+ return last
+}
+
+// exitChildren reparents t's children to a suitable surviving task, sending
+// parent-death signals where requested. If no reparent target exists (t is
+// the init process of its PID namespace), all other processes in the
+// namespace are killed instead.
+func (t *Task) exitChildren() {
+ t.tg.pidns.owner.mu.Lock()
+ defer t.tg.pidns.owner.mu.Unlock()
+ newParent := t.findReparentTargetLocked()
+ if newParent == nil {
+ // "If the init process of a PID namespace terminates, the kernel
+ // terminates all of the processes in the namespace via a SIGKILL
+ // signal." - pid_namespaces(7)
+ t.Debugf("Init process terminating, killing namespace")
+ t.tg.pidns.exiting = true
+ for other := range t.tg.pidns.tids {
+ if other.tg != t.tg {
+ other.tg.signalHandlers.mu.Lock()
+ other.sendSignalLocked(&arch.SignalInfo{
+ Signo: int32(linux.SIGKILL),
+ }, false /* group */)
+ other.tg.signalHandlers.mu.Unlock()
+ }
+ }
+ // TODO: The init process waits for all processes in the
+ // namespace to exit before completing its own exit
+ // (kernel/pid_namespace.c:zap_pid_ns_processes()). Stop until all
+ // other tasks in the namespace are dead, except possibly for this
+ // thread group's leader (which can't be reaped until this task exits).
+ }
+ // This is correct even if newParent is nil (it ensures that children don't
+ // wait for a parent to reap them.)
+ for c := range t.children {
+ if sig := c.ParentDeathSignal(); sig != 0 {
+ siginfo := &arch.SignalInfo{
+ Signo: int32(sig),
+ Code: arch.SignalInfoUser,
+ }
+ siginfo.SetPid(int32(c.tg.pidns.tids[t]))
+ siginfo.SetUid(int32(t.Credentials().RealKUID.In(c.UserNamespace()).OrOverflow()))
+ c.tg.signalHandlers.mu.Lock()
+ c.sendSignalLocked(siginfo, true /* group */)
+ c.tg.signalHandlers.mu.Unlock()
+ }
+ c.reparentLocked(newParent)
+ if newParent != nil {
+ newParent.children[c] = struct{}{}
+ }
+ }
+}
+
+// findReparentTargetLocked returns the task to which t's children should be
+// reparented. If no such task exists, findReparentTargetLocked returns nil.
+//
+// Preconditions: The TaskSet mutex must be locked.
+func (t *Task) findReparentTargetLocked() *Task {
+ // Reparent to any sibling in the same thread group that hasn't begun
+ // exiting.
+ if t2 := t.tg.anyNonExitingTaskLocked(); t2 != nil {
+ return t2
+ }
+ // "A child process that is orphaned within the namespace will be
+ // reparented to [the init process for the namespace] ..." -
+ // pid_namespaces(7)
+ if init := t.tg.pidns.tasks[InitTID]; init != nil {
+ return init.tg.anyNonExitingTaskLocked()
+ }
+ return nil
+}
+
+// anyNonExitingTaskLocked returns any task in tg that has not begun exiting,
+// or nil if all tasks have.
+//
+// Preconditions: The TaskSet mutex must be locked.
+func (tg *ThreadGroup) anyNonExitingTaskLocked() *Task {
+ for t := tg.tasks.Front(); t != nil; t = t.Next() {
+ if t.exitState == TaskExitNone {
+ return t
+ }
+ }
+ return nil
+}
+
+// reparentLocked changes t's parent. The new parent may be nil.
+//
+// Preconditions: The TaskSet mutex must be locked for writing.
+func (t *Task) reparentLocked(parent *Task) {
+ oldParent := t.parent
+ t.parent = parent
+ // If a thread group leader's parent changes, reset the thread group's
+ // termination signal to SIGCHLD and re-check exit notification. (Compare
+ // kernel/exit.c:reparent_leader().)
+ if t != t.tg.leader {
+ return
+ }
+ if oldParent == nil && parent == nil {
+ return
+ }
+ if oldParent != nil && parent != nil && oldParent.tg == parent.tg {
+ return
+ }
+ t.tg.terminationSignal = linux.SIGCHLD
+ if t.exitParentNotified && !t.exitParentAcked {
+ t.exitParentNotified = false
+ t.exitNotifyLocked(false)
+ }
+}
+
+// When a task exits, other tasks in the system, notably the task's parent and
+// ptracer, may want to be notified. The exit notification system ensures that
+// interested tasks receive signals and/or are woken from blocking calls to
+// wait*() syscalls; these notifications must be resolved before exiting tasks
+// can be reaped and disappear from the system.
+//
+// Each task may have a parent task and/or a tracer task. If both a parent and
+// a tracer exist, they may be the same task, different tasks in the same
+// thread group, or tasks in different thread groups. (In the last case, Linux
+// refers to the task as being ptrace-reparented due to an implementation
+// detail; we avoid this terminology to avoid confusion.)
+//
+// A thread group is *empty* if all non-leader tasks in the thread group are
+// dead, and the leader is either a zombie or dead. The exit of a thread group
+// leader is never waitable - by either the parent or tracer - until the thread
+// group is empty.
+//
+// There are a few ways for an exit notification to be resolved:
+//
+// - The exit notification may be acknowledged by a call to Task.Wait with
+// WaitOptions.ConsumeEvent set (e.g. due to a wait4() syscall).
+//
+// - If the notified party is the parent, and the parent thread group is not
+// also the tracer thread group, and the notification signal is SIGCHLD, the
+// parent may explicitly ignore the notification (see quote in exitNotify).
+// Note that it's possible for the notified party to ignore the signal in other
+// cases, but the notification is only resolved under the above conditions.
+// (Actually, there is one exception; see the last paragraph of the "leader,
+// has tracer, tracer thread group is parent thread group" case below.)
+//
+// - If the notified party is the parent, and the parent does not exist, the
+// notification is resolved as if ignored. (This is only possible in the
+// sentry. In Linux, the only task / thread group without a parent is global
+// init, and killing global init causes a kernel panic.)
+//
+// - If the notified party is a tracer, the tracer may detach the traced task.
+// (Zombie tasks cannot be ptrace-attached, so the reverse is not possible.)
+//
+// In addition, if the notified party is the parent, the parent may exit and
+// cause the notifying task to be reparented to another thread group. This does
+// not resolve the notification; instead, the notification must be resent to
+// the new parent.
+//
+// The series of notifications generated for a given task's exit depend on
+// whether it is a thread group leader; whether the task is ptraced; and, if
+// so, whether the tracer thread group is the same as the parent thread group.
+//
+// - Non-leader, no tracer: No notification is generated; the task is reaped
+// immediately.
+//
+// - Non-leader, has tracer: SIGCHLD is sent to the tracer. When the tracer
+// notification is resolved (by waiting or detaching), the task is reaped. (For
+// non-leaders, whether the tracer and parent thread groups are the same is
+// irrelevant.)
+//
+// - Leader, no tracer: The task remains a zombie, with no notification sent,
+// until all other tasks in the thread group are dead. (In Linux terms, this
+// condition is indicated by include/linux/sched.h:thread_group_empty(); tasks
+// are removed from their thread_group list in kernel/exit.c:release_task() =>
+// __exit_signal() => __unhash_process().) Then the thread group's termination
+// signal is sent to the parent. When the parent notification is resolved (by
+// waiting or ignoring), the task is reaped.
+//
+// - Leader, has tracer, tracer thread group is not parent thread group:
+// SIGCHLD is sent to the tracer. When the tracer notification is resolved (by
+// waiting or detaching), and all other tasks in the thread group are dead, the
+// thread group's termination signal is sent to the parent. (Note that the
+// tracer cannot resolve the exit notification by waiting until the thread
+// group is empty.) When the parent notification is resolved, the task is
+// reaped.
+//
+// - Leader, has tracer, tracer thread group is parent thread group:
+//
+// If all other tasks in the thread group are dead, the thread group's
+// termination signal is sent to the parent. At this point, the notification
+// can only be resolved by waiting. If the parent detaches from the task as a
+// tracer, the notification is not resolved, but the notification can now be
+// resolved by waiting or ignoring. When the parent notification is resolved,
+// the task is reaped.
+//
+// If at least one task in the thread group is not dead, SIGCHLD is sent to the
+// parent. At this point, the notification cannot be resolved at all; once the
+// thread group becomes empty, it can be resolved only by waiting. If the
+// parent detaches from the task as a tracer before all remaining tasks die,
+// then exit notification proceeds as in the case where the leader never had a
+// tracer. If the parent detaches from the task as a tracer after all remaining
+// tasks die, the notification is not resolved, but the notification can now be
+// resolved by waiting or ignoring. When the parent notification is resolved,
+// the task is reaped.
+//
+// In both of the above cases, when the parent detaches from the task as a
+// tracer while the thread group is empty, whether or not the parent resolves
+// the notification by ignoring it is based on the parent's SIGCHLD signal
+// action, whether or not the thread group's termination signal is SIGCHLD
+// (Linux: kernel/ptrace.c:__ptrace_detach() => ignoring_children()).
+//
+// There is one final wrinkle: A leader can become a non-leader due to a
+// sibling execve. In this case, the execing thread detaches the leader's
+// tracer (if one exists) and reaps the leader immediately. In Linux, this is
+// in fs/exec.c:de_thread(); in the sentry, this is in Task.promoteLocked().
+
+type runExitNotify struct{}
+
+// execute implements taskRunState.execute.
+func (*runExitNotify) execute(t *Task) taskRunState {
+ t.tg.pidns.owner.mu.Lock()
+ defer t.tg.pidns.owner.mu.Unlock()
+ t.advanceExitStateLocked(TaskExitInitiated, TaskExitZombie)
+ t.tg.liveTasks--
+ // Check if this completes a sibling's execve.
+ if t.tg.execing != nil && t.tg.liveTasks == 1 {
+ // execing blocks the addition of new tasks to the thread group, so
+ // the sole living task must be the execing one.
+ e := t.tg.execing
+ e.tg.signalHandlers.mu.Lock()
+ if _, ok := e.stop.(*execStop); ok {
+ e.endInternalStopLocked()
+ }
+ e.tg.signalHandlers.mu.Unlock()
+ }
+ t.exitNotifyLocked(false)
+ // The task goroutine will now exit.
+ return nil
+}
+
+// exitNotifyLocked is called after changes to t's state that affect exit
+// notification.
+//
+// If fromPtraceDetach is true, the caller is ptraceDetach or exitPtrace;
+// thanks to Linux's haphazard implementation of this functionality, such cases
+// determine whether parent notifications are ignored based on the parent's
+// handling of SIGCHLD, regardless of what the exited task's thread group's
+// termination signal is.
+//
+// Preconditions: The TaskSet mutex must be locked for writing.
+func (t *Task) exitNotifyLocked(fromPtraceDetach bool) {
+ if t.exitState != TaskExitZombie {
+ return
+ }
+ if !t.exitTracerNotified {
+ t.exitTracerNotified = true
+ tracer := t.Tracer()
+ if tracer == nil {
+ t.exitTracerAcked = true
+ } else if t != t.tg.leader || t.parent == nil || tracer.tg != t.parent.tg {
+ // Don't set exitParentNotified if t is non-leader, even if the
+ // tracer is in the parent thread group, so that if the parent
+ // detaches the following call to exitNotifyLocked passes through
+ // the !exitParentNotified case below and causes t to be reaped
+ // immediately.
+ //
+ // Tracer notification doesn't care about
+ // SIG_IGN/SA_NOCLDWAIT.
+ tracer.tg.signalHandlers.mu.Lock()
+ tracer.sendSignalLocked(t.exitNotificationSignal(linux.SIGCHLD, tracer), true /* group */)
+ tracer.tg.signalHandlers.mu.Unlock()
+ // Wake EventTraceeStop waiters as well since this task will never
+ // ptrace-stop again.
+ tracer.tg.eventQueue.Notify(EventExit | EventTraceeStop)
+ } else {
+ // t is a leader and the tracer is in the parent thread group.
+ t.exitParentNotified = true
+ sig := linux.SIGCHLD
+ if t.tg.tasksCount == 1 {
+ sig = t.tg.terminationSignal
+ }
+ // This notification doesn't care about SIG_IGN/SA_NOCLDWAIT either
+ // (in Linux, the check in do_notify_parent() is gated by
+ // !tsk->ptrace.)
+ t.parent.tg.signalHandlers.mu.Lock()
+ t.parent.sendSignalLocked(t.exitNotificationSignal(sig, t.parent), true /* group */)
+ t.parent.tg.signalHandlers.mu.Unlock()
+ // See below for rationale for this event mask.
+ t.parent.tg.eventQueue.Notify(EventExit | EventChildGroupStop | EventGroupContinue)
+ }
+ }
+ if t.exitTracerAcked && !t.exitParentNotified {
+ if t != t.tg.leader {
+ t.exitParentNotified = true
+ t.exitParentAcked = true
+ } else if t.tg.tasksCount == 1 {
+ t.exitParentNotified = true
+ if t.parent == nil {
+ t.exitParentAcked = true
+ } else {
+ // "POSIX.1-2001 specifies that if the disposition of SIGCHLD is
+ // set to SIG_IGN or the SA_NOCLDWAIT flag is set for SIGCHLD (see
+ // sigaction(2)), then children that terminate do not become
+ // zombies and a call to wait() or waitpid() will block until all
+ // children have terminated, and then fail with errno set to
+ // ECHILD. (The original POSIX standard left the behavior of
+ // setting SIGCHLD to SIG_IGN unspecified. Note that even though
+ // the default disposition of SIGCHLD is "ignore", explicitly
+ // setting the disposition to SIG_IGN results in different
+ // treatment of zombie process children.) Linux 2.6 conforms to
+ // this specification." - wait(2)
+ //
+ // Some undocumented Linux-specific details:
+ //
+ // - All of the above is ignored if the termination signal isn't
+ // SIGCHLD.
+ //
+ // - SA_NOCLDWAIT causes the leader to be immediately reaped, but
+ // does not suppress the SIGCHLD.
+ signalParent := t.tg.terminationSignal.IsValid()
+ t.parent.tg.signalHandlers.mu.Lock()
+ if t.tg.terminationSignal == linux.SIGCHLD || fromPtraceDetach {
+ if act, ok := t.parent.tg.signalHandlers.actions[linux.SIGCHLD]; ok {
+ if act.Handler == arch.SignalActIgnore {
+ t.exitParentAcked = true
+ signalParent = false
+ } else if act.Flags&arch.SignalFlagNoCldWait != 0 {
+ t.exitParentAcked = true
+ }
+ }
+ }
+ if signalParent {
+ t.parent.tg.leader.sendSignalLocked(t.exitNotificationSignal(t.tg.terminationSignal, t.parent), true /* group */)
+ }
+ t.parent.tg.signalHandlers.mu.Unlock()
+ // If a task in the parent was waiting for a child group stop
+ // or continue, it needs to be notified of the exit, because
+ // there may be no remaining eligible tasks (so that wait
+ // should return ECHILD).
+ t.parent.tg.eventQueue.Notify(EventExit | EventChildGroupStop | EventGroupContinue)
+ }
+ }
+ }
+ if t.exitTracerAcked && t.exitParentAcked {
+ t.advanceExitStateLocked(TaskExitZombie, TaskExitDead)
+ for ns := t.tg.pidns; ns != nil; ns = ns.parent {
+ tid := ns.tids[t]
+ delete(ns.tasks, tid)
+ delete(ns.tids, t)
+ }
+ t.tg.exitedCPUStats.Accumulate(t.CPUStats())
+ t.tg.ioUsage.Accumulate(t.ioUsage)
+ t.tg.signalHandlers.mu.Lock()
+ t.tg.tasks.Remove(t)
+ if t.tg.lastTimerSignalTask == t {
+ t.tg.lastTimerSignalTask = nil
+ }
+ t.tg.tasksCount--
+ tc := t.tg.tasksCount
+ t.tg.signalHandlers.mu.Unlock()
+ if tc == 1 && t != t.tg.leader {
+ // Our fromPtraceDetach doesn't matter here (in Linux terms, this
+ // is via a call to release_task()).
+ t.tg.leader.exitNotifyLocked(false)
+ } else if tc == 0 {
+ t.tg.processGroup.decRefWithParent(t.tg.parentPG())
+ }
+ if t.parent != nil {
+ delete(t.parent.children, t)
+ t.parent = nil
+ }
+ }
+}
+
+// exitNotificationSignal returns the SignalInfo that should accompany an exit
+// notification for t delivered to receiver.
+//
+// Preconditions: The TaskSet mutex must be locked.
// exitNotificationSignal returns the SignalInfo that notifies receiver of t's
// exit via signal sig. The si_code distinguishes killed-by-signal from normal
// exit based on t.exitStatus.
func (t *Task) exitNotificationSignal(sig linux.Signal, receiver *Task) *arch.SignalInfo {
	info := &arch.SignalInfo{
		Signo: int32(sig),
	}
	// PID and UID are translated into the receiver's PID and user namespaces.
	info.SetPid(int32(receiver.tg.pidns.tids[t]))
	info.SetUid(int32(t.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow()))
	if t.exitStatus.Signaled() {
		info.Code = arch.CLD_KILLED
		info.SetStatus(int32(t.exitStatus.Signo))
	} else {
		info.Code = arch.CLD_EXITED
		info.SetStatus(int32(t.exitStatus.Code))
	}
	// TODO: Set utime, stime.
	return info
}

// ExitStatus returns t's exit status, which is only guaranteed to be
// meaningful if t.ExitState() != TaskExitNone.
func (t *Task) ExitStatus() ExitStatus {
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	return t.exitStatus
}

// ExitStatus returns the exit status that would be returned by a consuming
// wait*() on tg.
func (tg *ThreadGroup) ExitStatus() ExitStatus {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	tg.signalHandlers.mu.Lock()
	defer tg.signalHandlers.mu.Unlock()
	// While the group is exiting, the group exit status takes precedence over
	// the leader's individual status.
	if tg.exiting {
		return tg.exitStatus
	}
	return tg.leader.exitStatus
}

// TerminationSignal returns the thread group's termination signal.
func (tg *ThreadGroup) TerminationSignal() linux.Signal {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	return tg.terminationSignal
}

// Task events that can be waited for.
const (
	// EventExit represents an exit notification generated for a child thread
	// group leader or a tracee under the conditions specified in the comment
	// above runExitNotify.
	EventExit waiter.EventMask = 1 << iota

	// EventChildGroupStop occurs when a child thread group completes a group
	// stop (i.e. all tasks in the child thread group have entered a stopped
	// state as a result of a group stop).
	EventChildGroupStop

	// EventTraceeStop occurs when a task that is ptraced by a task in the
	// notified thread group enters a ptrace stop (see ptrace(2)).
	EventTraceeStop

	// EventGroupContinue occurs when a child thread group, or a thread group
	// whose leader is ptraced by a task in the notified thread group, that had
	// initiated or completed a group stop leaves the group stop, due to the
	// child thread group or any task in the child thread group being sent
	// SIGCONT.
	EventGroupContinue
)

// WaitOptions controls the behavior of Task.Wait.
type WaitOptions struct {
	// If SpecificTID is non-zero, only events from the task with thread ID
	// SpecificTID are eligible to be waited for. SpecificTID is resolved in
	// the PID namespace of the waiter (the method receiver of Task.Wait). If
	// no such task exists, or that task would not otherwise be eligible to be
	// waited for by the waiting task, then there are no waitable tasks and
	// Wait will return ECHILD.
	SpecificTID ThreadID

	// If SpecificPGID is non-zero, only events from ThreadGroups with a
	// matching ProcessGroupID are eligible to be waited for. (Same
	// constraints as SpecificTID apply.)
	SpecificPGID ProcessGroupID

	// Terminology note: Per waitpid(2), "a clone child is one which delivers
	// no signal, or a signal other than SIGCHLD to its parent upon
	// termination." In Linux, termination signal is technically a per-task
	// property rather than a per-thread-group property. However, clone()
	// forces no termination signal for tasks created with CLONE_THREAD, and
	// execve() resets the termination signal to SIGCHLD, so all
	// non-group-leader threads have no termination signal and are therefore
	// "clone tasks".

	// If NonCloneTasks is true, events from non-clone tasks are eligible to be
	// waited for.
	NonCloneTasks bool

	// If CloneTasks is true, events from clone tasks are eligible to be waited
	// for.
	CloneTasks bool

	// Events is a bitwise combination of the events defined above that specify
	// what events are of interest to the call to Wait.
	Events waiter.EventMask

	// If ConsumeEvent is true, the Wait should consume the event such that it
	// cannot be returned by a future Wait. Note that if a task exit is
	// consumed in this way, in most cases the task will be reaped.
	ConsumeEvent bool

	// If BlockInterruptErr is not nil, Wait will block until either an event
	// is available or there are no tasks that could produce a waitable event;
	// if that blocking is interrupted, Wait returns BlockInterruptErr. If
	// BlockInterruptErr is nil, Wait will not block.
	BlockInterruptErr error
}

// matchesTask reports whether t is eligible to be waited for under o.
//
// Preconditions: The TaskSet mutex must be locked (for reading or writing).
func (o *WaitOptions) matchesTask(t *Task, pidns *PIDNamespace) bool {
	if o.SpecificTID != 0 && o.SpecificTID != pidns.tids[t] {
		return false
	}
	if o.SpecificPGID != 0 && o.SpecificPGID != pidns.pgids[t.tg.processGroup] {
		return false
	}
	// See the "clone tasks" terminology note on WaitOptions: a thread group
	// leader with termination signal SIGCHLD is a non-clone task; everything
	// else is a clone task.
	if t == t.tg.leader && t.tg.terminationSignal == linux.SIGCHLD {
		return o.NonCloneTasks
	}
	return o.CloneTasks
}

// ErrNoWaitableEvent is returned by non-blocking Task.Waits (e.g.
// waitpid(WNOHANG)) that find no waitable events, but determine that waitable
// events may exist in the future. (In contrast, if a non-blocking or blocking
// Wait determines that there are no tasks that can produce a waitable event,
// Task.Wait returns ECHILD.)
var ErrNoWaitableEvent = errors.New("non-blocking Wait found eligible threads but no waitable events")

// WaitResult contains information about a waited-for event.
type WaitResult struct {
	// Task is the task that reported the event.
	Task *Task

	// TID is the thread ID of Task in the PID namespace of the task that
	// called Wait (that is, the method receiver of the call to Task.Wait). TID
	// is provided because consuming exit waits cause the thread ID to be
	// deallocated.
	TID ThreadID

	// UID is the real UID of Task in the user namespace of the task that
	// called Wait.
	UID auth.UID

	// Event is exactly one of the events defined above.
	Event waiter.EventMask

	// Status is the numeric status associated with the event.
	Status uint32
}

// Wait waits for an event from a thread group that is a child of t's thread
// group, or a task in such a thread group, or a task that is ptraced by t,
// subject to the options specified in opts.
func (t *Task) Wait(opts *WaitOptions) (*WaitResult, error) {
	if opts.BlockInterruptErr == nil {
		return t.waitOnce(opts)
	}
	// Register on t's thread group event queue before the first scan, so an
	// event that arrives between waitOnce and Block is not lost.
	w, ch := waiter.NewChannelEntry(nil)
	t.tg.eventQueue.EventRegister(&w, opts.Events)
	defer t.tg.eventQueue.EventUnregister(&w)
	for {
		wr, err := t.waitOnce(opts)
		if err != ErrNoWaitableEvent {
			// This includes err == nil.
			return wr, err
		}
		if err := t.Block(ch); err != nil {
			return wr, syserror.ConvertIntr(err, opts.BlockInterruptErr)
		}
	}
}

// waitOnce performs a single non-blocking scan for a waitable event matching
// opts. It returns ErrNoWaitableEvent if eligible tasks exist but none
// currently has a waitable event, and ECHILD if there are no eligible tasks
// at all.
func (t *Task) waitOnce(opts *WaitOptions) (*WaitResult, error) {
	anyWaitableTasks := false

	t.tg.pidns.owner.mu.Lock()
	defer t.tg.pidns.owner.mu.Unlock()

	// Without the (unimplemented) __WNOTHREAD flag, a task can wait on the
	// children and tracees of any task in the same thread group.
	for parent := t.tg.tasks.Front(); parent != nil; parent = parent.Next() {
		for child := range parent.children {
			if !opts.matchesTask(child, parent.tg.pidns) {
				continue
			}
			// Non-leaders don't notify parents on exit and aren't eligible to
			// be waited on.
			if opts.Events&EventExit != 0 && child == child.tg.leader && !child.exitParentAcked {
				anyWaitableTasks = true
				if wr := t.waitCollectZombieLocked(child, opts, false); wr != nil {
					return wr, nil
				}
			}
			// Check for group stops and continues. Tasks that have passed
			// TaskExitInitiated can no longer participate in group stops.
			if opts.Events&(EventChildGroupStop|EventGroupContinue) == 0 {
				continue
			}
			if child.exitState >= TaskExitInitiated {
				continue
			}
			// If the waiter is in the same thread group as the task's
			// tracer, do not report its group stops; they will be reported
			// as ptrace stops instead. This also skips checking for group
			// continues, but they'll be checked for when scanning tracees
			// below. (Per kernel/exit.c:wait_consider_task(): "If a
			// ptracer wants to distinguish the two events for its own
			// children, it should create a separate process which takes
			// the role of real parent.")
			if tracer := child.Tracer(); tracer != nil && tracer.tg == parent.tg {
				continue
			}
			anyWaitableTasks = true
			if opts.Events&EventChildGroupStop != 0 {
				if wr := t.waitCollectChildGroupStopLocked(child, opts); wr != nil {
					return wr, nil
				}
			}
			if opts.Events&EventGroupContinue != 0 {
				if wr := t.waitCollectGroupContinueLocked(child, opts); wr != nil {
					return wr, nil
				}
			}
		}
		for tracee := range parent.ptraceTracees {
			if !opts.matchesTask(tracee, parent.tg.pidns) {
				continue
			}
			// Non-leaders do notify tracers on exit.
			if opts.Events&EventExit != 0 && !tracee.exitTracerAcked {
				anyWaitableTasks = true
				if wr := t.waitCollectZombieLocked(tracee, opts, true); wr != nil {
					return wr, nil
				}
			}
			if opts.Events&(EventTraceeStop|EventGroupContinue) == 0 {
				continue
			}
			if tracee.exitState >= TaskExitInitiated {
				continue
			}
			anyWaitableTasks = true
			if opts.Events&EventTraceeStop != 0 {
				if wr := t.waitCollectTraceeStopLocked(tracee, opts); wr != nil {
					return wr, nil
				}
			}
			if opts.Events&EventGroupContinue != 0 {
				if wr := t.waitCollectGroupContinueLocked(tracee, opts); wr != nil {
					return wr, nil
				}
			}
		}
	}

	if anyWaitableTasks {
		return nil, ErrNoWaitableEvent
	}
	return nil, syserror.ECHILD
}

// Preconditions: The TaskSet mutex must be locked for writing.
// waitCollectZombieLocked returns a WaitResult for target if target is a
// zombie waitable by t (as parent, or as tracer if asPtracer is true), and
// nil otherwise. If opts.ConsumeEvent is set, the exit is acknowledged —
// detaching tracees and reaping zombies as appropriate.
func (t *Task) waitCollectZombieLocked(target *Task, opts *WaitOptions, asPtracer bool) *WaitResult {
	if asPtracer && !target.exitTracerNotified {
		return nil
	}
	if !asPtracer && !target.exitParentNotified {
		return nil
	}
	// Zombied thread group leaders are never waitable until their thread group
	// is otherwise empty. Usually this is caught by the
	// target.exitParentNotified check above, but if t is both (in the thread
	// group of) target's tracer and parent, asPtracer may be true.
	if target == target.tg.leader && target.tg.tasksCount != 1 {
		return nil
	}
	pid := t.tg.pidns.tids[target]
	uid := target.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()
	status := target.exitStatus.Status()
	if !opts.ConsumeEvent {
		return &WaitResult{
			Task:   target,
			TID:    pid,
			UID:    uid,
			Event:  EventExit,
			Status: status,
		}
	}
	// Surprisingly, the exit status reported by a non-consuming wait can
	// differ from that reported by a consuming wait; the latter will return
	// the group exit code if one is available.
	if target.tg.exiting {
		status = target.tg.exitStatus.Status()
	}
	// t may be (in the thread group of) target's parent, tracer, or both. We
	// don't need to check for !exitTracerAcked because tracees are detached
	// here, and we don't need to check for !exitParentAcked because zombies
	// will be reaped here.
	if tracer := target.Tracer(); tracer != nil && tracer.tg == t.tg && target.exitTracerNotified {
		target.exitTracerAcked = true
		target.ptraceTracer.Store((*Task)(nil))
		delete(t.ptraceTracees, target)
	}
	if target.parent != nil && target.parent.tg == t.tg && target.exitParentNotified {
		target.exitParentAcked = true
		if target == target.tg.leader {
			// target.tg.exitedCPUStats doesn't include target.CPUStats() yet,
			// and won't until after target.exitNotifyLocked() (maybe). Include
			// target.CPUStats() explicitly. This is consistent with Linux,
			// which accounts an exited task's cputime to its thread group in
			// kernel/exit.c:release_task() => __exit_signal(), and uses
			// thread_group_cputime_adjusted() in wait_task_zombie().
			t.tg.childCPUStats.Accumulate(target.CPUStats())
			t.tg.childCPUStats.Accumulate(target.tg.exitedCPUStats)
			t.tg.childCPUStats.Accumulate(target.tg.childCPUStats)
			// Update t's child max resident set size. The size will be the maximum
			// of this thread's size and all its childrens' sizes.
			if t.tg.childMaxRSS < target.tg.maxRSS {
				t.tg.childMaxRSS = target.tg.maxRSS
			}
			if t.tg.childMaxRSS < target.tg.childMaxRSS {
				t.tg.childMaxRSS = target.tg.childMaxRSS
			}
		}
	}
	target.exitNotifyLocked(false)
	return &WaitResult{
		Task:   target,
		TID:    pid,
		UID:    uid,
		Event:  EventExit,
		Status: status,
	}
}

// updateRSSLocked updates t.tg.maxRSS.
//
// Preconditions: The TaskSet mutex must be locked for writing.
func (t *Task) updateRSSLocked() {
	if mmMaxRSS := t.MemoryManager().MaxResidentSetSize(); t.tg.maxRSS < mmMaxRSS {
		t.tg.maxRSS = mmMaxRSS
	}
}

// waitCollectChildGroupStopLocked returns a WaitResult if target's thread
// group has a consumable group-stop event, and nil otherwise.
//
// Preconditions: The TaskSet mutex must be locked for writing.
func (t *Task) waitCollectChildGroupStopLocked(target *Task, opts *WaitOptions) *WaitResult {
	target.tg.signalHandlers.mu.Lock()
	defer target.tg.signalHandlers.mu.Unlock()
	if !target.tg.groupStopWaitable {
		return nil
	}
	pid := t.tg.pidns.tids[target]
	uid := target.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()
	sig := target.tg.groupStopSignal
	if opts.ConsumeEvent {
		target.tg.groupStopWaitable = false
	}
	return &WaitResult{
		Task:  target,
		TID:   pid,
		UID:   uid,
		Event: EventChildGroupStop,
		// There is no name for these status constants.
		// (This is the "stopped" wait status layout: stop signal in the
		// second byte, 0x7f in the low byte; compare Linux's W_STOPCODE.)
		Status: (uint32(sig)&0xff)<<8 | 0x7f,
	}
}

// waitCollectGroupContinueLocked returns a WaitResult if target's thread
// group has a consumable group-continue event, and nil otherwise.
//
// Preconditions: The TaskSet mutex must be locked for writing.
func (t *Task) waitCollectGroupContinueLocked(target *Task, opts *WaitOptions) *WaitResult {
	target.tg.signalHandlers.mu.Lock()
	defer target.tg.signalHandlers.mu.Unlock()
	if !target.tg.groupContWaitable {
		return nil
	}
	pid := t.tg.pidns.tids[target]
	uid := target.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()
	if opts.ConsumeEvent {
		target.tg.groupContWaitable = false
	}
	return &WaitResult{
		Task:  target,
		TID:   pid,
		UID:   uid,
		Event: EventGroupContinue,
		// 0xffff is the "continued" wait status (compare Linux's
		// __W_CONTINUED).
		Status: 0xffff,
	}
}

// waitCollectTraceeStopLocked returns a WaitResult if target is in a
// reportable ptrace stop, and nil otherwise.
//
// Preconditions: The TaskSet mutex must be locked for writing.
func (t *Task) waitCollectTraceeStopLocked(target *Task, opts *WaitOptions) *WaitResult {
	target.tg.signalHandlers.mu.Lock()
	defer target.tg.signalHandlers.mu.Unlock()
	if target.stop == nil {
		return nil
	}
	if _, ok := target.stop.(*ptraceStop); !ok {
		return nil
	}
	// ptraceCode == 0 means the stop has already been consumed.
	if target.ptraceCode == 0 {
		return nil
	}
	pid := t.tg.pidns.tids[target]
	uid := target.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()
	code := target.ptraceCode
	if opts.ConsumeEvent {
		target.ptraceCode = 0
	}
	return &WaitResult{
		Task:  target,
		TID:   pid,
		UID:   uid,
		Event: EventTraceeStop,
		// Stopped-style wait status carrying the ptrace stop code in place
		// of the plain stop signal.
		Status: uint32(code)<<8 | 0x7f,
	}
}

// ExitState returns t's current progress through the exit path.
func (t *Task) ExitState() TaskExitState {
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	return t.exitState
}

// ParentDeathSignal returns t's parent death signal.
func (t *Task) ParentDeathSignal() linux.Signal {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.parentDeathSignal
}

// SetParentDeathSignal sets t's parent death signal.
func (t *Task) SetParentDeathSignal(sig linux.Signal) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.parentDeathSignal = sig
}
diff --git a/pkg/sentry/kernel/task_identity.go b/pkg/sentry/kernel/task_identity.go
new file mode 100644
index 000000000..a51fa9d7e
--- /dev/null
+++ b/pkg/sentry/kernel/task_identity.go
@@ -0,0 +1,557 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

import (
	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// Credentials returns t's credentials by value. The returned value is a
// snapshot; later changes to t's credentials are not reflected in it.
func (t *Task) Credentials() auth.Credentials {
	t.mu.Lock()
	defer t.mu.Unlock()
	return *t.creds // Copy out with lock held.
}

// UserNamespace returns the user namespace associated with the task.
func (t *Task) UserNamespace() *auth.UserNamespace {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.creds.UserNamespace
}

// HasCapabilityIn checks if the task has capability cp in user namespace ns.
func (t *Task) HasCapabilityIn(cp linux.Capability, ns *auth.UserNamespace) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.creds.HasCapabilityIn(cp, ns)
}

// HasCapability checks if the task has capability cp in its user namespace.
func (t *Task) HasCapability(cp linux.Capability) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.creds.HasCapability(cp)
}

// SetUID implements the semantics of setuid(2).
func (t *Task) SetUID(uid auth.UID) error {
	// setuid considers -1 to be invalid.
	if !uid.Ok() {
		return syserror.EINVAL
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	kuid := t.creds.UserNamespace.MapToKUID(uid)
	if !kuid.Ok() {
		return syserror.EINVAL
	}
	// "setuid() sets the effective user ID of the calling process. If the
	// effective UID of the caller is root (more precisely: if the caller has
	// the CAP_SETUID capability), the real UID and saved set-user-ID are also
	// set." - setuid(2)
	if t.creds.HasCapability(linux.CAP_SETUID) {
		t.setKUIDsUncheckedLocked(kuid, kuid, kuid)
		return nil
	}
	// "EPERM: The user is not privileged (Linux: does not have the CAP_SETUID
	// capability) and uid does not match the real UID or saved set-user-ID of
	// the calling process."
	if kuid != t.creds.RealKUID && kuid != t.creds.SavedKUID {
		return syserror.EPERM
	}
	// Unprivileged: only the effective UID changes.
	t.setKUIDsUncheckedLocked(t.creds.RealKUID, kuid, t.creds.SavedKUID)
	return nil
}

// SetREUID implements the semantics of setreuid(2).
func (t *Task) SetREUID(r, e auth.UID) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	// "Supplying a value of -1 for either the real or effective user ID forces
	// the system to leave that ID unchanged." - setreuid(2)
	newR := t.creds.RealKUID
	if r.Ok() {
		newR = t.creds.UserNamespace.MapToKUID(r)
		if !newR.Ok() {
			return syserror.EINVAL
		}
	}
	newE := t.creds.EffectiveKUID
	if e.Ok() {
		newE = t.creds.UserNamespace.MapToKUID(e)
		if !newE.Ok() {
			return syserror.EINVAL
		}
	}
	if !t.creds.HasCapability(linux.CAP_SETUID) {
		// "Unprivileged processes may only set the effective user ID to the
		// real user ID, the effective user ID, or the saved set-user-ID."
		if newE != t.creds.RealKUID && newE != t.creds.EffectiveKUID && newE != t.creds.SavedKUID {
			return syserror.EPERM
		}
		// "Unprivileged users may only set the real user ID to the real user
		// ID or the effective user ID."
		if newR != t.creds.RealKUID && newR != t.creds.EffectiveKUID {
			return syserror.EPERM
		}
	}
	// "If the real user ID is set (i.e., ruid is not -1) or the effective user
	// ID is set to a value not equal to the previous real user ID, the saved
	// set-user-ID will be set to the new effective user ID."
	newS := t.creds.SavedKUID
	if r.Ok() || (e.Ok() && newE != t.creds.EffectiveKUID) {
		newS = newE
	}
	t.setKUIDsUncheckedLocked(newR, newE, newS)
	return nil
}

// SetRESUID implements the semantics of the setresuid(2) syscall.
func (t *Task) SetRESUID(r, e, s auth.UID) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	// "Unprivileged user processes may change the real UID, effective UID, and
	// saved set-user-ID, each to one of: the current real UID, the current
	// effective UID or the current saved set-user-ID. Privileged processes (on
	// Linux, those having the CAP_SETUID capability) may set the real UID,
	// effective UID, and saved set-user-ID to arbitrary values. If one of the
	// arguments equals -1, the corresponding value is not changed." -
	// setresuid(2)
	var err error
	newR := t.creds.RealKUID
	if r.Ok() {
		newR, err = t.creds.UseUID(r)
		if err != nil {
			return err
		}
	}
	newE := t.creds.EffectiveKUID
	if e.Ok() {
		newE, err = t.creds.UseUID(e)
		if err != nil {
			return err
		}
	}
	newS := t.creds.SavedKUID
	if s.Ok() {
		newS, err = t.creds.UseUID(s)
		if err != nil {
			return err
		}
	}
	t.setKUIDsUncheckedLocked(newR, newE, newS)
	return nil
}

// setKUIDsUncheckedLocked installs the new real/effective/saved KUIDs without
// permission checks, applying the capability transition rules from
// capabilities(7).
//
// Preconditions: t.mu must be locked.
func (t *Task) setKUIDsUncheckedLocked(newR, newE, newS auth.KUID) {
	root := t.creds.UserNamespace.MapToKUID(auth.RootUID)
	oldR, oldE, oldS := t.creds.RealKUID, t.creds.EffectiveKUID, t.creds.SavedKUID
	t.creds.RealKUID, t.creds.EffectiveKUID, t.creds.SavedKUID = newR, newE, newS

	// "1. If one or more of the real, effective or saved set user IDs was
	// previously 0, and as a result of the UID changes all of these IDs have a
	// nonzero value, then all capabilities are cleared from the permitted and
	// effective capability sets." - capabilities(7)
	if (oldR == root || oldE == root || oldS == root) && (newR != root && newE != root && newS != root) {
		// prctl(2): "PR_SET_KEEPCAP: Set the state of the calling thread's
		// "keep capabilities" flag, which determines whether the thread's permitted
		// capability set is cleared when a change is made to the
		// thread's user IDs such that the thread's real UID, effective
		// UID, and saved set-user-ID all become nonzero when at least
		// one of them previously had the value 0. By default, the
		// permitted capability set is cleared when such a change is
		// made; setting the "keep capabilities" flag prevents it from
		// being cleared." (A thread's effective capability set is always
		// cleared when such a credential change is made,
		// regardless of the setting of the "keep capabilities" flag.)
		if !t.creds.KeepCaps {
			t.creds.PermittedCaps = 0
			t.creds.EffectiveCaps = 0
		}
	}
	// """
	// 2. If the effective user ID is changed from 0 to nonzero, then all
	// capabilities are cleared from the effective set.
	//
	// 3. If the effective user ID is changed from nonzero to 0, then the
	// permitted set is copied to the effective set.
	// """
	if oldE == root && newE != root {
		t.creds.EffectiveCaps = 0
	} else if oldE != root && newE == root {
		t.creds.EffectiveCaps = t.creds.PermittedCaps
	}
	// "4. If the filesystem user ID is changed from 0 to nonzero (see
	// setfsuid(2)), then the following capabilities are cleared from the
	// effective set: ..."
	// (filesystem UIDs aren't implemented, nor are any of the capabilities in
	// question)

	// Not documented, but compare Linux's kernel/cred.c:commit_creds().
	if oldE != newE {
		t.parentDeathSignal = 0
	}
}

// SetGID implements the semantics of setgid(2).
func (t *Task) SetGID(gid auth.GID) error {
	if !gid.Ok() {
		return syserror.EINVAL
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	kgid := t.creds.UserNamespace.MapToKGID(gid)
	if !kgid.Ok() {
		return syserror.EINVAL
	}
	if t.creds.HasCapability(linux.CAP_SETGID) {
		t.setKGIDsUncheckedLocked(kgid, kgid, kgid)
		return nil
	}
	if kgid != t.creds.RealKGID && kgid != t.creds.SavedKGID {
		return syserror.EPERM
	}
	t.setKGIDsUncheckedLocked(t.creds.RealKGID, kgid, t.creds.SavedKGID)
	return nil
}

// SetREGID implements the semantics of setregid(2).
func (t *Task) SetREGID(r, e auth.GID) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	newR := t.creds.RealKGID
	if r.Ok() {
		newR = t.creds.UserNamespace.MapToKGID(r)
		if !newR.Ok() {
			return syserror.EINVAL
		}
	}
	newE := t.creds.EffectiveKGID
	if e.Ok() {
		newE = t.creds.UserNamespace.MapToKGID(e)
		if !newE.Ok() {
			return syserror.EINVAL
		}
	}
	if !t.creds.HasCapability(linux.CAP_SETGID) {
		if newE != t.creds.RealKGID && newE != t.creds.EffectiveKGID && newE != t.creds.SavedKGID {
			return syserror.EPERM
		}
		if newR != t.creds.RealKGID && newR != t.creds.EffectiveKGID {
			return syserror.EPERM
		}
	}
	// Mirrors the saved set-user-ID rule in SetREUID, applied to GIDs.
	newS := t.creds.SavedKGID
	if r.Ok() || (e.Ok() && newE != t.creds.EffectiveKGID) {
		newS = newE
	}
	t.setKGIDsUncheckedLocked(newR, newE, newS)
	return nil
}

// SetRESGID implements the semantics of the setresgid(2) syscall.
func (t *Task) SetRESGID(r, e, s auth.GID) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	// As with SetRESUID, -1 (not Ok) leaves the corresponding ID unchanged;
	// UseGID validates the mapping and unprivileged transition rules.
	var err error
	newR := t.creds.RealKGID
	if r.Ok() {
		newR, err = t.creds.UseGID(r)
		if err != nil {
			return err
		}
	}
	newE := t.creds.EffectiveKGID
	if e.Ok() {
		newE, err = t.creds.UseGID(e)
		if err != nil {
			return err
		}
	}
	newS := t.creds.SavedKGID
	if s.Ok() {
		newS, err = t.creds.UseGID(s)
		if err != nil {
			return err
		}
	}
	t.setKGIDsUncheckedLocked(newR, newE, newS)
	return nil
}

// setKGIDsUncheckedLocked installs the new real/effective/saved KGIDs without
// permission checks.
//
// Preconditions: t.mu must be locked.
func (t *Task) setKGIDsUncheckedLocked(newR, newE, newS auth.KGID) {
	oldE := t.creds.EffectiveKGID
	t.creds.RealKGID, t.creds.EffectiveKGID, t.creds.SavedKGID = newR, newE, newS

	// Not documented, but compare Linux's kernel/cred.c:commit_creds().
	if oldE != newE {
		t.parentDeathSignal = 0
	}
}

// SetExtraGIDs attempts to change t's supplemental groups. All IDs are
// interpreted as being in t's user namespace.
func (t *Task) SetExtraGIDs(gids []auth.GID) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	if !t.creds.HasCapability(linux.CAP_SETGID) {
		return syserror.EPERM
	}
	kgids := make([]auth.KGID, len(gids))
	for i, gid := range gids {
		kgid := t.creds.UserNamespace.MapToKGID(gid)
		if !kgid.Ok() {
			return syserror.EINVAL
		}
		kgids[i] = kgid
	}
	t.creds.ExtraKGIDs = kgids
	return nil
}

// SetCapabilitySets attempts to change t's permitted, inheritable, and
// effective capability sets.
func (t *Task) SetCapabilitySets(permitted, inheritable, effective auth.CapabilitySet) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	// "Permitted: This is a limiting superset for the effective capabilities
	// that the thread may assume." - capabilities(7)
	if effective & ^permitted != 0 {
		return syserror.EPERM
	}
	// "It is also a limiting superset for the capabilities that may be added
	// to the inheritable set by a thread that does not have the CAP_SETPCAP
	// capability in its effective set."
	if !t.creds.HasCapability(linux.CAP_SETPCAP) && (inheritable & ^(t.creds.InheritableCaps|t.creds.PermittedCaps) != 0) {
		return syserror.EPERM
	}
	// "If a thread drops a capability from its permitted set, it can never
	// reacquire that capability (unless it execve(2)s ..."
	if permitted & ^t.creds.PermittedCaps != 0 {
		return syserror.EPERM
	}
	// "... if a capability is not in the bounding set, then a thread can't add
	// this capability to its inheritable set, even if it was in its permitted
	// capabilities ..."
	if inheritable & ^(t.creds.InheritableCaps|t.creds.BoundingCaps) != 0 {
		return syserror.EPERM
	}
	t.creds.PermittedCaps = permitted
	t.creds.InheritableCaps = inheritable
	t.creds.EffectiveCaps = effective
	return nil
}

// DropBoundingCapability attempts to drop capability cp from t's capability
// bounding set.
func (t *Task) DropBoundingCapability(cp linux.Capability) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	if !t.creds.HasCapability(linux.CAP_SETPCAP) {
		return syserror.EPERM
	}
	t.creds.BoundingCaps &^= auth.CapabilitySetOf(cp)
	return nil
}

// SetUserNamespace attempts to move c into ns.
func (t *Task) SetUserNamespace(ns *auth.UserNamespace) error {
	t.mu.Lock()
	defer t.mu.Unlock()

	// "A process reassociating itself with a user namespace must have the
	// CAP_SYS_ADMIN capability in the target user namespace." - setns(2)
	//
	// If t just created ns, then t.creds is guaranteed to have CAP_SYS_ADMIN
	// in ns (by rule 3 in auth.Credentials.HasCapability).
	if !t.creds.HasCapabilityIn(linux.CAP_SYS_ADMIN, ns) {
		return syserror.EPERM
	}

	t.creds.UserNamespace = ns
	// "The child process created by clone(2) with the CLONE_NEWUSER flag
	// starts out with a complete set of capabilities in the new user
	// namespace. Likewise, a process that creates a new user namespace using
	// unshare(2) or joins an existing user namespace using setns(2) gains a
	// full set of capabilities in that namespace."
	t.creds.PermittedCaps = auth.AllCapabilities
	t.creds.InheritableCaps = 0
	t.creds.EffectiveCaps = auth.AllCapabilities
	t.creds.BoundingCaps = auth.AllCapabilities
	// "A call to clone(2), unshare(2), or setns(2) using the CLONE_NEWUSER
	// flag sets the "securebits" flags (see capabilities(7)) to their default
	// values (all flags disabled) in the child (for clone(2)) or caller (for
	// unshare(2), or setns(2)." - user_namespaces(7)
	t.creds.KeepCaps = false

	return nil
}

// SetKeepCaps will set the keep capabilities flag PR_SET_KEEPCAPS.
func (t *Task) SetKeepCaps(k bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.creds.KeepCaps = k
}

// updateCredsForExec updates t.creds to reflect an execve().
//
// NOTE: We currently do not implement privileged executables
// (set-user/group-ID bits and file capabilities). This allows us to make a lot
// of simplifying assumptions:
//
// - We assume the no_new_privs bit (set by prctl(SET_NO_NEW_PRIVS)), which
// disables the features we don't support anyway, is always set. This
// drastically simplifies this function.
//
// - We don't implement AT_SECURE, because no_new_privs always being set means
// that the conditions that require AT_SECURE never arise. (Compare Linux's
// security/commoncap.c:cap_bprm_set_creds() and cap_bprm_secureexec().)
//
// - We don't check for CAP_SYS_ADMIN in prctl(PR_SET_SECCOMP), since
// seccomp-bpf is also allowed if the task has no_new_privs set.
//
// - Task.ptraceAttach does not serialize with execve as it does in Linux,
// since no_new_privs being set has the same effect as the presence of an
// unprivileged tracer.
//
// Preconditions: t.mu must be locked.
// updateCredsForExecLocked updates t.creds to reflect an execve(2),
// recomputing the task's UIDs, GIDs, and capability sets per the rules
// described in capabilities(7).
//
// NOTE(review): per the "Locked" suffix this assumes a task lock is held by
// the caller — confirm which mutex at the call sites.
func (t *Task) updateCredsForExecLocked() {
	// """
	// During an execve(2), the kernel calculates the new capabilities of
	// the process using the following algorithm:
	//
	//     P'(permitted) = (P(inheritable) & F(inheritable)) |
	//                     (F(permitted) & cap_bset)
	//
	//     P'(effective) = F(effective) ? P'(permitted) : 0
	//
	//     P'(inheritable) = P(inheritable)    [i.e., unchanged]
	//
	// where:
	//
	//     P         denotes the value of a thread capability set before the
	//               execve(2)
	//
	//     P'        denotes the value of a thread capability set after the
	//               execve(2)
	//
	//     F         denotes a file capability set
	//
	//     cap_bset  is the value of the capability bounding set
	//
	// ...
	//
	// In order to provide an all-powerful root using capability sets, during
	// an execve(2):
	//
	// 1. If a set-user-ID-root program is being executed, or the real user ID
	// of the process is 0 (root) then the file inheritable and permitted sets
	// are defined to be all ones (i.e. all capabilities enabled).
	//
	// 2. If a set-user-ID-root program is being executed, then the file
	// effective bit is defined to be one (enabled).
	//
	// The upshot of the above rules, combined with the capabilities
	// transformations described above, is that when a process execve(2)s a
	// set-user-ID-root program, or when a process with an effective UID of 0
	// execve(2)s a program, it gains all capabilities in its permitted and
	// effective capability sets, except those masked out by the capability
	// bounding set.
	// """ - capabilities(7)
	// (ambient capability sets omitted)
	//
	// As the last paragraph implies, the case of "a set-user-ID root program
	// is being executed" also includes the case where (namespace) root is
	// executing a non-set-user-ID program; the actual check is just based on
	// the effective user ID.
	var newPermitted auth.CapabilitySet // since F(inheritable) == F(permitted) == 0
	fileEffective := false
	root := t.creds.UserNamespace.MapToKUID(auth.RootUID)
	if t.creds.EffectiveKUID == root || t.creds.RealKUID == root {
		newPermitted = t.creds.InheritableCaps | t.creds.BoundingCaps
		if t.creds.EffectiveKUID == root {
			fileEffective = true
		}
	}

	// Now we enter poorly-documented, somewhat confusing territory. (The
	// accompanying comment in Linux's security/commoncap.c:cap_bprm_set_creds
	// is not very helpful.) My reading of it is:
	//
	// If at least one of the following is true:
	//
	// A1. The execing task is ptraced, and the tracer did not have
	// CAP_SYS_PTRACE in the execing task's user namespace at the time of
	// PTRACE_ATTACH.
	//
	// A2. The execing task shares its FS context with at least one task in
	// another thread group.
	//
	// A3. The execing task has no_new_privs set.
	//
	// AND at least one of the following is true:
	//
	// B1. The new effective user ID (which may come from set-user-ID, or be the
	// execing task's existing effective user ID) is not equal to the task's
	// real UID.
	//
	// B2. The new effective group ID (which may come from set-group-ID, or be
	// the execing task's existing effective group ID) is not equal to the
	// task's real GID.
	//
	// B3. The new permitted capability set contains capabilities not in the
	// task's permitted capability set.
	//
	// Then:
	//
	// C1. Limit the new permitted capability set to the task's permitted
	// capability set.
	//
	// C2. If either the task does not have CAP_SETUID in its user namespace, or
	// the task has no_new_privs set, force the new effective UID and GID to
	// the task's real UID and GID.
	//
	// But since no_new_privs is always set (A3 is always true), this becomes
	// much simpler. If B1 and B2 are false, C2 is a no-op. If B3 is false, C1
	// is a no-op. So we can just do C1 and C2 unconditionally.
	if t.creds.EffectiveKUID != t.creds.RealKUID || t.creds.EffectiveKGID != t.creds.RealKGID {
		t.creds.EffectiveKUID = t.creds.RealKUID
		t.creds.EffectiveKGID = t.creds.RealKGID
		// Dropping privileges also clears the parent-death signal, matching
		// Linux's behavior for a credentials-changing execve.
		t.parentDeathSignal = 0
	}
	// (Saved set-user-ID is always set to the new effective user ID, and saved
	// set-group-ID is always set to the new effective group ID, regardless of
	// the above. At this point the effective IDs equal the real IDs, so
	// assigning the real IDs is equivalent.)
	t.creds.SavedKUID = t.creds.RealKUID
	t.creds.SavedKGID = t.creds.RealKGID
	t.creds.PermittedCaps &= newPermitted
	if fileEffective {
		t.creds.EffectiveCaps = t.creds.PermittedCaps
	} else {
		t.creds.EffectiveCaps = 0
	}

	// prctl(2): The "keep capabilities" value will be reset to 0 on subsequent
	// calls to execve(2).
	t.creds.KeepCaps = false

	// "The bounding set is inherited at fork(2) from the thread's parent, and
	// is preserved across an execve(2)". So we're done.
}
+ maxStackDebugBytes = 1024 +) + +// Infof logs an formatted info message by calling log.Infof. +func (t *Task) Infof(fmt string, v ...interface{}) { + if log.IsLogging(log.Info) { + log.Infof(t.logPrefix.Load().(string)+fmt, v...) + } +} + +// Warningf logs a warning string by calling log.Warningf. +func (t *Task) Warningf(fmt string, v ...interface{}) { + if log.IsLogging(log.Warning) { + log.Warningf(t.logPrefix.Load().(string)+fmt, v...) + } +} + +// Debugf creates a debug string that includes the task ID. +func (t *Task) Debugf(fmt string, v ...interface{}) { + if log.IsLogging(log.Debug) { + log.Debugf(t.logPrefix.Load().(string)+fmt, v...) + } +} + +// IsLogging returns true iff this level is being logged. +func (t *Task) IsLogging(level log.Level) bool { + return log.IsLogging(level) +} + +// DebugDumpState logs task state at log level debug. +// +// Preconditions: The caller must be running on the task goroutine. +func (t *Task) DebugDumpState() { + t.debugDumpRegisters() + t.debugDumpStack() + if mm := t.MemoryManager(); mm != nil { + t.Debugf("Mappings:\n%s", mm) + } + t.Debugf("FDMap:\n%s", t.FDMap()) +} + +// debugDumpRegisters logs register state at log level debug. +// +// Preconditions: The caller must be running on the task goroutine. +func (t *Task) debugDumpRegisters() { + if !t.IsLogging(log.Debug) { + return + } + regmap, err := t.Arch().RegisterMap() + if err != nil { + t.Debugf("Registers: %v", err) + } else { + t.Debugf("Registers:") + var regs []string + for reg := range regmap { + regs = append(regs, reg) + } + sort.Strings(regs) + for _, reg := range regs { + t.Debugf("%-8s = %016x", reg, regmap[reg]) + } + } +} + +// debugDumpStack logs user stack contents at log level debug. +// +// Preconditions: The caller must be running on the task goroutine. 
+func (t *Task) debugDumpStack() { + if !t.IsLogging(log.Debug) { + return + } + m := t.MemoryManager() + if m == nil { + t.Debugf("Memory manager for task is gone, skipping application stack dump.") + return + } + t.Debugf("Stack:") + start := usermem.Addr(t.Arch().Stack()) + // Round addr down to a 16-byte boundary. + start &= ^usermem.Addr(15) + // Print 16 bytes per line, one byte at a time. + for offset := uint64(0); offset < maxStackDebugBytes; offset += 16 { + addr, ok := start.AddLength(offset) + if !ok { + break + } + var data [16]byte + n, err := m.CopyIn(t, addr, data[:], usermem.IOOpts{ + IgnorePermissions: true, + }) + // Print as much of the line as we can, even if an error was + // encountered. + if n > 0 { + t.Debugf("%x: % x", addr, data[:n]) + } + if err != nil { + t.Debugf("Error reading stack at address %x: %v", addr+usermem.Addr(n), err) + break + } + } +} + +// updateLogPrefix updates the task's cached log prefix to reflect its +// current thread ID. +// +// Preconditions: The task's owning TaskSet.mu must be locked. +func (t *Task) updateLogPrefixLocked() { + // Use the task's TID in the root PID namespace for logging. + t.logPrefix.Store(fmt.Sprintf("[% 4d] ", t.tg.pidns.owner.Root.tids[t])) +} diff --git a/pkg/sentry/kernel/task_net.go b/pkg/sentry/kernel/task_net.go new file mode 100644 index 000000000..4df2e53d3 --- /dev/null +++ b/pkg/sentry/kernel/task_net.go @@ -0,0 +1,35 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// IsNetworkNamespaced returns true if t is in a non-root network namespace.
func (t *Task) IsNetworkNamespaced() bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.netns
}

// NetworkContext returns the network stack used by the task. NetworkContext
// may return nil if no network stack is available.
func (t *Task) NetworkContext() inet.Stack {
	// Tasks in a non-root network namespace are given no network stack at
	// all; only root-namespace tasks see the kernel's stack.
	if t.IsNetworkNamespaced() {
		return nil
	}
	return t.k.networkStack
}

// TaskResources is the subset of a task's data provided by its creator that is
// not provided by the loader.
type TaskResources struct {
	// SignalMask is the set of signals whose delivery is currently blocked.
	//
	// FIXME: Determine if we also need RealSignalMask
	SignalMask linux.SignalSet

	// FSContext is the filesystem context.
	*FSContext

	// FDMap provides access to files to the task.
	*FDMap

	// AbstractSockets tracks abstract sockets that are in use.
	AbstractSockets *AbstractSocketNamespace
}

// newTaskResources returns a new TaskResources, taking an additional reference
// on fdm. (No additional reference is taken on fc; the caller transfers its
// reference.)
func newTaskResources(fdm *FDMap, fc *FSContext) *TaskResources {
	fdm.IncRef()
	return &TaskResources{
		FDMap:           fdm,
		FSContext:       fc,
		AbstractSockets: NewAbstractSocketNamespace(),
	}
}

// release releases all resources held by the TaskResources. release is called
// by the task when it exits.
func (tr *TaskResources) release() {
	// Drop references and nil out the fields so that any use-after-release
	// fails loudly rather than touching freed resources.
	tr.FDMap.DecRef()
	tr.FDMap = nil
	tr.FSContext.DecRef()
	tr.FSContext = nil
	tr.AbstractSockets = nil
}

// Fork returns a duplicate of tr. shareFiles and shareFSContext control
// whether the FDMap and FSContext are shared (reference taken) or deep-copied
// (as for CLONE_FILES / CLONE_FS).
//
// FIXME: Preconditions: When tr is owned by a Task, that task's
// signal mutex must be locked, or Fork must be called by the task's goroutine.
func (tr *TaskResources) Fork(shareFiles bool, shareFSContext bool) *TaskResources {
	var fdmap *FDMap
	if shareFiles {
		fdmap = tr.FDMap
		fdmap.IncRef()
	} else {
		fdmap = tr.FDMap.Fork()
	}

	var fsc *FSContext
	if shareFSContext {
		fsc = tr.FSContext
		fsc.IncRef()
	} else {
		fsc = tr.FSContext.Fork()
	}

	return &TaskResources{
		SignalMask: tr.SignalMask,
		FDMap:      fdmap,
		FSContext:  fsc,
		// NOTE(review): the abstract socket namespace is always shared with
		// the child, with no reference counting — confirm that is intended.
		AbstractSockets: tr.AbstractSockets,
	}
}

// FDMap returns t's FDMap. FDMap does not take an additional reference on the
// returned FDMap.
//
// Preconditions: The caller must be running on the task goroutine, or t.mu
// must be locked.
func (t *Task) FDMap() *FDMap {
	return t.tr.FDMap
}

// FSContext returns t's FSContext. FSContext does not take an additional
// reference on the returned FSContext.
//
// Preconditions: The caller must be running on the task goroutine, or t.mu
// must be locked.
func (t *Task) FSContext() *FSContext {
	return t.tr.FSContext
}

// MountNamespace returns t's MountNamespace. MountNamespace does not take an additional
// reference on the returned MountNamespace.
func (t *Task) MountNamespace() *fs.MountNamespace {
	return t.k.mounts
}
+func (t *Task) AbstractSockets() *AbstractSocketNamespace { + return t.tr.AbstractSockets +} + +// IsChrooted returns true if the root directory of t's FSContext is not the +// root directory of t's MountNamespace. +// +// Preconditions: The caller must be running on the task goroutine, or t.mu +// must be locked. +func (t *Task) IsChrooted() bool { + realRoot := t.k.mounts.Root() + defer realRoot.DecRef() + return t.tr.FSContext.root != realRoot +} diff --git a/pkg/sentry/kernel/task_run.go b/pkg/sentry/kernel/task_run.go new file mode 100644 index 000000000..94ce5582b --- /dev/null +++ b/pkg/sentry/kernel/task_run.go @@ -0,0 +1,346 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "bytes" + "runtime" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// A taskRunState is a reified state in the task state machine. See README.md +// for details. The canonical list of all run states, as well as transitions +// between them, is given in run_states.dot. 
// A taskRunState is a reified state in the task state machine. See README.md
// for details. The canonical list of all run states, as well as transitions
// between them, is given in run_states.dot.
//
// The set of possible states is enumerable and completely defined by the
// kernel package, so taskRunState would ideally be represented by a
// discriminated union. However, Go does not support sum types.
//
// Hence, as with TaskStop, data-free taskRunStates should be represented as
// typecast nils to avoid unnecessary allocation.
type taskRunState interface {
	// execute executes the code associated with this state over the given task
	// and returns the following state. If execute returns nil, the task
	// goroutine should exit.
	//
	// It is valid to tail-call a following state's execute to avoid the
	// overhead of converting the following state to an interface object and
	// checking for stops, provided that the tail-call cannot recurse.
	execute(*Task) taskRunState
}

// run runs the task goroutine.
//
// threadID is a dummy value set to the task's TID in the root PID namespace
// to make it visible in stack dumps. A goroutine for a given task can be
// identified by searching for Task.run()'s argument value.
func (t *Task) run(threadID uintptr) {
	// Construct t.blockingTimer here. We do this here because we can't
	// reconstruct t.blockingTimer during restore in Task.afterLoad(), because
	// kernel.timekeeper.SetClocks() hasn't been called yet.
	blockingTimerNotifier, blockingTimerChan := ktime.NewChannelNotifier()
	t.blockingTimer = ktime.NewTimer(t.k.MonotonicClock(), blockingTimerNotifier)
	defer t.blockingTimer.Destroy()
	t.blockingTimerChan = blockingTimerChan

	// Activate our address space.
	t.Activate()
	// The corresponding t.Deactivate occurs in the exit path
	// (runExitMain.execute) so that when
	// Platform.CooperativelySharesAddressSpace() == true, we give up the
	// AddressSpace before the task goroutine finishes executing.

	// Ensure that thread group timers for execution time reflect that this
	// task now exists.
	t.tg.tm.kick()

	// If this is a newly-started task, it should check for participation in
	// group stops. If this is a task resuming after restore, it was
	// interrupted by saving. In either case, the task is initially
	// interrupted.
	t.interruptSelf()

	for {
		// Explanation for this ordering:
		//
		// - A freshly-started task that is stopped should not do anything
		// before it enters the stop.
		//
		// - If taskRunState.execute returns nil, the task goroutine should
		// exit without checking for a stop.
		//
		// - Task.Start won't start Task.run if t.runState is nil, so this
		// ordering is safe.
		t.doStop()
		t.runState = t.runState.execute(t)
		if t.runState == nil {
			// Unwind the goroutine-lifetime accounting before returning;
			// these balance the additions made when the task was created.
			t.accountTaskGoroutineEnter(TaskGoroutineNonexistent)
			t.goroutineStopped.Done()
			t.tg.liveGoroutines.Done()
			t.tg.pidns.owner.liveGoroutines.Done()
			t.tg.pidns.owner.runningGoroutines.Done()

			// Keep argument alive because stack trace for dead variables may not be correct.
			runtime.KeepAlive(threadID)
			return
		}
	}
}

// doStop is called by Task.run to block until the task is not stopped.
func (t *Task) doStop() {
	// Fast path: no stop is in effect.
	if atomic.LoadInt32(&t.stopCount) == 0 {
		return
	}
	t.Deactivate()
	// NOTE: t.Activate() must be called without any locks held, so
	// this defer must precede the defer for unlocking the signal mutex.
	defer t.Activate()
	t.accountTaskGoroutineEnter(TaskGoroutineStopped)
	defer t.accountTaskGoroutineLeave(TaskGoroutineStopped)
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	// While stopped, this goroutine does not count as running; the deferred
	// re-increments restore the counts when the stop ends.
	t.tg.pidns.owner.runningGoroutines.Add(-1)
	defer t.tg.pidns.owner.runningGoroutines.Add(1)
	t.goroutineStopped.Add(-1)
	defer t.goroutineStopped.Add(1)
	for t.stopCount > 0 {
		t.endStopCond.Wait()
	}
}
// The runApp state checks for interrupts before executing untrusted
// application code.
type runApp struct{}

// execute runs application code (via the platform's Switch) and dispatches on
// the reason control returned to the sentry: syscall, signal/fault,
// interrupt, or CPU preemption.
func (*runApp) execute(t *Task) taskRunState {
	if t.interrupted() {
		// Checkpointing instructs tasks to stop by sending an interrupt, so we
		// must check for stops before entering runInterrupt (instead of
		// tail-calling it).
		return (*runInterrupt)(nil)
	}

	// We're about to switch to the application again. If there's still a
	// unhandled SyscallRestartErrno that wasn't translated to an EINTR,
	// restart the syscall that was interrupted. If there's a saved signal
	// mask, restore it. (Note that restoring the saved signal mask may unblock
	// a pending signal, causing another interruption, but that signal should
	// not interact with the interrupted syscall.)
	if t.haveSyscallReturn {
		if sre, ok := SyscallRestartErrnoFromReturn(t.Arch().Return()); ok {
			if sre == ERESTART_RESTARTBLOCK {
				t.Debugf("Restarting syscall %d with restart block after errno %d: not interrupted by handled signal", t.Arch().SyscallNo(), sre)
				t.Arch().RestartSyscallWithRestartBlock()
			} else {
				t.Debugf("Restarting syscall %d after errno %d: not interrupted by handled signal", t.Arch().SyscallNo(), sre)
				t.Arch().RestartSyscall()
			}
		}
		t.haveSyscallReturn = false
	}
	if t.haveSavedSignalMask {
		t.SetSignalMask(t.savedSignalMask)
		t.haveSavedSignalMask = false
		if t.interrupted() {
			return (*runInterrupt)(nil)
		}
	}

	// Apply restartable sequences.
	if t.rseqPreempted {
		t.rseqPreempted = false
		if t.rseqCPUAddr != 0 {
			if err := t.rseqCopyOutCPU(); err != nil {
				t.Warningf("Failed to copy CPU to %#x for RSEQ: %v", t.rseqCPUAddr, err)
				t.forceSignal(linux.SIGSEGV, false)
				t.SendSignal(sigPriv(linux.SIGSEGV))
				// Re-enter the task run loop for signal delivery.
				return (*runApp)(nil)
			}
		}
		t.rseqInterrupt()
	}

	// Check if we need to enable single-stepping. Tracers expect that the
	// kernel preserves the value of the single-step flag set by PTRACE_SETREGS
	// whether or not PTRACE_SINGLESTEP/PTRACE_SYSEMU_SINGLESTEP is used (this
	// includes our ptrace platform, by the way), so we should only clear the
	// single-step flag if we're responsible for setting it. (clearSinglestep
	// is therefore analogous to Linux's TIF_FORCED_TF.)
	//
	// Strictly speaking, we should also not clear the single-step flag if we
	// single-step through an instruction that sets the single-step flag
	// (arch/x86/kernel/step.c:is_setting_trap_flag()). But nobody sets their
	// own TF. (Famous last words, I know.)
	clearSinglestep := false
	if t.hasTracer() {
		t.tg.pidns.owner.mu.RLock()
		if t.ptraceSinglestep {
			clearSinglestep = !t.Arch().SingleStep()
			t.Arch().SetSingleStep()
		}
		t.tg.pidns.owner.mu.RUnlock()
	}

	t.accountTaskGoroutineEnter(TaskGoroutineRunningApp)
	info, at, err := t.p.Switch(t.MemoryManager().AddressSpace(), t.Arch(), t.rseqCPU)
	t.accountTaskGoroutineLeave(TaskGoroutineRunningApp)

	if clearSinglestep {
		t.Arch().ClearSingleStep()
	}

	switch err {
	case nil:
		// Handle application system call.
		return t.doSyscall()

	case platform.ErrContextInterrupt:
		// Interrupted by platform.Context.Interrupt(). Re-enter the run
		// loop to figure out why.
		return (*runApp)(nil)

	case platform.ErrContextSignal:
		// Looks like a signal has been delivered to us. If it's a synchronous
		// signal (SEGV, SIGBUS, etc.), it should be sent to the application
		// thread that received it.
		sig := linux.Signal(info.Signo)

		// Was it a fault that we should handle internally? If so, this wasn't
		// an application-generated signal and we should continue execution
		// normally.
		if at.Any() {
			addr := usermem.Addr(info.Addr())
			err := t.MemoryManager().HandleUserFault(t, addr, at, usermem.Addr(t.Arch().Stack()))
			if err == nil {
				// The fault was handled appropriately.
				// We can resume running the application.
				return (*runApp)(nil)
			}

			// Is this a vsyscall that we need emulate?
			if at.Execute {
				if sysno, ok := t.tc.st.LookupEmulate(addr); ok {
					return t.doVsyscall(addr, sysno)
				}
			}

			// The JVM will trigger these errors constantly, so don't
			// spam logs with this error.
			if err == syserror.EFAULT || err == syserror.EPERM {
				t.Debugf("Unhandled user fault: addr=%x ip=%x access=%v err=%v", addr, t.Arch().IP(), at, err)
			} else {
				t.Warningf("Unhandled user fault: addr=%x ip=%x access=%v err=%v", addr, t.Arch().IP(), at, err)
			}
			t.DebugDumpState()

			// Continue to signal handling.
			//
			// Convert a BusError error to a SIGBUS from a SIGSEGV. All
			// other info bits stay the same (address, etc.).
			if _, ok := err.(*memmap.BusError); ok {
				sig = linux.SIGBUS
				info.Signo = int32(linux.SIGBUS)
			}
		}

		switch sig {
		case linux.SIGILL:
			// N.B. The debug stuff here is arguably
			// expensive. Don't fret. This gets called
			// about 5 times for a typical application, if
			// that.
			t.Debugf("SIGILL @ %x", t.Arch().IP())

			// Is this a CPUID instruction that we must emulate?
			expected := arch.CPUIDInstruction[:]
			found := make([]byte, len(expected))
			_, err := t.CopyIn(usermem.Addr(t.Arch().IP()), &found)
			if err == nil && bytes.Equal(expected, found) {
				// Skip the cpuid instruction.
				t.Arch().CPUIDEmulate(t)
				t.Arch().SetIP(t.Arch().IP() + uintptr(len(expected)))
				break
			}

			// Treat it like any other synchronous signal.
			fallthrough

		case linux.SIGSEGV, linux.SIGBUS, linux.SIGFPE, linux.SIGTRAP:
			// Synchronous signal. Send it to ourselves. Assume the signal is
			// legitimate and force it (work around the signal being ignored or
			// blocked) like Linux does. Conveniently, this is even the correct
			// behavior for SIGTRAP from single-stepping.
			t.forceSignal(linux.Signal(sig), false /* unconditional */)
			t.SendSignal(info)

		case platform.SignalInterrupt:
			// Assume that a call to platform.Context.Interrupt() misfired.

		case linux.SIGPROF:
			// It's a profiling interrupt: there's not much
			// we can do. We've already paid a decent cost
			// by intercepting the signal, at this point we
			// simply ignore it.

		default:
			// Asynchronous signal. Let the system deal with it.
			t.k.sendExternalSignal(info, "application")
		}

		return (*runApp)(nil)

	case platform.ErrContextCPUPreempted:
		// Ensure that RSEQ critical sections are interrupted and per-thread
		// CPU values are updated before the next platform.Context.Switch().
		t.rseqPreempted = true
		return (*runApp)(nil)

	default:
		// What happened? Can't continue.
		t.Warningf("Unexpected SwitchToApp error: %v", err)
		t.PrepareExit(ExitStatus{Code: t.ExtractErrno(err, -1)})
		return (*runExit)(nil)
	}
}

// waitGoroutineStoppedOrExited blocks until t's task goroutine stops or exits.
func (t *Task) waitGoroutineStoppedOrExited() {
	t.goroutineStopped.Wait()
}

// WaitExited blocks until all task goroutines in tg have exited.
//
// WaitExited does not correspond to anything in Linux; it's provided so that
// external callers of Kernel.CreateProcess can wait for the created thread
// group to terminate.
func (tg *ThreadGroup) WaitExited() {
	tg.liveGoroutines.Wait()
}

// Yield yields the processor for the calling task.
func (t *Task) Yield() {
	atomic.AddUint64(&t.yieldCount, 1)
	runtime.Gosched()
}
// CPU scheduling, real and fake.

// TaskGoroutineState is a coarse representation of the current execution
// status of a kernel.Task goroutine.
type TaskGoroutineState int

const (
	// TaskGoroutineNonexistent indicates that the task goroutine has either
	// not yet been created by Task.Start() or has returned from Task.run().
	// This must be the zero value for TaskGoroutineState.
	TaskGoroutineNonexistent TaskGoroutineState = iota

	// TaskGoroutineRunningSys indicates that the task goroutine is executing
	// sentry code.
	TaskGoroutineRunningSys

	// TaskGoroutineRunningApp indicates that the task goroutine is executing
	// application code.
	TaskGoroutineRunningApp

	// TaskGoroutineBlockedInterruptible indicates that the task goroutine is
	// blocked in Task.block(), and hence may be woken by Task.interrupt()
	// (e.g. due to signal delivery).
	TaskGoroutineBlockedInterruptible

	// TaskGoroutineBlockedUninterruptible indicates that the task goroutine is
	// stopped outside of Task.block() and Task.doStop(), and hence cannot be
	// woken by Task.interrupt().
	TaskGoroutineBlockedUninterruptible

	// TaskGoroutineStopped indicates that the task goroutine is blocked in
	// Task.doStop(). TaskGoroutineStopped is similar to
	// TaskGoroutineBlockedUninterruptible, but is a separate state to make it
	// possible to determine when Task.stop is meaningful.
	TaskGoroutineStopped
)

// TaskGoroutineSchedInfo contains task goroutine scheduling state which must
// be read and updated atomically (readers use the task's goschedSeq seqcount;
// the task goroutine writes between BeginWrite/EndWrite).
type TaskGoroutineSchedInfo struct {
	// Timestamp was the value of Kernel.cpuClock when this
	// TaskGoroutineSchedInfo was last updated.
	Timestamp uint64

	// State is the current state of the task goroutine.
	State TaskGoroutineState

	// UserTicks is the amount of time the task goroutine has spent executing
	// its associated Task's application code, in units of linux.ClockTick.
	UserTicks uint64

	// SysTicks is the amount of time the task goroutine has spent executing in
	// the sentry, in units of linux.ClockTick.
	SysTicks uint64
}

// accountTaskGoroutineEnter records the transition from TaskGoroutineRunningSys
// to state, charging the elapsed interval to SysTicks.
//
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) accountTaskGoroutineEnter(state TaskGoroutineState) {
	now := t.k.CPUClockNow()
	// Enter must always be called from RunningSys; anything else indicates
	// unbalanced enter/leave calls.
	if t.gosched.State != TaskGoroutineRunningSys {
		panic(fmt.Sprintf("Task goroutine switching from state %v (expected %v) to %v", t.gosched.State, TaskGoroutineRunningSys, state))
	}
	t.goschedSeq.BeginWrite()
	// This function is very hot; avoid defer.
	t.gosched.SysTicks += now - t.gosched.Timestamp
	t.gosched.Timestamp = now
	t.gosched.State = state
	t.goschedSeq.EndWrite()
}
// accountTaskGoroutineLeave records the transition from state back to
// TaskGoroutineRunningSys, charging the elapsed interval to UserTicks if the
// state being left was TaskGoroutineRunningApp.
//
// Preconditions: The caller must be running on the task goroutine, and leaving
// a state indicated by a previous call to
// t.accountTaskGoroutineEnter(state).
func (t *Task) accountTaskGoroutineLeave(state TaskGoroutineState) {
	now := t.k.CPUClockNow()
	if t.gosched.State != state {
		panic(fmt.Sprintf("Task goroutine switching from state %v (expected %v) to %v", t.gosched.State, state, TaskGoroutineRunningSys))
	}
	t.goschedSeq.BeginWrite()
	// This function is very hot; avoid defer.
	if state == TaskGoroutineRunningApp {
		t.gosched.UserTicks += now - t.gosched.Timestamp
	}
	t.gosched.Timestamp = now
	t.gosched.State = TaskGoroutineRunningSys
	t.goschedSeq.EndWrite()
}

// TaskGoroutineSchedInfo returns a copy of t's task goroutine scheduling info.
// Most clients should use t.CPUStats() instead.
func (t *Task) TaskGoroutineSchedInfo() TaskGoroutineSchedInfo {
	return SeqAtomicLoadTaskGoroutineSchedInfo(&t.goschedSeq, &t.gosched)
}

// CPUStats returns the CPU usage statistics of t.
func (t *Task) CPUStats() usage.CPUStats {
	return t.cpuStatsAt(t.k.CPUClockNow())
}

// cpuStatsAt returns t's CPU usage statistics as of clock time now, including
// time accrued in the current state since the last accounting update.
//
// Preconditions: now <= Kernel.CPUClockNow(). (Since Kernel.cpuClock is
// monotonic, this is satisfied if now is the result of a previous call to
// Kernel.CPUClockNow().) This requirement exists because otherwise a racing
// change to t.gosched can cause cpuStatsAt to adjust stats by too much, making
// the returned stats non-monotonic.
func (t *Task) cpuStatsAt(now uint64) usage.CPUStats {
	tsched := t.TaskGoroutineSchedInfo()
	if tsched.Timestamp < now {
		// Update stats to reflect execution since the last update to
		// t.gosched.
		switch tsched.State {
		case TaskGoroutineRunningSys:
			tsched.SysTicks += now - tsched.Timestamp
		case TaskGoroutineRunningApp:
			tsched.UserTicks += now - tsched.Timestamp
		}
	}
	return usage.CPUStats{
		UserTime:          time.Duration(tsched.UserTicks * uint64(linux.ClockTick)),
		SysTime:           time.Duration(tsched.SysTicks * uint64(linux.ClockTick)),
		VoluntarySwitches: atomic.LoadUint64(&t.yieldCount),
	}
}

// CPUStats returns the combined CPU usage statistics of all past and present
// threads in tg.
func (tg *ThreadGroup) CPUStats() usage.CPUStats {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	// Hack to get a pointer to the Kernel.
	if tg.leader == nil {
		// Per comment on tg.leader, this is only possible if nothing in the
		// ThreadGroup has ever executed anyway.
		return usage.CPUStats{}
	}
	now := tg.leader.k.CPUClockNow()
	// Start from time already charged for exited tasks, then add the live ones.
	stats := tg.exitedCPUStats
	// Account for active tasks.
	for t := tg.tasks.Front(); t != nil; t = t.Next() {
		stats.Accumulate(t.cpuStatsAt(now))
	}
	return stats
}

// JoinedChildCPUStats implements the semantics of RUSAGE_CHILDREN: "Return
// resource usage statistics for all children of [tg] that have terminated and
// been waited for. These statistics will include the resources used by
// grandchildren, and further removed descendants, if all of the intervening
// descendants waited on their terminated children."
func (tg *ThreadGroup) JoinedChildCPUStats() usage.CPUStats {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	return tg.childCPUStats
}
+func (t *Task) StateStatus() string { + switch s := t.TaskGoroutineSchedInfo().State; s { + case TaskGoroutineNonexistent: + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + switch t.exitState { + case TaskExitZombie: + return "Z (zombie)" + case TaskExitDead: + return "X (dead)" + default: + // The task goroutine can't exit before passing through + // runExitNotify, so this indicates that the task has been created, + // but the task goroutine hasn't yet started. The Linux equivalent + // is struct task_struct::state == TASK_NEW + // (kernel/fork.c:copy_process() => + // kernel/sched/core.c:sched_fork()), but the TASK_NEW bit is + // masked out by TASK_REPORT for /proc/[pid]/status, leaving only + // TASK_RUNNING. + return "R (running)" + } + case TaskGoroutineRunningSys, TaskGoroutineRunningApp: + return "R (running)" + case TaskGoroutineBlockedInterruptible: + return "S (sleeping)" + case TaskGoroutineStopped: + t.tg.signalHandlers.mu.Lock() + defer t.tg.signalHandlers.mu.Unlock() + switch t.stop.(type) { + case *groupStop: + return "T (stopped)" + case *ptraceStop: + return "t (tracing stop)" + } + fallthrough + case TaskGoroutineBlockedUninterruptible: + // This is the name Linux uses for TASK_UNINTERRUPTIBLE and + // TASK_KILLABLE (= TASK_UNINTERRUPTIBLE | TASK_WAKEKILL): + // fs/proc/array.c:task_state_array. + return "D (disk sleep)" + default: + panic(fmt.Sprintf("Invalid TaskGoroutineState: %v", s)) + } +} + +// CPUMask returns a copy of t's allowed CPU mask. +func (t *Task) CPUMask() sched.CPUSet { + t.mu.Lock() + defer t.mu.Unlock() + return t.allowedCPUMask.Copy() +} + +// SetCPUMask sets t's allowed CPU mask based on mask. It takes ownership of +// mask. +// +// Preconditions: mask.Size() == +// sched.CPUSetSize(t.Kernel().ApplicationCores()). 
+func (t *Task) SetCPUMask(mask sched.CPUSet) error { + if want := sched.CPUSetSize(t.k.applicationCores); mask.Size() != want { + panic(fmt.Sprintf("Invalid CPUSet %v (expected %d bytes)", mask, want)) + } + + // Remove CPUs in mask above Kernel.applicationCores. + mask.ClearAbove(t.k.applicationCores) + + // Ensure that at least 1 CPU is still allowed. + if mask.NumCPUs() == 0 { + return syserror.EINVAL + } + + if t.k.useHostCores { + // No-op; pretend the mask was immediately changed back. + return nil + } + + t.tg.pidns.owner.mu.RLock() + rootTID := t.tg.pidns.owner.Root.tids[t] + t.tg.pidns.owner.mu.RUnlock() + + t.mu.Lock() + defer t.mu.Unlock() + t.allowedCPUMask = mask + atomic.StoreInt32(&t.cpu, assignCPU(mask, rootTID)) + return nil +} + +// CPU returns the cpu id for a given task. +func (t *Task) CPU() int32 { + if t.k.useHostCores { + return int32(hostcpu.GetCPU()) + } + + return atomic.LoadInt32(&t.cpu) +} + +// assignCPU returns the virtualized CPU number for the task with global TID +// tid and allowedCPUMask allowed. +func assignCPU(allowed sched.CPUSet, tid ThreadID) (cpu int32) { + // To pretend that threads are evenly distributed to allowed CPUs, choose n + // to be less than the number of CPUs in allowed ... + n := int(tid) % int(allowed.NumCPUs()) + // ... then pick the nth CPU in allowed. + allowed.ForEachCPU(func(c uint) { + if n--; n == 0 { + cpu = int32(c) + } + }) + return cpu +} + +// Niceness returns t's niceness. +func (t *Task) Niceness() int { + t.mu.Lock() + defer t.mu.Unlock() + return t.niceness +} + +// Priority returns t's priority. +func (t *Task) Priority() int { + t.mu.Lock() + defer t.mu.Unlock() + return t.niceness + 20 +} + +// SetNiceness sets t's niceness to n. +func (t *Task) SetNiceness(n int) { + t.mu.Lock() + defer t.mu.Unlock() + t.niceness = n +} + +// NumaPolicy returns t's current numa policy. 
func (t *Task) NumaPolicy() (policy int32, nodeMask uint32) {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.numaPolicy, t.numaNodeMask
}

// SetNumaPolicy sets t's numa policy.
func (t *Task) SetNumaPolicy(policy int32, nodeMask uint32) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.numaPolicy = policy
	t.numaNodeMask = nodeMask
}
diff --git a/pkg/sentry/kernel/task_signals.go b/pkg/sentry/kernel/task_signals.go
new file mode 100644
index 000000000..2340256b0
--- /dev/null
+++ b/pkg/sentry/kernel/task_signals.go
@@ -0,0 +1,1056 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

// This file defines the behavior of task signal handling.

import (
	"fmt"
	"sync/atomic"

	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// SignalAction is an internal signal action.
type SignalAction int

// Available signal actions.
// Note that although we refer to the complete set internally,
// the application is only capable of using the Default and
// Ignore actions from the system call interface.
const (
	SignalActionTerm SignalAction = iota
	SignalActionCore
	SignalActionStop
	SignalActionIgnore
	SignalActionHandler
)

// Default signal handler actions. Note that for most signals
// (except SIGKILL and SIGSTOP) these can be overridden by the app.
//
// These dispositions match the table in signal(7).
var defaultActions = map[linux.Signal]SignalAction{
	// POSIX.1-1990 standard.
	linux.SIGHUP:  SignalActionTerm,
	linux.SIGINT:  SignalActionTerm,
	linux.SIGQUIT: SignalActionCore,
	linux.SIGILL:  SignalActionCore,
	linux.SIGABRT: SignalActionCore,
	linux.SIGFPE:  SignalActionCore,
	linux.SIGKILL: SignalActionTerm, // but see ThreadGroup.applySignalSideEffects
	linux.SIGSEGV: SignalActionCore,
	linux.SIGPIPE: SignalActionTerm,
	linux.SIGALRM: SignalActionTerm,
	linux.SIGTERM: SignalActionTerm,
	linux.SIGUSR1: SignalActionTerm,
	linux.SIGUSR2: SignalActionTerm,
	linux.SIGCHLD: SignalActionIgnore,
	linux.SIGCONT: SignalActionIgnore, // but see ThreadGroup.applySignalSideEffects
	linux.SIGSTOP: SignalActionStop,
	linux.SIGTSTP: SignalActionStop,
	linux.SIGTTIN: SignalActionStop,
	linux.SIGTTOU: SignalActionStop,
	// POSIX.1-2001 standard.
	linux.SIGBUS:    SignalActionCore,
	linux.SIGPROF:   SignalActionTerm,
	linux.SIGSYS:    SignalActionCore,
	linux.SIGTRAP:   SignalActionCore,
	linux.SIGURG:    SignalActionIgnore,
	linux.SIGVTALRM: SignalActionTerm,
	linux.SIGXCPU:   SignalActionCore,
	linux.SIGXFSZ:   SignalActionCore,
	// The rest on linux.
	linux.SIGSTKFLT: SignalActionTerm,
	linux.SIGIO:     SignalActionTerm,
	linux.SIGPWR:    SignalActionTerm,
	linux.SIGWINCH:  SignalActionIgnore,
}

// computeAction figures out what to do given a signal number
// and an arch.SignalAct. SIGSTOP always results in a SignalActionStop,
// and SIGKILL always results in a SignalActionTerm.
// Signal 0 is always ignored as many programs use it for various internal functions
// and don't expect it to do anything.
//
// In the event the signal is not one of these, act.Handler determines what
// happens next.
// If act.Handler is:
// 0, the default action is taken;
// 1, the signal is ignored;
// anything else, the function returns SignalActionHandler.
func computeAction(sig linux.Signal, act arch.SignalAct) SignalAction {
	// SIGSTOP and SIGKILL cannot be caught or ignored; signal 0 exists only
	// as an existence probe (see kill(2)).
	switch sig {
	case linux.SIGSTOP:
		return SignalActionStop
	case linux.SIGKILL:
		return SignalActionTerm
	case linux.Signal(0):
		return SignalActionIgnore
	}

	switch act.Handler {
	case arch.SignalActDefault:
		return defaultActions[sig]
	case arch.SignalActIgnore:
		return SignalActionIgnore
	default:
		return SignalActionHandler
	}
}

// UnblockableSignals contains the set of signals which cannot be blocked.
var UnblockableSignals = linux.MakeSignalSet(linux.SIGKILL, linux.SIGSTOP)

// StopSignals is the set of signals whose default action is SignalActionStop.
var StopSignals = linux.MakeSignalSet(linux.SIGSTOP, linux.SIGTSTP, linux.SIGTTIN, linux.SIGTTOU)

// dequeueSignalLocked returns a pending unmasked signal. If there are no
// pending unmasked signals, dequeueSignalLocked returns nil.
//
// Task-directed signals take priority over group-directed ones.
//
// Preconditions: t.tg.signalHandlers.mu must be locked.
func (t *Task) dequeueSignalLocked() *arch.SignalInfo {
	if info := t.pendingSignals.dequeue(t.tr.SignalMask); info != nil {
		return info
	}
	if info := t.tg.pendingSignals.dequeue(t.tr.SignalMask); info != nil {
		return info
	}
	return nil
}

// TakeSignal returns a pending signal not blocked by mask. Signal handlers are
// not affected. If there are no pending signals not blocked by mask,
// TakeSignal returns a nil SignalInfo.
func (t *Task) TakeSignal(mask linux.SignalSet) *arch.SignalInfo {
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	if info := t.pendingSignals.dequeue(mask); info != nil {
		return info
	}
	if info := t.tg.pendingSignals.dequeue(mask); info != nil {
		return info
	}
	return nil
}

// discardSpecificLocked removes all instances of the given signal from all
// signal queues in tg.
//
// Preconditions: The signal mutex must be locked.
func (tg *ThreadGroup) discardSpecificLocked(sig linux.Signal) {
	tg.pendingSignals.discardSpecific(sig)
	for t := tg.tasks.Front(); t != nil; t = t.Next() {
		t.pendingSignals.discardSpecific(sig)
	}
}

// PendingSignals returns the set of pending signals.
func (t *Task) PendingSignals() linux.SignalSet {
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	return t.pendingSignals.pendingSet | t.tg.pendingSignals.pendingSet
}

// deliverSignal delivers the given signal and returns the following run state.
func (t *Task) deliverSignal(info *arch.SignalInfo, act arch.SignalAct) taskRunState {
	sigact := computeAction(linux.Signal(info.Signo), act)

	// If the signal interrupted a restartable syscall, decide whether to
	// restart it or fail it with EINTR before transferring control.
	if t.haveSyscallReturn {
		if sre, ok := SyscallRestartErrnoFromReturn(t.Arch().Return()); ok {
			// Signals that are ignored, cause a thread group stop, or
			// terminate the thread group do not interact with interrupted
			// syscalls; in Linux terms, they are never returned to the signal
			// handling path from get_signal => get_signal_to_deliver. The
			// behavior of an interrupted syscall is determined by the first
			// signal that is actually handled (by userspace).
			if sigact == SignalActionHandler {
				switch {
				case sre == ERESTARTNOHAND:
					fallthrough
				case sre == ERESTART_RESTARTBLOCK:
					fallthrough
				case (sre == ERESTARTSYS && !act.IsRestart()):
					t.Debugf("Not restarting syscall %d after errno %d: interrupted by signal %d", t.Arch().SyscallNo(), sre, info.Signo)
					t.Arch().SetReturn(uintptr(-t.ExtractErrno(syserror.EINTR, -1)))
				default:
					t.Debugf("Restarting syscall %d after errno %d: interrupted by signal %d", t.Arch().SyscallNo(), sre, info.Signo)
					t.Arch().RestartSyscall()
				}
			}
		}
	}

	switch sigact {
	case SignalActionTerm, SignalActionCore:
		// "Default action is to terminate the process." - signal(7)
		t.Debugf("Signal %d: terminating thread group", info.Signo)
		t.PrepareGroupExit(ExitStatus{Signo: int(info.Signo)})
		return (*runExit)(nil)

	case SignalActionStop:
		// "Default action is to stop the process."
		t.initiateGroupStop(info)

	case SignalActionIgnore:
		// "Default action is to ignore the signal."
		t.Debugf("Signal %d: ignored", info.Signo)

	case SignalActionHandler:
		// Try to deliver the signal to the user-configured handler.
		t.Debugf("Signal %d: delivering to handler", info.Signo)
		if err := t.deliverSignalToHandler(info, act); err != nil {
			t.Warningf("Failed to deliver signal %+v to user handler: %v", info, err)
			// Send a forced SIGSEGV. If the signal that couldn't be delivered
			// was a SIGSEGV, force the handler to SIG_DFL.
			t.forceSignal(linux.SIGSEGV, linux.Signal(info.Signo) == linux.SIGSEGV /* unconditional */)
			t.SendSignal(sigPriv(linux.SIGSEGV))
		}

	default:
		panic(fmt.Sprintf("Unknown signal action %+v, %d?", info, computeAction(linux.Signal(info.Signo), act)))
	}
	return (*runInterrupt)(nil)
}

// deliverSignalToHandler changes the task's userspace state to enter the given
// user-configured handler for the given signal.
func (t *Task) deliverSignalToHandler(info *arch.SignalInfo, act arch.SignalAct) error {
	// Signal delivery to an application handler interrupts restartable
	// sequences.
	t.rseqInterrupt()

	// Are we executing on the main stack,
	// or the provided alternate stack?
	sp := usermem.Addr(t.Arch().Stack())

	// N.B. This is a *copy* of the alternate stack that the user's signal
	// handler expects to see in its ucontext (even if it's not in use).
	alt := t.signalStack
	if act.IsOnStack() && alt.IsEnabled() {
		alt.SetOnStack()
		if !t.OnSignalStack(alt) {
			sp = usermem.Addr(alt.Top())
		}
	}

	// Set up the signal handler. If we have a saved signal mask, the signal
	// handler should run with the current mask, but sigreturn should restore
	// the saved one.
	st := &arch.Stack{t.Arch(), t.MemoryManager(), sp}
	mask := t.tr.SignalMask
	if t.haveSavedSignalMask {
		mask = t.savedSignalMask
	}
	if err := t.Arch().SignalSetup(st, &act, info, &alt, mask); err != nil {
		return err
	}
	t.haveSavedSignalMask = false

	// Add our signal mask.
	newMask := t.tr.SignalMask | act.Mask
	if !act.IsNoDefer() {
		newMask |= linux.SignalSetOf(linux.Signal(info.Signo))
	}
	t.SetSignalMask(newMask)

	return nil
}

var ctrlResume = &SyscallControl{ignoreReturn: true}

// SignalReturn implements sigreturn(2) (if rt is false) or rt_sigreturn(2) (if
// rt is true).
func (t *Task) SignalReturn(rt bool) (*SyscallControl, error) {
	st := t.Stack()
	sigset, err := t.Arch().SignalRestore(st, rt)
	if err != nil {
		return nil, err
	}

	// Restore our signal mask. SIGKILL and SIGSTOP should not be blocked.
	t.SetSignalMask(sigset &^ UnblockableSignals)

	// TODO: sys_rt_sigreturn also calls restore_altstack from
	// uc.stack, allowing the signal handler to implicitly mutate the signal
	// stack.

	return ctrlResume, nil
}

// SendSignal sends the given signal to t.
//
// The following errors may be returned:
//
//	syserror.ESRCH - The task has exited.
//	syserror.EINVAL - The signal is not valid.
//	syserror.EAGAIN - The signal is realtime, and cannot be queued.
//
func (t *Task) SendSignal(info *arch.SignalInfo) error {
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	return t.sendSignalLocked(info, false /* group */)
}

// SendGroupSignal sends the given signal to t's thread group.
func (t *Task) SendGroupSignal(info *arch.SignalInfo) error {
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	return t.sendSignalLocked(info, true /* group */)
}

// SendSignal sends the given signal to tg, using tg's leader to determine if
// the signal is blocked.
func (tg *ThreadGroup) SendSignal(info *arch.SignalInfo) error {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	tg.signalHandlers.mu.Lock()
	defer tg.signalHandlers.mu.Unlock()
	return tg.leader.sendSignalLocked(info, true /* group */)
}

// onCPULocked returns true if t is currently running application code (or,
// if includeSys is true, sentry code on the application's behalf) and is not
// exiting.
//
// Preconditions: The TaskSet mutex must be locked.
func (t *Task) onCPULocked(includeSys bool) bool {
	// Task is exiting.
	if t.exitState != TaskExitNone {
		return false
	}

	switch t.TaskGoroutineSchedInfo().State {
	case TaskGoroutineRunningSys:
		return includeSys
	case TaskGoroutineRunningApp:
		return true
	default:
		return false
	}
}

// SendTimerSignal mimics the process timer signal delivery behavior in linux:
// signals are delivered to the thread that triggers the timer expiration (see
// kernel/time/posix-cpu-timers.c:check_process_timers()). This
// means
// 1) the thread is running on cpu at the time.
// 2) a thread runs more frequently will get more of those signals.
//
// We approximate this behavior by selecting a running task in a round-robin
// fashion. Statistically, a thread running more often should have a higher
// probability to be selected.
func (tg *ThreadGroup) SendTimerSignal(info *arch.SignalInfo, includeSys bool) error {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	tg.signalHandlers.mu.Lock()
	defer tg.signalHandlers.mu.Unlock()

	// Find the next running threads.
	var t *Task
	if tg.lastTimerSignalTask == nil {
		t = tg.tasks.Front()
	} else {
		t = tg.lastTimerSignalTask.Next()
	}

	// Iterate from lastTimerSignalTask.Next() to the last task in the task list.
	for t != nil {
		if t.onCPULocked(includeSys) {
			tg.lastTimerSignalTask = t
			return t.sendSignalLocked(info, true /* group */)
		}
		t = t.Next()
	}

	// t is nil when we reach here. If lastTimerSignalTask is not nil, iterate
	// from Front to lastTimerSignalTask.
	if tg.lastTimerSignalTask != nil {
		for t := tg.tasks.Front(); t != tg.lastTimerSignalTask.Next(); t = t.Next() {
			if t.onCPULocked(includeSys) {
				tg.lastTimerSignalTask = t
				return t.sendSignalLocked(info, true /* group */)
			}
		}
	}

	// No running threads? Just try the leader.
	tg.lastTimerSignalTask = tg.leader
	return tg.leader.sendSignalLocked(info, true /* group */)
}

// sendSignalLocked sends info to t (if group is false) or to t's thread group
// (if group is true), applying signal side effects and waking an eligible
// receiver if one exists.
//
// Preconditions: The TaskSet mutex must be locked (for reading or writing);
// the signal mutex must be locked.
func (t *Task) sendSignalLocked(info *arch.SignalInfo, group bool) error {
	if t.exitState == TaskExitDead {
		return syserror.ESRCH
	}
	sig := linux.Signal(info.Signo)
	if sig == 0 {
		return nil
	}
	if !sig.IsValid() {
		return syserror.EINVAL
	}

	// Signal side effects apply even if the signal is ultimately discarded.
	t.tg.applySignalSideEffectsLocked(sig)

	// TODO: "Only signals for which the "init" process has established a
	// signal handler can be sent to the "init" process by other members of the
	// PID namespace. This restriction applies even to privileged processes,
	// and prevents other members of the PID namespace from accidentally
	// killing the "init" process." - pid_namespaces(7). We don't currently do
	// this for child namespaces, though we should; we also don't do this for
	// the root namespace (the same restriction applies to global init on
	// Linux), where whether or not we should is much murkier. In practice,
	// most sandboxed applications are not prepared to function as an init
	// process.

	// Unmasked, ignored signals are discarded without being queued, unless
	// they will be visible to a tracer. Even for group signals, it's the
	// originally-targeted task's signal mask and tracer that matter; compare
	// Linux's kernel/signal.c:__send_signal() => prepare_signal() =>
	// sig_ignored().
	ignored := computeAction(sig, t.tg.signalHandlers.actions[sig]) == SignalActionIgnore
	if linux.SignalSetOf(sig)&t.tr.SignalMask == 0 && ignored && !t.hasTracer() {
		t.Debugf("Discarding ignored signal %d", sig)
		return nil
	}

	q := &t.pendingSignals
	if group {
		q = &t.tg.pendingSignals
	}
	if !q.enqueue(info) {
		// enqueue failed: the queue is full (realtime) or the standard
		// signal is already pending (standard signals don't queue).
		if sig.IsRealtime() {
			return syserror.EAGAIN
		}
		t.Debugf("Discarding duplicate signal %d", sig)
		return nil
	}

	// Find a receiver to notify. Note that the task we choose to notify, if
	// any, may not be the task that actually dequeues and handles the signal;
	// e.g. a racing signal mask change may cause the notified task to become
	// ineligible, or a racing sibling task may dequeue the signal first.
	if t.canReceiveSignalLocked(sig) {
		t.Debugf("Notified of signal %d", sig)
		t.interrupt()
		return nil
	}
	if group {
		if nt := t.tg.findSignalReceiverLocked(sig); nt != nil {
			nt.Debugf("Notified of group signal %d", sig)
			nt.interrupt()
			return nil
		}
	}
	t.Debugf("No task notified of signal %d", sig)
	return nil
}

// applySignalSideEffectsLocked applies the side effects that sending sig has
// on tg regardless of whether the signal is ultimately delivered (stop-signal
// vs. SIGCONT interaction, and SIGKILL's immediate kill of all tasks).
//
// Preconditions: The signal mutex must be locked.
func (tg *ThreadGroup) applySignalSideEffectsLocked(sig linux.Signal) {
	switch {
	case linux.SignalSetOf(sig)&StopSignals != 0:
		// Stop signals cause all prior SIGCONT to be discarded. (This is
		// despite the fact this has little effect since SIGCONT's most
		// important effect is applied when the signal is sent in the branch
		// below, not when the signal is delivered.)
		tg.discardSpecificLocked(linux.SIGCONT)
	case sig == linux.SIGCONT:
		// "The SIGCONT signal has a side effect of waking up (all threads of)
		// a group-stopped process. This side effect happens before
		// signal-delivery-stop. The tracer can't suppress this side effect (it
		// can only suppress signal injection, which only causes the SIGCONT
		// handler to not be executed in the tracee, if such a handler is
		// installed." - ptrace(2)
		tg.endGroupStopLocked(true)
	case sig == linux.SIGKILL:
		// "SIGKILL does not generate signal-delivery-stop and therefore the
		// tracer can't suppress it. SIGKILL kills even within system calls
		// (syscall-exit-stop is not generated prior to death by SIGKILL)." -
		// ptrace(2)
		//
		// Note that this differs from ThreadGroup.requestExit in that it
		// ignores tg.execing.
		if !tg.exiting {
			tg.exiting = true
			tg.exitStatus = ExitStatus{Signo: int(linux.SIGKILL)}
		}
		for t := tg.tasks.Front(); t != nil; t = t.Next() {
			t.killLocked()
		}
	}
}

// canReceiveSignalLocked returns true if t should be interrupted to receive
// the given signal. canReceiveSignalLocked is analogous to Linux's
// kernel/signal.c:wants_signal(), but see below for divergences.
//
// Preconditions: The signal mutex must be locked.
func (t *Task) canReceiveSignalLocked(sig linux.Signal) bool {
	// - Do not choose tasks that are blocking the signal.
	if linux.SignalSetOf(sig)&t.tr.SignalMask != 0 {
		return false
	}
	// - No need to check Task.exitState, as the exit path sets every bit in the
	// signal mask when it transitions from TaskExitNone to TaskExitInitiated.
	// - No special case for SIGKILL: SIGKILL already interrupted all tasks in the
	// task group via applySignalSideEffects => killLocked.
	// - Do not choose stopped tasks, which cannot handle signals.
	if t.stop != nil {
		return false
	}
	// - TODO: No special case for when t is also the sending task,
	// because the identity of the sender is unknown.
	// - Do not choose tasks that have already been interrupted, as they may be
	// busy handling another signal.
	if len(t.interruptChan) != 0 {
		return false
	}
	return true
}

// findSignalReceiverLocked returns a task in tg that should be interrupted to
// receive the given signal. If no such task exists, findSignalReceiverLocked
// returns nil.
//
// Linux actually records curr_target to balance the group signal targets.
//
// Preconditions: The signal mutex must be locked.
func (tg *ThreadGroup) findSignalReceiverLocked(sig linux.Signal) *Task {
	for t := tg.tasks.Front(); t != nil; t = t.Next() {
		if t.canReceiveSignalLocked(sig) {
			return t
		}
	}
	return nil
}

// forceSignal ensures that the task is not ignoring or blocking the given
// signal. If unconditional is true, forceSignal takes action even if the
// signal isn't being ignored or blocked.
func (t *Task) forceSignal(sig linux.Signal, unconditional bool) {
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	t.forceSignalLocked(sig, unconditional)
}

// forceSignalLocked is the locked version of forceSignal: it resets sig's
// action to the default and unblocks sig if needed.
//
// Preconditions: The TaskSet mutex and the signal mutex must be locked.
func (t *Task) forceSignalLocked(sig linux.Signal, unconditional bool) {
	blocked := linux.SignalSetOf(sig)&t.tr.SignalMask != 0
	act := t.tg.signalHandlers.actions[sig]
	ignored := act.Handler == arch.SignalActIgnore
	if blocked || ignored || unconditional {
		act.Handler = arch.SignalActDefault
		t.tg.signalHandlers.actions[sig] = act
		if blocked {
			t.setSignalMaskLocked(t.tr.SignalMask &^ linux.SignalSetOf(sig))
		}
	}
}

// SignalMask returns a copy of t's signal mask.
func (t *Task) SignalMask() linux.SignalSet {
	// Atomic load pairs with the atomic store in setSignalMaskLocked,
	// allowing lock-free reads.
	return linux.SignalSet(atomic.LoadUint64((*uint64)(&t.tr.SignalMask)))
}

// SetSignalMask sets t's signal mask.
//
// Preconditions: SetSignalMask can only be called by the task goroutine.
// t.exitState < TaskExitZombie.
func (t *Task) SetSignalMask(mask linux.SignalSet) {
	// By precondition, t prevents t.tg from completing an execve and mutating
	// t.tg.signalHandlers, so we can skip the TaskSet mutex.
	t.tg.signalHandlers.mu.Lock()
	t.setSignalMaskLocked(mask)
	t.tg.signalHandlers.mu.Unlock()
}

// setSignalMaskLocked updates t's signal mask and re-targets or re-issues
// interrupts as needed so that pending signals are not stranded by the mask
// change.
//
// Preconditions: The signal mutex must be locked.
func (t *Task) setSignalMaskLocked(mask linux.SignalSet) {
	oldMask := t.tr.SignalMask
	atomic.StoreUint64((*uint64)(&t.tr.SignalMask), uint64(mask))

	// If the new mask blocks any signals that were not blocked by the old
	// mask, and at least one such signal is pending in tg.pendingSignals, and
	// t has been woken, it could be the case that t was woken to handle that
	// signal, but will no longer do so as a result of its new signal mask, so
	// we have to pick a replacement.
	blocked := mask &^ oldMask
	blockedGroupPending := blocked & t.tg.pendingSignals.pendingSet
	if blockedGroupPending != 0 && t.interrupted() {
		linux.ForEachSignal(blockedGroupPending, func(sig linux.Signal) {
			if nt := t.tg.findSignalReceiverLocked(sig); nt != nil {
				nt.interrupt()
				// NOTE(review): this return only exits the per-signal
				// closure; iteration continues over the remaining signals.
				return
			}
		})
		// We have to re-issue the interrupt consumed by t.interrupted() since
		// it might have been for a different reason.
		t.interruptSelf()
	}

	// Conversely, if the new mask unblocks any signals that were blocked by
	// the old mask, and at least one such signal is pending, we may now need
	// to handle that signal.
	unblocked := oldMask &^ mask
	unblockedPending := unblocked & (t.pendingSignals.pendingSet | t.tg.pendingSignals.pendingSet)
	if unblockedPending != 0 {
		t.interruptSelf()
	}
}

// SetSavedSignalMask sets the saved signal mask (see Task.savedSignalMask's
// comment).
//
// Preconditions: SetSavedSignalMask can only be called by the task goroutine.
func (t *Task) SetSavedSignalMask(mask linux.SignalSet) {
	t.savedSignalMask = mask
	t.haveSavedSignalMask = true
}

// SignalStack returns the task-private signal stack.
func (t *Task) SignalStack() arch.SignalStack {
	return t.signalStack
}

// OnSignalStack returns true if, when the task resumes running, it will run on
// the task-private signal stack.
//
// NOTE(review): Linux's on_sig_stack() (include/linux/sched/signal.h) treats
// sp == Addr as *off* the stack and sp == Addr+Size as *on* it (the stack
// grows down); this check uses the opposite boundary inclusion — confirm
// intended.
func (t *Task) OnSignalStack(s arch.SignalStack) bool {
	sp := usermem.Addr(t.Arch().Stack())
	return usermem.Addr(s.Addr) <= sp && sp < usermem.Addr(s.Addr+s.Size)
}

// SetSignalStack sets the task-private signal stack, preserving only the
// SignalStackFlagDisable bit of alt.Flags (all other flags are masked out
// before the stack is stored).
func (t *Task) SetSignalStack(alt arch.SignalStack) error {
	// Mask out irrelevant parts: only disable matters.
	alt.Flags &= arch.SignalStackFlagDisable
	t.signalStack = alt
	return nil
}

// SetSignalAct atomically sets the thread group's signal action for signal sig
// to *actptr (if actptr is not nil) and returns the old signal action.
func (tg *ThreadGroup) SetSignalAct(sig linux.Signal, actptr *arch.SignalAct) (arch.SignalAct, error) {
	if !sig.IsValid() {
		return arch.SignalAct{}, syserror.EINVAL
	}

	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	sh := tg.signalHandlers
	sh.mu.Lock()
	defer sh.mu.Unlock()
	oldact := sh.actions[sig]
	if actptr != nil {
		// SIGKILL and SIGSTOP actions can never be changed.
		if sig == linux.SIGKILL || sig == linux.SIGSTOP {
			return oldact, syserror.EINVAL
		}

		act := *actptr
		act.Mask &^= UnblockableSignals
		sh.actions[sig] = act
		// From POSIX, by way of Linux:
		//
		// "Setting a signal action to SIG_IGN for a signal that is pending
		// shall cause the pending signal to be discarded, whether or not it is
		// blocked."
		//
		// "Setting a signal action to SIG_DFL for a signal that is pending and
		// whose default action is to ignore the signal (for example, SIGCHLD),
		// shall cause the pending signal to be discarded, whether or not it is
		// blocked."
		if computeAction(sig, act) == SignalActionIgnore {
			tg.discardSpecificLocked(sig)
		}
	}
	return oldact, nil
}

// CopyOutSignalAct converts the given SignalAct into an architecture-specific
// type and then copies it out to task memory.
func (t *Task) CopyOutSignalAct(addr usermem.Addr, s *arch.SignalAct) error {
	n := t.Arch().NewSignalAct()
	n.SerializeFrom(s)
	_, err := t.CopyOut(addr, n)
	return err
}

// CopyInSignalAct copies an architecture-specific sigaction type from task
// memory and then converts it into a SignalAct.
func (t *Task) CopyInSignalAct(addr usermem.Addr) (arch.SignalAct, error) {
	n := t.Arch().NewSignalAct()
	var s arch.SignalAct
	if _, err := t.CopyIn(addr, n); err != nil {
		return s, err
	}
	n.DeserializeTo(&s)
	return s, nil
}

// CopyOutSignalStack converts the given SignalStack into an
// architecture-specific type and then copies it out to task memory.
func (t *Task) CopyOutSignalStack(addr usermem.Addr, s *arch.SignalStack) error {
	n := t.Arch().NewSignalStack()
	n.SerializeFrom(s)
	_, err := t.CopyOut(addr, n)
	return err
}

// CopyInSignalStack copies an architecture-specific stack_t from task memory
// and then converts it into a SignalStack.
func (t *Task) CopyInSignalStack(addr usermem.Addr) (arch.SignalStack, error) {
	n := t.Arch().NewSignalStack()
	var s arch.SignalStack
	if _, err := t.CopyIn(addr, n); err != nil {
		return s, err
	}
	n.DeserializeTo(&s)
	return s, nil
}

// groupStop is a TaskStop placed on tasks that have received a stop signal
// (SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU). (The term "group-stop" originates from
// the ptrace man page.)
type groupStop struct{}

// Killable implements TaskStop.Killable.
func (*groupStop) Killable() bool { return true }

// groupStopPhase tracks the progress of a thread group through a group stop.
type groupStopPhase int

const (
	// groupStopNone indicates that a thread group is not in, or attempting to
	// enter or leave, a group stop.
	groupStopNone groupStopPhase = iota

	// groupStopDequeued indicates that at least one task in a thread group has
	// dequeued a stop signal (or dequeued any signal and entered a
	// signal-delivery-stop as a result, which allows ptrace to change the
	// signal into a stop signal), but temporarily dropped the signal mutex
	// without initiating the group stop.
	//
	// groupStopDequeued is analogous to JOBCTL_STOP_DEQUEUED in Linux.
	groupStopDequeued

	// groupStopInitiated indicates that a task in a thread group has initiated
	// a group stop, but not all tasks in the thread group have acknowledged
	// entering the group stop.
	//
	// groupStopInitiated is represented by JOBCTL_STOP_PENDING &&
	// !SIGNAL_STOP_STOPPED in Linux.
	groupStopInitiated

	// groupStopComplete indicates that all tasks in a thread group have
	// acknowledged entering the group stop, and the last one to do so has
	// notified the thread group's parent.
	//
	// groupStopComplete is represented by JOBCTL_STOP_PENDING &&
	// SIGNAL_STOP_STOPPED in Linux.
	groupStopComplete
)

// initiateGroupStop attempts to initiate a group stop based on a
// previously-dequeued stop signal.
//
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) initiateGroupStop(info *arch.SignalInfo) {
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	// The phase must still be groupStopDequeued; any other phase means a
	// racing signal, exit, or execve won since the signal was dequeued.
	if t.tg.groupStopPhase != groupStopDequeued {
		t.Debugf("Signal %d: not stopping thread group: lost to racing signal", info.Signo)
		return
	}
	if t.tg.exiting {
		t.Debugf("Signal %d: not stopping thread group: lost to racing group exit", info.Signo)
		return
	}
	if t.tg.execing != nil {
		t.Debugf("Signal %d: not stopping thread group: lost to racing execve", info.Signo)
		return
	}
	t.Debugf("Signal %d: stopping thread group", info.Signo)
	t.tg.groupStopPhase = groupStopInitiated
	t.tg.groupStopSignal = linux.Signal(info.Signo)
	t.tg.groupStopCount = 0
	// Require every task in the group to acknowledge the stop.
	for t2 := t.tg.tasks.Front(); t2 != nil; t2 = t2.Next() {
		t2.groupStopRequired = true
		t2.groupStopAcknowledged = false
		t2.interrupt()
	}
}

// endGroupStopLocked ensures that all prior stop signals received by tg are
// not stopping tg and will not stop tg in the future. If broadcast is true,
// parent and tracer notification will be scheduled if appropriate.
//
// Preconditions: The signal mutex must be locked.
func (tg *ThreadGroup) endGroupStopLocked(broadcast bool) {
	// Discard all previously-queued stop signals.
	linux.ForEachSignal(StopSignals, tg.discardSpecificLocked)

	if tg.groupStopPhase != groupStopNone {
		tg.leader.Debugf("Ending group stop currently in phase %d", tg.groupStopPhase)
		if tg.groupStopPhase == groupStopInitiated || tg.groupStopPhase == groupStopComplete {
			tg.groupStopSignal = 0
			for t := tg.tasks.Front(); t != nil; t = t.Next() {
				if _, ok := t.stop.(*groupStop); ok {
					t.endInternalStopLocked()
				}
			}
			if broadcast {
				// Instead of notifying the parent here, set groupContNotify so
				// that one of the continuing tasks does so. (Linux does
				// something similar.) The reason we do this is to keep locking
				// sane. In order to send a signal to the parent, we need to
				// lock its signal mutex, but we're already holding tg's signal
				// mutex, and the TaskSet mutex must be locked for writing for
				// us to hold two signal mutexes. Since we don't want to
				// require this for endGroupStopLocked (which is called from
				// signal-sending paths), nor do we want to lose atomicity by
				// releasing the mutexes we're already holding, just let the
				// continuing thread group deal with it.
				tg.groupContNotify = true
				tg.groupContInterrupted = tg.groupStopPhase == groupStopInitiated
				tg.groupContWaitable = true
			}
		}
		// If groupStopPhase was groupStopDequeued, setting it to groupStopNone
		// will cause following calls to initiateGroupStop to recognize that
		// the group stop has been cancelled.
		tg.groupStopPhase = groupStopNone
	}
}

// signalStop sends a signal to t's thread group of a new group stop, group
// continue, or ptrace stop, if appropriate. code and status are set in the
// signal sent to tg, if any.
//
// Preconditions: The TaskSet mutex must be locked (for reading or writing).
func (t *Task) signalStop(target *Task, code int32, status int32) {
	t.tg.signalHandlers.mu.Lock()
	defer t.tg.signalHandlers.mu.Unlock()
	act, ok := t.tg.signalHandlers.actions[linux.SIGCHLD]
	// Suppress the SIGCHLD only if the handler ignores it or has
	// SA_NOCLDSTOP set.
	if !ok || (act.Handler != arch.SignalActIgnore && act.Flags&arch.SignalFlagNoCldStop == 0) {
		sigchld := &arch.SignalInfo{
			Signo: int32(linux.SIGCHLD),
			Code:  code,
		}
		sigchld.SetPid(int32(t.tg.pidns.tids[target]))
		sigchld.SetUid(int32(target.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow()))
		sigchld.SetStatus(status)
		// TODO: Set utime, stime.
		t.sendSignalLocked(sigchld, true /* group */)
	}
}

// The runInterrupt state handles conditions indicated by interrupts.
+type runInterrupt struct{} + +func (*runInterrupt) execute(t *Task) taskRunState { + // Interrupts are de-duplicated (if t is interrupted twice before + // t.interrupted() is called, t.interrupted() will only return true once), + // so early exits from this function must re-enter the runInterrupt state + // to check for more interrupt-signaled conditions. + + t.tg.signalHandlers.mu.Lock() + + // Did we just leave a group stop? + if t.tg.groupContNotify { + t.tg.groupContNotify = false + sig := t.tg.groupStopSignal + intr := t.tg.groupContInterrupted + t.tg.signalHandlers.mu.Unlock() + t.tg.pidns.owner.mu.RLock() + // For consistency with Linux, if the parent and (thread group + // leader's) tracer are in the same thread group, deduplicate + // notifications. + notifyParent := t.tg.leader.parent != nil + if tracer := t.tg.leader.ptraceTracer.Load().(*Task); tracer != nil { + if notifyParent && tracer.tg == t.tg.leader.parent.tg { + notifyParent = false + } + // Sending CLD_STOPPED to the tracer doesn't really make any sense; + // the thread group leader may have already entered the stop and + // notified its tracer accordingly. But it's consistent with + // Linux... + if intr { + tracer.signalStop(t.tg.leader, arch.CLD_STOPPED, int32(sig)) + if !notifyParent { + tracer.tg.eventQueue.Notify(EventGroupContinue | EventTraceeStop | EventChildGroupStop) + } else { + tracer.tg.eventQueue.Notify(EventGroupContinue | EventTraceeStop) + } + } else { + tracer.signalStop(t.tg.leader, arch.CLD_CONTINUED, int32(sig)) + tracer.tg.eventQueue.Notify(EventGroupContinue) + } + } + if notifyParent { + // If groupContInterrupted, do as Linux does and pretend the group + // stop completed just before it ended. The theoretical behavior in + // this case would be to send a SIGCHLD indicating the completed + // stop, followed by a SIGCHLD indicating the continue. However, + // SIGCHLD is a standard signal, so the latter would always be + // dropped. 
Hence sending only the former is equivalent. + if intr { + t.tg.leader.parent.signalStop(t.tg.leader, arch.CLD_STOPPED, int32(sig)) + t.tg.leader.parent.tg.eventQueue.Notify(EventGroupContinue | EventChildGroupStop) + } else { + t.tg.leader.parent.signalStop(t.tg.leader, arch.CLD_CONTINUED, int32(sig)) + t.tg.leader.parent.tg.eventQueue.Notify(EventGroupContinue) + } + } + t.tg.pidns.owner.mu.RUnlock() + return (*runInterrupt)(nil) + } + + // Do we need to enter a group stop? + if t.groupStopRequired { + t.groupStopRequired = false + sig := t.tg.groupStopSignal + notifyParent := false + if !t.groupStopAcknowledged { + t.groupStopAcknowledged = true + t.tg.groupStopCount++ + if t.tg.groupStopCount == t.tg.activeTasks { + t.Debugf("Completing group stop") + notifyParent = true + t.tg.groupStopPhase = groupStopComplete + t.tg.groupStopWaitable = true + t.tg.groupContNotify = false + t.tg.groupContWaitable = false + } + } + // Drop the signal mutex so we can take the TaskSet mutex. + t.tg.signalHandlers.mu.Unlock() + + t.tg.pidns.owner.mu.RLock() + if t.tg.leader.parent == nil { + notifyParent = false + } + if tracer := t.Tracer(); tracer != nil { + t.ptraceCode = int32(sig) + t.ptraceSiginfo = nil + if t.beginPtraceStopLocked() { + tracer.signalStop(t, arch.CLD_STOPPED, int32(sig)) + // For consistency with Linux, if the parent and tracer are in the + // same thread group, deduplicate notification signals. 
+ if notifyParent && tracer.tg == t.tg.leader.parent.tg { + notifyParent = false + tracer.tg.eventQueue.Notify(EventChildGroupStop | EventTraceeStop) + } else { + tracer.tg.eventQueue.Notify(EventTraceeStop) + } + } + } else { + t.tg.signalHandlers.mu.Lock() + if !t.killedLocked() { + t.beginInternalStopLocked((*groupStop)(nil)) + } + t.tg.signalHandlers.mu.Unlock() + } + if notifyParent { + t.tg.leader.parent.signalStop(t.tg.leader, arch.CLD_STOPPED, int32(sig)) + t.tg.leader.parent.tg.eventQueue.Notify(EventChildGroupStop) + } + t.tg.pidns.owner.mu.RUnlock() + + return (*runInterrupt)(nil) + } + + // Are there signals pending? + if info := t.dequeueSignalLocked(); info != nil { + if linux.SignalSetOf(linux.Signal(info.Signo))&StopSignals != 0 && t.tg.groupStopPhase == groupStopNone { + // Indicate that we've dequeued a stop signal before + // unlocking the signal mutex; initiateGroupStop will check + // that the phase hasn't changed (or is at least another + // "stop signal dequeued" phase) after relocking it. + t.tg.groupStopPhase = groupStopDequeued + } + if t.ptraceSignalLocked(info) { + // Dequeueing the signal action must wait until after the + // signal-delivery-stop ends since the tracer can change or + // suppress the signal. + t.tg.signalHandlers.mu.Unlock() + return (*runInterruptAfterSignalDeliveryStop)(nil) + } + act := t.tg.signalHandlers.dequeueAction(linux.Signal(info.Signo)) + t.tg.signalHandlers.mu.Unlock() + return t.deliverSignal(info, act) + } + + t.tg.signalHandlers.mu.Unlock() + return (*runApp)(nil) +} + +type runInterruptAfterSignalDeliveryStop struct{} + +func (*runInterruptAfterSignalDeliveryStop) execute(t *Task) taskRunState { + t.tg.pidns.owner.mu.Lock() + // Can't defer unlock: deliverSignal must be called without holding TaskSet + // mutex. 
+ sig := linux.Signal(t.ptraceCode) + defer func() { + t.ptraceSiginfo = nil + }() + if !sig.IsValid() { + t.tg.pidns.owner.mu.Unlock() + return (*runInterrupt)(nil) + } + info := t.ptraceSiginfo + if sig != linux.Signal(info.Signo) { + info.Signo = int32(sig) + info.Errno = 0 + info.Code = arch.SignalInfoUser + // pid isn't a valid field for all signal numbers, but Linux + // doesn't care (kernel/signal.c:ptrace_signal()). + // + // Linux uses t->parent for the tid and uid here, which is the tracer + // if it hasn't detached or the real parent otherwise. + parent := t.parent + if tracer := t.Tracer(); tracer != nil { + parent = tracer + } + if parent == nil { + // Tracer has detached and t was created by Kernel.CreateProcess(). + // Pretend the parent is in an ancestor PID + user namespace. + info.SetPid(0) + info.SetUid(int32(auth.OverflowUID)) + } else { + info.SetPid(int32(t.tg.pidns.tids[parent])) + info.SetUid(int32(parent.Credentials().RealKUID.In(t.UserNamespace()).OrOverflow())) + } + } + t.tg.signalHandlers.mu.Lock() + t.tg.pidns.owner.mu.Unlock() + // If the signal is masked, re-queue it. + if linux.SignalSetOf(sig)&t.tr.SignalMask != 0 { + t.sendSignalLocked(info, false /* group */) + t.tg.signalHandlers.mu.Unlock() + return (*runInterrupt)(nil) + } + act := t.tg.signalHandlers.dequeueAction(linux.Signal(info.Signo)) + t.tg.signalHandlers.mu.Unlock() + return t.deliverSignal(info, act) +} diff --git a/pkg/sentry/kernel/task_start.go b/pkg/sentry/kernel/task_start.go new file mode 100644 index 000000000..801cb3395 --- /dev/null +++ b/pkg/sentry/kernel/task_start.go @@ -0,0 +1,252 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/futex" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/sched" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// TaskConfig defines the configuration of a new Task (see below). +type TaskConfig struct { + // Kernel is the owning Kernel. + *Kernel + + // Parent is the new task's parent. Parent may be nil. + Parent *Task + + // ThreadGroup is the ThreadGroup the new task belongs to. + *ThreadGroup + + // TaskContext is the TaskContext of the new task. + *TaskContext + + // TaskResources is the TaskResources of the new task. + *TaskResources + + // Credentials is the Credentials of the new task. + Credentials *auth.Credentials + + // Niceness is the niceness of the new task. + Niceness int + + // If NetworkNamespaced is true, the new task should observe a non-root + // network namespace. + NetworkNamespaced bool + + // AllowedCPUMask contains the cpus that this task can run on. + AllowedCPUMask sched.CPUSet + + // UTSNamespace is the UTSNamespace of the new task. + UTSNamespace *UTSNamespace + + // IPCNamespace is the IPCNamespace of the new task. + IPCNamespace *IPCNamespace +} + +// NewTask creates a new task defined by TaskConfig. +// Whether or not NewTask is successful, it takes ownership of both TaskContext +// and TaskResources of the TaskConfig. 
+// +// NewTask does not start the returned task; the caller must call Task.Start. +func (ts *TaskSet) NewTask(cfg *TaskConfig) (*Task, error) { + t, err := ts.newTask(cfg) + if err != nil { + cfg.TaskContext.release() + cfg.TaskResources.release() + return nil, err + } + return t, nil +} + +// newTask is a helper for TaskSet.NewTask that only takes ownership of TaskContext +// and TaskResources of the TaskConfig if it succeeds. +func (ts *TaskSet) newTask(cfg *TaskConfig) (*Task, error) { + tg := cfg.ThreadGroup + tc := cfg.TaskContext + t := &Task{ + taskNode: taskNode{ + tg: tg, + parent: cfg.Parent, + children: make(map[*Task]struct{}), + }, + runState: (*runApp)(nil), + interruptChan: make(chan struct{}, 1), + signalStack: arch.SignalStack{Flags: arch.SignalStackFlagDisable}, + tc: *tc, + tr: *cfg.TaskResources, + p: cfg.Kernel.Platform.NewContext(), + k: cfg.Kernel, + ptraceTracees: make(map[*Task]struct{}), + allowedCPUMask: cfg.AllowedCPUMask.Copy(), + ioUsage: &usage.IO{}, + creds: cfg.Credentials, + niceness: cfg.Niceness, + netns: cfg.NetworkNamespaced, + utsns: cfg.UTSNamespace, + ipcns: cfg.IPCNamespace, + rseqCPU: -1, + futexWaiter: futex.NewWaiter(), + } + t.endStopCond.L = &t.tg.signalHandlers.mu + t.ptraceTracer.Store((*Task)(nil)) + // We don't construct t.blockingTimer until Task.run(); see that function + // for justification. + + // Make the new task (and possibly thread group) visible to the rest of + // the system atomically. + ts.mu.Lock() + defer ts.mu.Unlock() + tg.signalHandlers.mu.Lock() + defer tg.signalHandlers.mu.Unlock() + if tg.exiting || tg.execing != nil { + // If the caller is in the same thread group, then what we return + // doesn't matter too much since the caller will exit before it returns + // to userspace. If the caller isn't in the same thread group, then + // we're in uncharted territory and can return whatever we want. 
+ return nil, syserror.EINTR + } + if err := ts.assignTIDsLocked(t); err != nil { + return nil, err + } + // Below this point, newTask is expected not to fail (there is no rollback + // of assignTIDsLocked or any of the following). + + // Logging on t's behalf will panic if t.logPrefix hasn't been initialized. + // This is the earliest point at which we can do so (since t now has thread + // IDs). + t.updateLogPrefixLocked() + + if t.parent != nil { + t.parent.children[t] = struct{}{} + } + + if tg.leader == nil { + // New thread group. + tg.leader = t + if parentPG := tg.parentPG(); parentPG == nil { + tg.createSession() + } else { + // Inherit the process group. + parentPG.incRefWithParent(parentPG) + tg.processGroup = parentPG + } + } + tg.tasks.PushBack(t) + tg.tasksCount++ + tg.liveTasks++ + tg.activeTasks++ + + // Propagate external TaskSet stops to the new task. + t.stopCount = ts.stopCount + + t.mu.Lock() + defer t.mu.Unlock() + + t.cpu = assignCPU(t.allowedCPUMask, ts.Root.tids[t]) + + t.startTime = t.k.RealtimeClock().Now() + + return t, nil +} + +// assignTIDsLocked ensures that new task t is visible in all PID namespaces in +// which it should be visible. +// +// Preconditions: ts.mu must be locked for writing. +func (ts *TaskSet) assignTIDsLocked(t *Task) error { + type allocatedTID struct { + ns *PIDNamespace + tid ThreadID + } + var allocatedTIDs []allocatedTID + for ns := t.tg.pidns; ns != nil; ns = ns.parent { + tid, err := ns.allocateTID() + if err != nil { + // Failure. Remove the tids we already allocated in descendant + // namespaces. + for _, a := range allocatedTIDs { + delete(a.ns.tasks, a.tid) + delete(a.ns.tids, t) + } + return err + } + ns.tasks[tid] = t + ns.tids[t] = tid + allocatedTIDs = append(allocatedTIDs, allocatedTID{ns, tid}) + } + return nil +} + +// allocateTID returns an unused ThreadID from ns. +// +// Preconditions: ns.owner.mu must be locked for writing. 
+func (ns *PIDNamespace) allocateTID() (ThreadID, error) {
+	if ns.exiting {
+		// "In this case, a subsequent fork(2) into this PID namespace will
+		// fail with the error ENOMEM; it is not possible to create a new
+		// processes [sic] in a PID namespace whose init process has
+		// terminated." - pid_namespaces(7)
+		return 0, syserror.ENOMEM
+	}
+	// Linear scan for a free TID, starting just after the last one handed
+	// out (matching Linux's cyclic PID allocation behavior).
+	tid := ns.last
+	for {
+		// Next.
+		tid++
+		// Wrap around past the limit, skipping InitTID so the namespace's
+		// init TID is never recycled.
+		if tid > TasksLimit {
+			tid = InitTID + 1
+		}
+
+		// Is it available?
+		_, ok := ns.tasks[tid]
+		if !ok {
+			ns.last = tid
+			return tid, nil
+		}
+
+		// Did we do a full cycle?
+		if tid == ns.last {
+			// No tid available.
+			return 0, syserror.EAGAIN
+		}
+	}
+}
+
+// Start starts the task goroutine. Start must be called exactly once for each
+// task returned by NewTask.
+//
+// 'tid' must be the task's TID in the root PID namespace and it's used for
+// debugging purposes only (set as parameter to Task.run to make it visible
+// in stack dumps).
+func (t *Task) Start(tid ThreadID) {
+	// If the task was restored, it may be "starting" after having already exited.
+	if t.runState == nil {
+		return
+	}
+	// Goroutine-lifetime bookkeeping; presumably balanced when the task
+	// goroutine exits in Task.run — verify there.
+	t.goroutineStopped.Add(1)
+	t.tg.liveGoroutines.Add(1)
+	t.tg.pidns.owner.liveGoroutines.Add(1)
+	t.tg.pidns.owner.runningGoroutines.Add(1)
+
+	// Task is now running in system mode.
+	t.accountTaskGoroutineLeave(TaskGoroutineNonexistent)
+
+	// Use the task's TID in the root PID namespace to make it visible in stack dumps.
+	go t.run(uintptr(tid)) // S/R-SAFE: synchronizes with saving through stops
+}
diff --git a/pkg/sentry/kernel/task_stop.go b/pkg/sentry/kernel/task_stop.go
new file mode 100644
index 000000000..feaf6cae4
--- /dev/null
+++ b/pkg/sentry/kernel/task_stop.go
@@ -0,0 +1,226 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +// This file implements task stops, which represent the equivalent of Linux's +// uninterruptible sleep states in a way that is compatible with save/restore. +// Task stops comprise both internal stops (which form part of the task's +// "normal" control flow) and external stops (which do not); see README.md for +// details. +// +// There are multiple interfaces for interacting with stops because there are +// multiple cases to consider: +// +// - A task goroutine can begin a stop on its associated task (e.g. a +// vfork() syscall stopping the calling task until the child task releases its +// MM). In this case, calling Task.interrupt is both unnecessary (the task +// goroutine obviously cannot be blocked in Task.block or executing application +// code) and undesirable (as it may spuriously interrupt a in-progress +// syscall). +// +// Beginning internal stops in this case is implemented by +// Task.beginInternalStop / Task.beginInternalStopLocked. As of this writing, +// there are no instances of this case that begin external stops, except for +// autosave; however, autosave terminates the sentry without ending the +// external stop, so the spurious interrupt is moot. +// +// - An arbitrary goroutine can begin a stop on an unrelated task (e.g. all +// tasks being stopped in preparation for state checkpointing). 
If the task +// goroutine may be in Task.block or executing application code, it must be +// interrupted by Task.interrupt for it to actually enter the stop; since, +// strictly speaking, we have no way of determining this, we call +// Task.interrupt unconditionally. +// +// Beginning external stops in this case is implemented by +// Task.BeginExternalStop. As of this writing, there are no instances of this +// case that begin internal stops. +// +// - An arbitrary goroutine can end a stop on an unrelated task (e.g. an +// exiting task resuming a sibling task that has been blocked in an execve() +// syscall waiting for other tasks to exit). In this case, Task.endStopCond +// must be notified to kick the task goroutine out of Task.doStop. +// +// Ending internal stops in this case is implemented by +// Task.endInternalStopLocked. Ending external stops in this case is +// implemented by Task.EndExternalStop. +// +// - Hypothetically, a task goroutine can end an internal stop on its +// associated task. As of this writing, there are no instances of this case. +// However, any instances of this case could still use the above functions, +// since notifying Task.endStopCond would be unnecessary but harmless. + +import ( + "fmt" + "sync/atomic" +) + +// A TaskStop is a condition visible to the task control flow graph that +// prevents a task goroutine from running or exiting, i.e. an internal stop. +// +// NOTE: Most TaskStops don't contain any data; they're +// distinguished by their type. The obvious way to implement such a TaskStop +// is: +// +// type groupStop struct{} +// func (groupStop) Killable() bool { return true } +// ... +// t.beginInternalStop(groupStop{}) +// +// However, this doesn't work because the state package can't serialize values, +// only pointers. 
Furthermore, the correctness of save/restore depends on the +// ability to pass a TaskStop to endInternalStop that will compare equal to the +// TaskStop that was passed to beginInternalStop, even if a save/restore cycle +// occurred between the two. As a result, the current idiom is to always use a +// typecast nil for data-free TaskStops: +// +// type groupStop struct{} +// func (*groupStop) Killable() bool { return true } +// ... +// t.beginInternalStop((*groupStop)(nil)) +// +// This is pretty gross, but the alternatives seem grosser. +type TaskStop interface { + // Killable returns true if Task.Kill should end the stop prematurely. + // Killable is analogous to Linux's TASK_WAKEKILL. + Killable() bool +} + +// beginInternalStop indicates the start of an internal stop that applies to t. +// +// Preconditions: The task must not already be in an internal stop (i.e. t.stop +// == nil). The caller must be running on the task goroutine. +func (t *Task) beginInternalStop(s TaskStop) { + t.tg.pidns.owner.mu.RLock() + defer t.tg.pidns.owner.mu.RUnlock() + t.tg.signalHandlers.mu.Lock() + defer t.tg.signalHandlers.mu.Unlock() + t.beginInternalStopLocked(s) +} + +// Preconditions: The signal mutex must be locked. All preconditions for +// Task.beginInternalStop also apply. +func (t *Task) beginInternalStopLocked(s TaskStop) { + if t.stop != nil { + panic(fmt.Sprintf("Attempting to enter internal stop %#v when already in internal stop %#v", s, t.stop)) + } + t.Debugf("Entering internal stop %#v", s) + t.stop = s + t.beginStopLocked() +} + +// endInternalStopLocked indicates the end of an internal stop that applies to +// t. endInternalStopLocked does not wait for the task to resume. +// +// The caller is responsible for ensuring that the internal stop they expect +// actually applies to t; this requires holding the signal mutex which protects +// t.stop, which is why there is no endInternalStop that locks the signal mutex +// for you. 
+//
+// Preconditions: The signal mutex must be locked. The task must be in an
+// internal stop (i.e. t.stop != nil).
+func (t *Task) endInternalStopLocked() {
+	if t.stop == nil {
+		panic("Attempting to leave non-existent internal stop")
+	}
+	t.Debugf("Leaving internal stop %#v", t.stop)
+	t.stop = nil
+	t.endStopLocked()
+}
+
+// BeginExternalStop indicates the start of an external stop that applies to t.
+// BeginExternalStop does not wait for t's task goroutine to stop.
+func (t *Task) BeginExternalStop() {
+	t.tg.pidns.owner.mu.RLock()
+	defer t.tg.pidns.owner.mu.RUnlock()
+	t.tg.signalHandlers.mu.Lock()
+	defer t.tg.signalHandlers.mu.Unlock()
+	t.beginStopLocked()
+	// Unlike internal stops, the task goroutine may be blocked or running
+	// application code, so it must be interrupted to observe the stop.
+	t.interrupt()
+}
+
+// EndExternalStop indicates the end of an external stop started by a previous
+// call to Task.BeginExternalStop. EndExternalStop does not wait for t's task
+// goroutine to resume.
+func (t *Task) EndExternalStop() {
+	t.tg.pidns.owner.mu.RLock()
+	defer t.tg.pidns.owner.mu.RUnlock()
+	t.tg.signalHandlers.mu.Lock()
+	defer t.tg.signalHandlers.mu.Unlock()
+	t.endStopLocked()
+}
+
+// beginStopLocked increments t.stopCount to indicate that a new internal or
+// external stop applies to t.
+//
+// Preconditions: The signal mutex must be locked.
+func (t *Task) beginStopLocked() {
+	if newval := atomic.AddInt32(&t.stopCount, 1); newval <= 0 {
+		// Most likely overflow.
+		panic(fmt.Sprintf("Invalid stopCount: %d", newval))
+	}
+}
+
+// endStopLocked decrements t.stopCount to indicate that an existing internal
+// or external stop no longer applies to t.
+//
+// Preconditions: The signal mutex must be locked.
+func (t *Task) endStopLocked() {
+	if newval := atomic.AddInt32(&t.stopCount, -1); newval < 0 {
+		panic(fmt.Sprintf("Invalid stopCount: %d", newval))
+	} else if newval == 0 {
+		// The last stop just ended; wake the task goroutine waiting on
+		// endStopCond (see the Task.doStop discussion at the top of this
+		// file).
+		t.endStopCond.Signal()
+	}
+}
+
+// BeginExternalStop indicates the start of an external stop that applies to
+// all current and future tasks in ts.
BeginExternalStop does not wait for
+// task goroutines to stop.
+func (ts *TaskSet) BeginExternalStop() {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	// ts.stopCount makes the stop apply to future tasks as well: newly
+	// created tasks inherit it (see TaskSet.newTask).
+	ts.stopCount++
+	if ts.stopCount <= 0 {
+		panic(fmt.Sprintf("Invalid stopCount: %d", ts.stopCount))
+	}
+	// If the root PID namespace doesn't exist yet, there are no tasks to
+	// stop individually.
+	if ts.Root == nil {
+		return
+	}
+	for t := range ts.Root.tids {
+		t.tg.signalHandlers.mu.Lock()
+		t.beginStopLocked()
+		t.tg.signalHandlers.mu.Unlock()
+		// Kick the task goroutine out of Task.block or application
+		// execution so it observes the stop.
+		t.interrupt()
+	}
+}
+
+// EndExternalStop indicates the end of an external stop started by a previous
+// call to TaskSet.BeginExternalStop. EndExternalStop does not wait for task
+// goroutines to resume.
+func (ts *TaskSet) EndExternalStop() {
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	ts.stopCount--
+	if ts.stopCount < 0 {
+		panic(fmt.Sprintf("Invalid stopCount: %d", ts.stopCount))
+	}
+	if ts.Root == nil {
+		return
+	}
+	for t := range ts.Root.tids {
+		t.tg.signalHandlers.mu.Lock()
+		t.endStopLocked()
+		t.tg.signalHandlers.mu.Unlock()
+	}
+}
diff --git a/pkg/sentry/kernel/task_syscall.go b/pkg/sentry/kernel/task_syscall.go
new file mode 100644
index 000000000..79f4ff60c
--- /dev/null
+++ b/pkg/sentry/kernel/task_syscall.go
@@ -0,0 +1,434 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package kernel + +import ( + "fmt" + "os" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/bits" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// SyscallRestartErrno represents a ERESTART* errno defined in the Linux's kernel +// include/linux/errno.h. These errnos are never returned to userspace +// directly, but are used to communicate the expected behavior of an +// interrupted syscall from the syscall to signal handling. +type SyscallRestartErrno int + +// These numeric values are significant because ptrace syscall exit tracing can +// observe them. +// +// For all of the following errnos, if the syscall is not interrupted by a +// signal delivered to a user handler, the syscall is restarted. +const ( + // ERESTARTSYS is returned by an interrupted syscall to indicate that it + // should be converted to EINTR if interrupted by a signal delivered to a + // user handler without SA_RESTART set, and restarted otherwise. + ERESTARTSYS = SyscallRestartErrno(512) + + // ERESTARTNOINTR is returned by an interrupted syscall to indicate that it + // should always be restarted. + ERESTARTNOINTR = SyscallRestartErrno(513) + + // ERESTARTNOHAND is returned by an interrupted syscall to indicate that it + // should be converted to EINTR if interrupted by a signal delivered to a + // user handler, and restarted otherwise. + ERESTARTNOHAND = SyscallRestartErrno(514) + + // ERESTART_RESTARTBLOCK is returned by an interrupted syscall to indicate + // that it should be restarted using a custom function. The interrupted + // syscall must register a custom restart function by calling + // Task.SetRestartSyscallFn. + ERESTART_RESTARTBLOCK = SyscallRestartErrno(516) +) + +// Error implements error.Error. 
+func (e SyscallRestartErrno) Error() string { + // Descriptions are borrowed from strace. + switch e { + case ERESTARTSYS: + return "to be restarted if SA_RESTART is set" + case ERESTARTNOINTR: + return "to be restarted" + case ERESTARTNOHAND: + return "to be restarted if no handler" + case ERESTART_RESTARTBLOCK: + return "interrupted by signal" + default: + return "(unknown interrupt error)" + } +} + +// SyscallRestartErrnoFromReturn returns the SyscallRestartErrno represented by +// rv, the value in a syscall return register. +func SyscallRestartErrnoFromReturn(rv uintptr) (SyscallRestartErrno, bool) { + switch int(rv) { + case -int(ERESTARTSYS): + return ERESTARTSYS, true + case -int(ERESTARTNOINTR): + return ERESTARTNOINTR, true + case -int(ERESTARTNOHAND): + return ERESTARTNOHAND, true + case -int(ERESTART_RESTARTBLOCK): + return ERESTART_RESTARTBLOCK, true + default: + return 0, false + } +} + +// SyscallRestartBlock represents the restart block for a syscall restartable +// with a custom function. It encapsulates the state required to restart a +// syscall across a S/R. +type SyscallRestartBlock interface { + Restart(t *Task) (uintptr, error) +} + +// SyscallControl is returned by syscalls to control the behavior of +// Task.doSyscallInvoke. +type SyscallControl struct { + // next is the state that the task goroutine should switch to. If next is + // nil, the task goroutine should continue to syscall exit as usual. + next taskRunState + + // If ignoreReturn is true, Task.doSyscallInvoke should not store any value + // in the task's syscall return value register. + ignoreReturn bool +} + +var ( + // CtrlDoExit is returned by the implementations of the exit and exit_group + // syscalls to enter the task exit path directly, skipping syscall exit + // tracing. + CtrlDoExit = &SyscallControl{next: (*runExit)(nil), ignoreReturn: true} + + // ctrlStopAndReinvokeSyscall is returned by syscalls using the external + // feature before syscall execution. 
This causes Task.doSyscallInvoke + // to return runSyscallReinvoke, allowing Task.run to check for stops + // before immediately re-invoking the syscall (skipping the re-checking + // of seccomp filters and ptrace which would confuse userspace + // tracing). + ctrlStopAndReinvokeSyscall = &SyscallControl{next: (*runSyscallReinvoke)(nil), ignoreReturn: true} + + // ctrlStopBeforeSyscallExit is returned by syscalls that initiate a stop at + // their end. This causes Task.doSyscallInvoke to return runSyscallExit, rather + // than tail-calling it, allowing stops to be checked before syscall exit. + ctrlStopBeforeSyscallExit = &SyscallControl{next: (*runSyscallExit)(nil)} +) + +func (t *Task) invokeExternal() { + t.BeginExternalStop() + go func() { // S/R-SAFE: External control flow. + defer t.EndExternalStop() + t.SyscallTable().External(t.Kernel()) + }() +} + +func (t *Task) executeSyscall(sysno uintptr, args arch.SyscallArguments) (rval uintptr, ctrl *SyscallControl, err error) { + s := t.SyscallTable() + + fe := s.FeatureEnable.Word(sysno) + + var straceContext interface{} + if bits.IsAnyOn32(fe, StraceEnableBits) { + straceContext = s.Stracer.SyscallEnter(t, sysno, args, fe) + } + + if bits.IsOn32(fe, ExternalBeforeEnable) && (s.ExternalFilterBefore == nil || s.ExternalFilterBefore(t, sysno, args)) { + t.invokeExternal() + // Ensure we check for stops, then invoke the syscall again. + ctrl = ctrlStopAndReinvokeSyscall + } else { + fn := s.Lookup(sysno) + if fn != nil { + // Call our syscall implementation. + rval, ctrl, err = fn(t, args) + } else { + // Use the missing function if not found. + rval, err = t.SyscallTable().Missing(t, sysno, args) + } + } + + if bits.IsOn32(fe, ExternalAfterEnable) && (s.ExternalFilterAfter == nil || s.ExternalFilterAfter(t, sysno, args)) { + t.invokeExternal() + // Don't reinvoke the syscall. 
+ } + + if bits.IsAnyOn32(fe, StraceEnableBits) { + s.Stracer.SyscallExit(straceContext, t, sysno, rval, err) + } + + return +} + +// doSyscall is the entry point for an invocation of a system call specified by +// the current state of t's registers. +// +// The syscall path is very hot; avoid defer. +func (t *Task) doSyscall() taskRunState { + sysno := t.Arch().SyscallNo() + args := t.Arch().SyscallArgs() + + // Tracers expect to see this between when the task traps into the kernel + // to perform a syscall and when the syscall is actually invoked. + // This useless-looking temporary is needed because Go. + tmp := uintptr(syscall.ENOSYS) + t.Arch().SetReturn(-tmp) + + // Check seccomp filters. The nil check is for performance (as seccomp use + // is rare), not needed for correctness. + if t.syscallFilters != nil { + switch r := t.checkSeccompSyscall(int32(sysno), args, usermem.Addr(t.Arch().IP())); r { + case seccompResultDeny: + t.Debugf("Syscall %d: denied by seccomp", sysno) + return (*runSyscallExit)(nil) + case seccompResultAllow: + // ok + case seccompResultKill: + t.Debugf("Syscall %d: killed by seccomp", sysno) + t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)}) + return (*runExit)(nil) + case seccompResultTrace: + t.Debugf("Syscall %d: stopping for PTRACE_EVENT_SECCOMP", sysno) + return (*runSyscallAfterPtraceEventSeccomp)(nil) + default: + panic(fmt.Sprintf("Unknown seccomp result %d", r)) + } + } + + return t.doSyscallEnter(sysno, args) +} + +type runSyscallAfterPtraceEventSeccomp struct{} + +func (*runSyscallAfterPtraceEventSeccomp) execute(t *Task) taskRunState { + if t.killed() { + // "[S]yscall-exit-stop is not generated prior to death by SIGKILL." - + // ptrace(2) + return (*runInterrupt)(nil) + } + sysno := t.Arch().SyscallNo() + // "The tracer can skip the system call by changing the syscall number to + // -1." 
- Documentation/prctl/seccomp_filter.txt + if sysno == ^uintptr(0) { + return (*runSyscallExit)(nil).execute(t) + } + args := t.Arch().SyscallArgs() + return t.doSyscallEnter(sysno, args) +} + +func (t *Task) doSyscallEnter(sysno uintptr, args arch.SyscallArguments) taskRunState { + if next, ok := t.ptraceSyscallEnter(); ok { + return next + } + return t.doSyscallInvoke(sysno, args) +} + +type runSyscallAfterSyscallEnterStop struct{} + +func (*runSyscallAfterSyscallEnterStop) execute(t *Task) taskRunState { + if sig := linux.Signal(t.ptraceCode); sig.IsValid() { + t.tg.signalHandlers.mu.Lock() + t.sendSignalLocked(sigPriv(sig), false /* group */) + t.tg.signalHandlers.mu.Unlock() + } + if t.killed() { + return (*runInterrupt)(nil) + } + sysno := t.Arch().SyscallNo() + if sysno == ^uintptr(0) { + return (*runSyscallExit)(nil) + } + args := t.Arch().SyscallArgs() + return t.doSyscallInvoke(sysno, args) +} + +type runSyscallAfterSysemuStop struct{} + +func (*runSyscallAfterSysemuStop) execute(t *Task) taskRunState { + if sig := linux.Signal(t.ptraceCode); sig.IsValid() { + t.tg.signalHandlers.mu.Lock() + t.sendSignalLocked(sigPriv(sig), false /* group */) + t.tg.signalHandlers.mu.Unlock() + } + if t.killed() { + return (*runInterrupt)(nil) + } + return (*runSyscallExit)(nil).execute(t) +} + +func (t *Task) doSyscallInvoke(sysno uintptr, args arch.SyscallArguments) taskRunState { + rval, ctrl, err := t.executeSyscall(sysno, args) + + if ctrl != nil { + if !ctrl.ignoreReturn { + t.Arch().SetReturn(rval) + } + if ctrl.next != nil { + return ctrl.next + } + } else if err != nil { + t.Arch().SetReturn(uintptr(-t.ExtractErrno(err, int(sysno)))) + t.haveSyscallReturn = true + } else { + t.Arch().SetReturn(rval) + } + + return (*runSyscallExit)(nil).execute(t) +} + +type runSyscallReinvoke struct{} + +func (*runSyscallReinvoke) execute(t *Task) taskRunState { + if t.killed() { + // It's possible that since the last execution, the task has + // been forcible killed. 
Invoking the system call here could + // result in an infinite loop if it is again preempted by an + // external stop and reinvoked. + return (*runInterrupt)(nil) + } + + sysno := t.Arch().SyscallNo() + args := t.Arch().SyscallArgs() + return t.doSyscallInvoke(sysno, args) +} + +type runSyscallExit struct{} + +func (*runSyscallExit) execute(t *Task) taskRunState { + t.ptraceSyscallExit() + return (*runApp)(nil) +} + +// doVsyscall is the entry point for a vsyscall invocation of syscall sysno, as +// indicated by an execution fault at address addr. doVsyscall returns the +// task's next run state. +func (t *Task) doVsyscall(addr usermem.Addr, sysno uintptr) taskRunState { + // Grab the caller up front, to make sure there's a sensible stack. + caller := t.Arch().Native(uintptr(0)) + if _, err := t.CopyIn(usermem.Addr(t.Arch().Stack()), caller); err != nil { + t.Debugf("vsyscall %d: error reading return address from stack: %v", sysno, err) + t.forceSignal(linux.SIGSEGV, false /* unconditional */) + t.SendSignal(sigPriv(linux.SIGSEGV)) + return (*runApp)(nil) + } + + // For _vsyscalls_, there is no need to translate System V calling convention + // to syscall ABI because they both use RDI, RSI, and RDX for the first three + // arguments and none of the vsyscalls uses more than two arguments. 
+ args := t.Arch().SyscallArgs() + if t.syscallFilters != nil { + switch r := t.checkSeccompSyscall(int32(sysno), args, addr); r { + case seccompResultDeny: + t.Debugf("vsyscall %d, caller %x: denied by seccomp", sysno, t.Arch().Value(caller)) + return (*runApp)(nil) + case seccompResultAllow: + // ok + case seccompResultTrace: + t.Debugf("vsyscall %d, caller %x: stopping for PTRACE_EVENT_SECCOMP", sysno, t.Arch().Value(caller)) + return &runVsyscallAfterPtraceEventSeccomp{addr, sysno, caller} + default: + panic(fmt.Sprintf("Unknown seccomp result %d", r)) + } + } + + return t.doVsyscallInvoke(sysno, args, caller) +} + +type runVsyscallAfterPtraceEventSeccomp struct { + addr usermem.Addr + sysno uintptr + caller interface{} +} + +func (r *runVsyscallAfterPtraceEventSeccomp) execute(t *Task) taskRunState { + if t.killed() { + return (*runInterrupt)(nil) + } + sysno := t.Arch().SyscallNo() + // "... the syscall may not be changed to another system call using the + // orig_rax register. It may only be changed to -1 order [sic] to skip the + // currently emulated call. ... The tracer MUST NOT modify rip or rsp." - + // Documentation/prctl/seccomp_filter.txt. On Linux, changing orig_ax or ip + // causes do_exit(SIGSYS), and changing sp is ignored. + if (sysno != ^uintptr(0) && sysno != r.sysno) || usermem.Addr(t.Arch().IP()) != r.addr { + t.PrepareExit(ExitStatus{Signo: int(linux.SIGSYS)}) + return (*runExit)(nil) + } + if sysno == ^uintptr(0) { + return (*runApp)(nil) + } + return t.doVsyscallInvoke(sysno, t.Arch().SyscallArgs(), r.caller) +} + +func (t *Task) doVsyscallInvoke(sysno uintptr, args arch.SyscallArguments, caller interface{}) taskRunState { + rval, ctrl, err := t.executeSyscall(sysno, args) + if ctrl != nil { + t.Debugf("vsyscall %d, caller %x: syscall control: %v", sysno, t.Arch().Value(caller), ctrl) + // Set the return value. The stack has already been adjusted. 
+ t.Arch().SetReturn(0) + } else if err == nil { + t.Debugf("vsyscall %d, caller %x: successfully emulated syscall", sysno, t.Arch().Value(caller)) + // Set the return value. The stack has already been adjusted. + t.Arch().SetReturn(uintptr(rval)) + } else { + t.Debugf("vsyscall %d, caller %x: emulated syscall returned error: %v", sysno, t.Arch().Value(caller), err) + if err == syserror.EFAULT { + t.forceSignal(linux.SIGSEGV, false /* unconditional */) + t.SendSignal(sigPriv(linux.SIGSEGV)) + // A return is not emulated in this case. + return (*runApp)(nil) + } + t.Arch().SetReturn(uintptr(-t.ExtractErrno(err, int(sysno)))) + } + t.Arch().SetIP(t.Arch().Value(caller)) + t.Arch().SetStack(t.Arch().Stack() + uintptr(t.Arch().Width())) + return (*runApp)(nil) +} + +// ExtractErrno extracts an integer error number from the error. +// The syscall number is purely for context in the error case. Use -1 if +// syscall number is unknown. +func (t *Task) ExtractErrno(err error, sysno int) int { + switch err := err.(type) { + case nil: + return 0 + case syscall.Errno: + return int(err) + case SyscallRestartErrno: + return int(err) + case *memmap.BusError: + // Bus errors may generate SIGBUS, but for syscalls they still + // return EFAULT. See case in task_run.go where the fault is + // handled (and the SIGBUS is delivered). + return int(syscall.EFAULT) + case *os.PathError: + return t.ExtractErrno(err.Err, sysno) + case *os.LinkError: + return t.ExtractErrno(err.Err, sysno) + case *os.SyscallError: + return t.ExtractErrno(err.Err, sysno) + default: + if errno, ok := syserror.TranslateError(err); ok { + return int(errno) + } + } + panic(fmt.Sprintf("Unknown syscall %d error: %v", sysno, err)) +} diff --git a/pkg/sentry/kernel/task_test.go b/pkg/sentry/kernel/task_test.go new file mode 100644 index 000000000..82ef858a1 --- /dev/null +++ b/pkg/sentry/kernel/task_test.go @@ -0,0 +1,69 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/sched" +) + +func TestTaskCPU(t *testing.T) { + for _, test := range []struct { + mask sched.CPUSet + tid ThreadID + cpu int32 + }{ + { + mask: []byte{0xff}, + tid: 1, + cpu: 0, + }, + { + mask: []byte{0xff}, + tid: 10, + cpu: 1, + }, + { + // more than 8 cpus. + mask: []byte{0xff, 0xff}, + tid: 10, + cpu: 9, + }, + { + // missing the first cpu. + mask: []byte{0xfe}, + tid: 1, + cpu: 1, + }, + { + mask: []byte{0xfe}, + tid: 10, + cpu: 3, + }, + { + // missing the fifth cpu. + mask: []byte{0xef}, + tid: 10, + cpu: 2, + }, + } { + assigned := assignCPU(test.mask, test.tid) + if test.cpu != assigned { + t.Errorf("assignCPU(%v, %v) got %v, want %v", test.mask, test.tid, assigned, test.cpu) + } + } + +} diff --git a/pkg/sentry/kernel/task_usermem.go b/pkg/sentry/kernel/task_usermem.go new file mode 100644 index 000000000..7a62ab674 --- /dev/null +++ b/pkg/sentry/kernel/task_usermem.go @@ -0,0 +1,298 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "math" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// _MAX_RW_COUNT is the maximum size in bytes of a single read or write. +// Reads and writes that exceed this size may be silently truncated. +// (Linux: include/linux/fs.h:MAX_RW_COUNT) +var _MAX_RW_COUNT = int(usermem.Addr(math.MaxInt32).RoundDown()) + +// Activate ensures that the task has an active address space. +func (t *Task) Activate() { + if mm := t.MemoryManager(); mm != nil { + if err := mm.Activate(); err != nil { + panic("unable to activate mm: " + err.Error()) + } + } +} + +// Deactivate relinquishes the task's active address space. +func (t *Task) Deactivate() { + if mm := t.MemoryManager(); mm != nil { + if err := mm.Deactivate(); err != nil { + panic("unable to deactivate mm: " + err.Error()) + } + } +} + +// CopyIn copies a fixed-size value or slice of fixed-size values in from the +// task's memory. The copy will fail with syscall.EFAULT if it traverses user +// memory that is unmapped or not readable by the user. +// +// This Task's AddressSpace must be active. +func (t *Task) CopyIn(addr usermem.Addr, dst interface{}) (int, error) { + return usermem.CopyObjectIn(t, t.MemoryManager(), addr, dst, usermem.IOOpts{ + AddressSpaceActive: true, + }) +} + +// CopyInBytes is a fast version of CopyIn if the caller can serialize the +// data without reflection and pass in a byte slice. +// +// This Task's AddressSpace must be active. 
+func (t *Task) CopyInBytes(addr usermem.Addr, dst []byte) (int, error) { + return t.MemoryManager().CopyIn(t, addr, dst, usermem.IOOpts{ + AddressSpaceActive: true, + }) +} + +// CopyOut copies a fixed-size value or slice of fixed-size values out to the +// task's memory. The copy will fail with syscall.EFAULT if it traverses user +// memory that is unmapped or not writeable by the user. +// +// This Task's AddressSpace must be active. +func (t *Task) CopyOut(addr usermem.Addr, src interface{}) (int, error) { + return usermem.CopyObjectOut(t, t.MemoryManager(), addr, src, usermem.IOOpts{ + AddressSpaceActive: true, + }) +} + +// CopyOutBytes is a fast version of CopyOut if the caller can serialize the +// data without reflection and pass in a byte slice. +// +// This Task's AddressSpace must be active. +func (t *Task) CopyOutBytes(addr usermem.Addr, src []byte) (int, error) { + return t.MemoryManager().CopyOut(t, addr, src, usermem.IOOpts{ + AddressSpaceActive: true, + }) +} + +// CopyInString copies a NUL-terminated string of length at most maxlen in from +// the task's memory. The copy will fail with syscall.EFAULT if it traverses +// user memory that is unmapped or not readable by the user. +// +// This Task's AddressSpace must be active. +func (t *Task) CopyInString(addr usermem.Addr, maxlen int) (string, error) { + return usermem.CopyStringIn(t, t.MemoryManager(), addr, maxlen, usermem.IOOpts{ + AddressSpaceActive: true, + }) +} + +// CopyInVector copies a NULL-terminated vector of strings from the task's +// memory. The copy will fail with syscall.EFAULT if it traverses +// user memory that is unmapped or not readable by the user. +// +// maxElemSize is the maximum size of each individual element. +// +// maxTotalSize is the maximum total length of all elements plus the total +// number of elements. 
For example, the following strings correspond to +// the following set of sizes: +// +// { "a", "b", "c" } => 6 (3 for lengths, 3 for elements) +// { "abc" } => 4 (3 for length, 1 for elements) +// +// This Task's AddressSpace must be active. +func (t *Task) CopyInVector(addr usermem.Addr, maxElemSize, maxTotalSize int) ([]string, error) { + var v []string + for { + argAddr := t.Arch().Native(0) + if _, err := t.CopyIn(addr, argAddr); err != nil { + return v, err + } + if t.Arch().Value(argAddr) == 0 { + break + } + // Each string has a zero terminating byte counted, so copying out a string + // requires at least one byte of space. Also, see the calculation below. + if maxTotalSize <= 0 { + return nil, syserror.ENOMEM + } + thisMax := maxElemSize + if maxTotalSize < thisMax { + thisMax = maxTotalSize + } + arg, err := t.CopyInString(usermem.Addr(t.Arch().Value(argAddr)), thisMax) + if err != nil { + return v, err + } + v = append(v, arg) + addr += usermem.Addr(t.Arch().Width()) + maxTotalSize -= len(arg) + 1 + } + return v, nil +} + +// CopyOutIovecs converts src to an array of struct iovecs and copies it to the +// memory mapped at addr. +// +// Preconditions: As for usermem.IO.CopyOut. The caller must be running on the +// task goroutine. t's AddressSpace must be active. 
+func (t *Task) CopyOutIovecs(addr usermem.Addr, src usermem.AddrRangeSeq) error { + switch t.Arch().Width() { + case 8: + const itemLen = 16 + if _, ok := addr.AddLength(uint64(src.NumRanges()) * itemLen); !ok { + return syserror.EFAULT + } + + b := t.CopyScratchBuffer(itemLen) + for ; !src.IsEmpty(); src = src.Tail() { + ar := src.Head() + usermem.ByteOrder.PutUint64(b[0:8], uint64(ar.Start)) + usermem.ByteOrder.PutUint64(b[8:16], uint64(ar.Length())) + if _, err := t.CopyOutBytes(addr, b); err != nil { + return err + } + addr += itemLen + } + + default: + return syserror.ENOSYS + } + + return nil +} + +// CopyInIovecs copies an array of numIovecs struct iovecs from the memory +// mapped at addr, converts them to usermem.AddrRanges, and returns them as a +// usermem.AddrRangeSeq. +// +// CopyInIovecs shares the following properties with Linux's +// lib/iov_iter.c:import_iovec() => fs/read_write.c:rw_copy_check_uvector(): +// +// - If the length of any AddrRange would exceed the range of an ssize_t, +// CopyInIovecs returns EINVAL. +// +// - If the length of any AddrRange would cause its end to overflow, +// CopyInIovecs returns EFAULT. +// +// - The combined length of all AddrRanges is limited to _MAX_RW_COUNT. If the +// combined length of all AddrRanges would otherwise exceed this amount, ranges +// beyond _MAX_RW_COUNT are silently truncated. +// +// Preconditions: As for usermem.IO.CopyIn. The caller must be running on the +// task goroutine. t's AddressSpace must be active. 
+func (t *Task) CopyInIovecs(addr usermem.Addr, numIovecs int) (usermem.AddrRangeSeq, error) { + if numIovecs == 0 { + return usermem.AddrRangeSeq{}, nil + } + + var dst []usermem.AddrRange + if numIovecs > 1 { + dst = make([]usermem.AddrRange, 0, numIovecs) + } + + switch t.Arch().Width() { + case 8: + const itemLen = 16 + if _, ok := addr.AddLength(uint64(numIovecs) * itemLen); !ok { + return usermem.AddrRangeSeq{}, syserror.EFAULT + } + + b := t.CopyScratchBuffer(itemLen) + for i := 0; i < numIovecs; i++ { + if _, err := t.CopyInBytes(addr, b); err != nil { + return usermem.AddrRangeSeq{}, err + } + + base := usermem.Addr(usermem.ByteOrder.Uint64(b[0:8])) + length := usermem.ByteOrder.Uint64(b[8:16]) + if length > math.MaxInt64 { + return usermem.AddrRangeSeq{}, syserror.EINVAL + } + ar, ok := base.ToRange(length) + if !ok { + return usermem.AddrRangeSeq{}, syserror.EFAULT + } + + if numIovecs == 1 { + // Special case to avoid allocating dst. + return usermem.AddrRangeSeqOf(ar).TakeFirst(_MAX_RW_COUNT), nil + } + dst = append(dst, ar) + + addr += itemLen + } + + default: + return usermem.AddrRangeSeq{}, syserror.ENOSYS + } + + // Truncate to _MAX_RW_COUNT. + var total uint64 + for i := range dst { + dstlen := uint64(dst[i].Length()) + if rem := uint64(_MAX_RW_COUNT) - total; rem < dstlen { + dst[i].End -= usermem.Addr(dstlen - rem) + dstlen = rem + } + total += dstlen + } + + return usermem.AddrRangeSeqFromSlice(dst), nil +} + +// SingleIOSequence returns a usermem.IOSequence representing [addr, +// addr+length) in t's address space. If length exceeds _MAX_RW_COUNT, it is +// silently truncated. +// +// SingleIOSequence is analogous to Linux's +// lib/iov_iter.c:import_single_range(). (Note that the non-vectorized read and +// write syscalls in Linux do not use import_single_range(), but are still +// truncated to _MAX_RW_COUNT by fs/read_write.c:rw_verify_area().) 
+func (t *Task) SingleIOSequence(addr usermem.Addr, length int, opts usermem.IOOpts) (usermem.IOSequence, error) { + if length > _MAX_RW_COUNT { + length = _MAX_RW_COUNT + } + ar, ok := addr.ToRange(uint64(length)) + if !ok { + return usermem.IOSequence{}, syserror.EFAULT + } + return usermem.IOSequence{ + IO: t.MemoryManager(), + Addrs: usermem.AddrRangeSeqOf(ar), + Opts: opts, + }, nil +} + +// IovecsIOSequence returns a usermem.IOSequence representing the array of +// iovcnt struct iovecs at addr in t's address space. opts applies to the +// returned IOSequence, not the reading of the struct iovec array. +// +// IovecsIOSequence is analogous to Linux's lib/iov_iter.c:import_iovec(). +// +// Preconditions: As for Task.CopyInIovecs. +func (t *Task) IovecsIOSequence(addr usermem.Addr, iovcnt int, opts usermem.IOOpts) (usermem.IOSequence, error) { + if iovcnt < 0 || iovcnt > linux.UIO_MAXIOV { + return usermem.IOSequence{}, syserror.EINVAL + } + ars, err := t.CopyInIovecs(addr, iovcnt) + if err != nil { + return usermem.IOSequence{}, err + } + return usermem.IOSequence{ + IO: t.MemoryManager(), + Addrs: ars, + Opts: opts, + }, nil +} diff --git a/pkg/sentry/kernel/thread_group.go b/pkg/sentry/kernel/thread_group.go new file mode 100644 index 000000000..8fffd3446 --- /dev/null +++ b/pkg/sentry/kernel/thread_group.go @@ -0,0 +1,269 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kernel + +import ( + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" +) + +// A ThreadGroup is a logical grouping of tasks that has widespread +// significance to other kernel features (e.g. signal handling). ("Thread +// groups" are usually called "processes" in userspace documentation.) +// +// ThreadGroup is a superset of Linux's struct signal_struct. +type ThreadGroup struct { + threadGroupNode + + // signalHandlers is the set of signal handlers used by every task in this + // thread group. (signalHandlers may also be shared with other thread + // groups.) + // + // signalHandlers.mu (hereafter "the signal mutex") protects state related + // to signal handling, as well as state that usually needs to be atomic + // with signal handling, for all ThreadGroups and Tasks using + // signalHandlers. (This is analogous to Linux's use of struct + // sighand_struct::siglock.) + // + // The signalHandlers pointer can only be mutated during an execve + // (Task.finishExec). Consequently, when it's possible for a task in the + // thread group to be completing an execve, signalHandlers is protected by + // the owning TaskSet.mu. Otherwise, it is possible to read the + // signalHandlers pointer without synchronization. In particular, + // completing an execve requires that all other tasks in the thread group + // have exited, so task goroutines do not need the owning TaskSet.mu to + // read the signalHandlers pointer of their thread groups. + signalHandlers *SignalHandlers + + // pendingSignals is the set of pending signals that may be handled by any + // task in this thread group. + // + // pendingSignals is protected by the signal mutex. + pendingSignals pendingSignals + + // lastTimerSignalTask records the last task we deliver a process timer signal to. + // Please see SendTimerSignal for more details. 
+ // + // lastTimerSignalTask is protected by the signal mutex. + lastTimerSignalTask *Task + + // groupStopPhase indicates the state of a group stop in progress on the + // thread group, if any. + // + // groupStopPhase is protected by the signal mutex. + groupStopPhase groupStopPhase + + // groupStopSignal is the signal that caused a group stop to be initiated. + // groupStopSignal is only meaningful if groupStopPhase is + // groupStopInitiated or groupStopComplete. + // + // groupStopSignal is protected by the signal mutex. + groupStopSignal linux.Signal + + // groupStopCount is the number of non-exited tasks in the thread group + // that have acknowledged an initiated group stop. groupStopCount is only + // meaningful if groupStopPhase is groupStopInitiated. + // + // groupStopCount is protected by the signal mutex. + groupStopCount int + + // If groupStopWaitable is true, the thread group is indicating a waitable + // group stop event (as defined by EventChildGroupStop). + // + // Linux represents the analogous state as SIGNAL_STOP_STOPPED being set + // and group_exit_code being non-zero. + // + // groupStopWaitable is protected by the signal mutex. + groupStopWaitable bool + + // If groupContNotify is true, then a SIGCONT has recently ended a group + // stop on this thread group, and the first task to observe it should + // notify its parent. + // + // groupContNotify is protected by the signal mutex. + groupContNotify bool + + // If groupContNotify is true, groupContInterrupted is true iff SIGCONT + // ended a group stop in phase groupStopInitiated. If groupContNotify is + // false, groupContInterrupted is meaningless. + // + // Analogues in Linux: + // + // - groupContNotify && groupContInterrupted is represented by + // SIGNAL_CLD_STOPPED. + // + // - groupContNotify && !groupContInterrupted is represented by + // SIGNAL_CLD_CONTINUED. + // + // - !groupContNotify is represented by neither flag being set. 
+ // + // groupContInterrupted is protected by the signal mutex. + groupContInterrupted bool + + // If groupContWaitable is true, the thread group is indicating a waitable + // continue event (as defined by EventGroupContinue). + // + // groupContWaitable is analogous to Linux's SIGNAL_STOP_CONTINUED. + // + // groupContWaitable is protected by the signal mutex. + groupContWaitable bool + + // exiting is true if all tasks in the ThreadGroup should exit. exiting is + // analogous to Linux's SIGNAL_GROUP_EXIT. + // + // exiting is protected by the signal mutex. exiting can only transition + // from false to true. + exiting bool + + // exitStatus is the thread group's exit status. + // + // While exiting is false, exitStatus is protected by the signal mutex. + // When exiting becomes true, exitStatus becomes immutable. + exitStatus ExitStatus + + // terminationSignal is the signal that this thread group's leader will + // send to its parent when it exits. + // + // terminationSignal is protected by the TaskSet mutex. + terminationSignal linux.Signal + + // liveGoroutines is the number of non-exited task goroutines in the thread + // group. + // + // liveGoroutines is not saved; it is reset as task goroutines are + // restarted by Task.Start. + liveGoroutines sync.WaitGroup `state:"nosave"` + + // tm contains process timers. TimerManager fields are immutable. + tm TimerManager + + // exitedCPUStats is the CPU usage for all exited tasks in the thread + // group. exitedCPUStats is protected by the TaskSet mutex. + exitedCPUStats usage.CPUStats + + // childCPUStats is the CPU usage of all joined descendants of this thread + // group. childCPUStats is protected by the TaskSet mutex. + childCPUStats usage.CPUStats + + // ioUsage is the I/O usage for all exited tasks in the thread group. + // The ioUsage pointer is immutable. 
+	ioUsage *usage.IO
+
+	// maxRSS is the historical maximum resident set size of the thread group, updated when:
+	//
+	// - A task in the thread group exits, since after all tasks have
+	// exited the MemoryManager is no longer reachable.
+	//
+	// - The thread group completes an execve, since this changes
+	// MemoryManagers.
+	//
+	// maxRSS is protected by the TaskSet mutex.
+	maxRSS uint64
+
+	// childMaxRSS is the maximum resident set size in bytes of all joined
+	// descendants of this thread group.
+	//
+	// childMaxRSS is protected by the TaskSet mutex.
+	childMaxRSS uint64
+
+	// Resource limits for this ThreadGroup. The limits pointer is immutable.
+	limits *limits.LimitSet
+
+	// processGroup is the processGroup for this thread group.
+	//
+	// processGroup is protected by the TaskSet mutex.
+	processGroup *ProcessGroup
+
+	// execed indicates an exec has occurred since creation. This will be
+	// set by finishExec, and new ThreadGroups will have this field cleared.
+	// When execed is set, the processGroup may no longer be changed.
+	//
+	// execed is protected by the TaskSet mutex.
+	execed bool
+
+	// rscr is the thread group's RSEQ critical region.
+	rscr atomic.Value `state:".(*RSEQCriticalRegion)"`
+}
+
+// NewThreadGroup returns a new, empty thread group in PID namespace ns. The
+// thread group leader will send its parent terminationSignal when it exits.
+// The new thread group isn't visible to the system until a task has been
+// created inside of it by a successful call to TaskSet.NewTask.
+func NewThreadGroup(ns *PIDNamespace, sh *SignalHandlers, terminationSignal linux.Signal, limits *limits.LimitSet, monotonicClock *timekeeperClock) *ThreadGroup {
+	tg := &ThreadGroup{
+		threadGroupNode: threadGroupNode{
+			pidns: ns,
+		},
+		signalHandlers:    sh,
+		terminationSignal: terminationSignal,
+		ioUsage:           &usage.IO{},
+		limits:            limits,
+	}
+	tg.tm = newTimerManager(tg, monotonicClock)
+	tg.rscr.Store(&RSEQCriticalRegion{})
+	return tg
+}
+
+// saveRscr is invoked by stateify.
+func (tg *ThreadGroup) saveRscr() *RSEQCriticalRegion {
+	return tg.rscr.Load().(*RSEQCriticalRegion)
+}
+
+// loadRscr is invoked by stateify.
+func (tg *ThreadGroup) loadRscr(rscr *RSEQCriticalRegion) {
+	tg.rscr.Store(rscr)
+}
+
+// SignalHandlers returns the signal handlers used by tg.
+//
+// Preconditions: The caller must provide the synchronization required to read
+// tg.signalHandlers, as described in the field's comment.
+func (tg *ThreadGroup) SignalHandlers() *SignalHandlers {
+	return tg.signalHandlers
+}
+
+// Timer returns tg's timers.
+func (tg *ThreadGroup) Timer() *TimerManager {
+	return &tg.tm
+}
+
+// Limits returns tg's limits.
+func (tg *ThreadGroup) Limits() *limits.LimitSet {
+	return tg.limits
+}
+
+// release releases the thread group's resources.
+func (tg *ThreadGroup) release() {
+	// This must be done without holding the TaskSet mutex since thread group
+	// timers call SendSignal with Timer.mu locked.
+	tg.tm.destroy()
+}
+
+// forEachChildThreadGroupLocked iterates over all child ThreadGroups.
+//
+// Precondition: TaskSet.mu must be held.
+func (tg *ThreadGroup) forEachChildThreadGroupLocked(fn func(*ThreadGroup)) { + for t := tg.tasks.Front(); t != nil; t = t.Next() { + for child := range t.children { + if child == child.tg.leader { + fn(child.tg) + } + } + } +} diff --git a/pkg/sentry/kernel/threads.go b/pkg/sentry/kernel/threads.go new file mode 100644 index 000000000..440da9dad --- /dev/null +++ b/pkg/sentry/kernel/threads.go @@ -0,0 +1,443 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "fmt" + "sync" + + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// TasksLimit is the maximum number of threads for untrusted application. +// Linux doesn't really limit this directly, rather it is limited by total +// memory size, stacks allocated and a global maximum. There's no real reason +// for us to limit it either, (esp. since threads are backed by go routines), +// and we would expect to hit resource limits long before hitting this number. +// However, for correctness, we still check that the user doesn't exceed this +// number. +// +// Note that because of the way futexes are implemented, there *are* in fact +// serious restrictions on valid thread IDs. They are limited to 2^30 - 1 +// (kernel/fork.c:MAX_THREADS). +const TasksLimit = (1 << 16) + +// ThreadID is a generic thread identifier. 
+type ThreadID int32
+
+// String returns a decimal representation of the ThreadID.
+func (tid ThreadID) String() string {
+	return fmt.Sprintf("%d", tid)
+}
+
+// InitTID is the TID given to the first task added to each PID namespace. The
+// thread group led by InitTID is called the namespace's init process. The
+// death of a PID namespace's init process causes all tasks visible in that
+// namespace to be killed.
+const InitTID ThreadID = 1
+
+// A TaskSet comprises all tasks in a system.
+type TaskSet struct {
+	// mu protects all relationships between tasks and thread groups in the
+	// TaskSet. (mu is approximately equivalent to Linux's tasklist_lock.)
+	mu sync.RWMutex `state:"nosave"`
+
+	// Root is the root PID namespace, in which all tasks in the TaskSet are
+	// visible. The Root pointer is immutable.
+	Root *PIDNamespace
+
+	// sessions is the set of all sessions.
+	sessions sessionList
+
+	// stopCount is the number of active external stops applicable to all tasks
+	// in the TaskSet (calls to TaskSet.BeginExternalStop that have not been
+	// paired with a call to TaskSet.EndExternalStop). stopCount is protected
+	// by mu.
+	//
+	// stopCount is not saved for the same reason as Task.stopCount; it is
+	// always reset to zero after restore.
+	stopCount int32 `state:"nosave"`
+
+	// liveGoroutines is the number of non-exited task goroutines in the
+	// TaskSet.
+	//
+	// liveGoroutines is not saved; it is reset as task goroutines are
+	// restarted by Task.Start.
+	liveGoroutines sync.WaitGroup `state:"nosave"`
+
+	// runningGoroutines is the number of running task goroutines in the
+	// TaskSet.
+	//
+	// runningGoroutines is not saved; its counter value is required to be zero
+	// at time of save (but note that this is not necessarily the same thing as
+	// sync.WaitGroup's zero value).
+	runningGoroutines sync.WaitGroup `state:"nosave"`
+}
+
+// newTaskSet returns a new, empty TaskSet.
+func newTaskSet() *TaskSet { + ts := &TaskSet{} + ts.Root = newPIDNamespace(ts, nil /* parent */, auth.NewRootUserNamespace()) + return ts +} + +// forEachThreadGroupLocked applies f to each thread group in ts. +// +// Preconditions: ts.mu must be locked (for reading or writing). +func (ts *TaskSet) forEachThreadGroupLocked(f func(tg *ThreadGroup)) { + for t := range ts.Root.tids { + if t == t.tg.leader { + f(t.tg) + } + } +} + +// A PIDNamespace represents a PID namespace, a bimap between thread IDs and +// tasks. See the pid_namespaces(7) man page for further details. +// +// N.B. A task is said to be visible in a PID namespace if the PID namespace +// contains a thread ID that maps to that task. +type PIDNamespace struct { + // owner is the TaskSet that this PID namespace belongs to. The owner + // pointer is immutable. + owner *TaskSet + + // parent is the PID namespace of the process that created this one. If + // this is the root PID namespace, parent is nil. The parent pointer is + // immutable. + // + // Invariant: All tasks that are visible in this namespace are also visible + // in all ancestor namespaces. + parent *PIDNamespace + + // userns is the user namespace with which this PID namespace is + // associated. Privileged operations on this PID namespace must have + // appropriate capabilities in userns. The userns pointer is immutable. + userns *auth.UserNamespace + + // The following fields are protected by owner.mu. + + // last is the last ThreadID to be allocated in this namespace. + last ThreadID + + // tasks is a mapping from ThreadIDs in this namespace to tasks visible in + // the namespace. + tasks map[ThreadID]*Task + + // tids is a mapping from tasks visible in this namespace to their + // identifiers in this namespace. + tids map[*Task]ThreadID + + // sessions is a mapping from SessionIDs in this namespace to sessions + // visible in the namespace. 
+ sessions map[SessionID]*Session + + // sids is a mapping from sessions visible in this namespace to their + // identifiers in this namespace. + sids map[*Session]SessionID + + // processGroups is a mapping from ProcessGroupIDs in this namespace to + // process groups visible in the namespace. + processGroups map[ProcessGroupID]*ProcessGroup + + // pgids is a mapping from process groups visible in this namespace to + // their identifiers in this namespace. + pgids map[*ProcessGroup]ProcessGroupID + + // exiting indicates that the namespace's init process is exiting or has + // exited. + exiting bool +} + +func newPIDNamespace(ts *TaskSet, parent *PIDNamespace, userns *auth.UserNamespace) *PIDNamespace { + return &PIDNamespace{ + owner: ts, + parent: parent, + userns: userns, + tasks: make(map[ThreadID]*Task), + tids: make(map[*Task]ThreadID), + sessions: make(map[SessionID]*Session), + sids: make(map[*Session]SessionID), + processGroups: make(map[ProcessGroupID]*ProcessGroup), + pgids: make(map[*ProcessGroup]ProcessGroupID), + } +} + +// NewChild returns a new, empty PID namespace that is a child of ns. Authority +// over the new PID namespace is controlled by userns. +func (ns *PIDNamespace) NewChild(userns *auth.UserNamespace) *PIDNamespace { + return newPIDNamespace(ns.owner, ns, userns) +} + +// TaskWithID returns the task with thread ID tid in PID namespace ns. If no +// task has that TID, TaskWithID returns nil. +func (ns *PIDNamespace) TaskWithID(tid ThreadID) *Task { + ns.owner.mu.RLock() + defer ns.owner.mu.RUnlock() + return ns.tasks[tid] +} + +// ThreadGroupWithID returns the thread group lead by the task with thread ID +// tid in PID namespace ns. If no task has that TID, or if the task with that +// TID is not a thread group leader, ThreadGroupWithID returns nil. 
func (ns *PIDNamespace) ThreadGroupWithID(tid ThreadID) *ThreadGroup {
	ns.owner.mu.RLock()
	defer ns.owner.mu.RUnlock()
	t := ns.tasks[tid]
	if t == nil {
		return nil
	}
	if t != t.tg.leader {
		return nil
	}
	return t.tg
}

// IDOfTask returns the TID assigned to the given task in PID namespace ns. If
// the task is not visible in that namespace, IDOfTask returns 0. (This return
// value is significant in some cases, e.g. getppid() is documented as
// returning 0 if the caller's parent is in an ancestor namespace and
// consequently not visible to the caller.) If the task is nil, IDOfTask returns
// 0.
func (ns *PIDNamespace) IDOfTask(t *Task) ThreadID {
	ns.owner.mu.RLock()
	defer ns.owner.mu.RUnlock()
	// A nil or invisible task is simply absent from tids, yielding the zero
	// ThreadID.
	return ns.tids[t]
}

// IDOfThreadGroup returns the TID assigned to tg's leader in PID namespace ns.
// If the task is not visible in that namespace, IDOfThreadGroup returns 0.
func (ns *PIDNamespace) IDOfThreadGroup(tg *ThreadGroup) ThreadID {
	ns.owner.mu.RLock()
	defer ns.owner.mu.RUnlock()
	return ns.tids[tg.leader]
}

// Tasks returns a snapshot of the tasks in ns.
func (ns *PIDNamespace) Tasks() []*Task {
	ns.owner.mu.RLock()
	defer ns.owner.mu.RUnlock()
	tasks := make([]*Task, 0, len(ns.tasks))
	for t := range ns.tids {
		tasks = append(tasks, t)
	}
	return tasks
}

// ThreadGroups returns a snapshot of the thread groups in ns.
func (ns *PIDNamespace) ThreadGroups() []*ThreadGroup {
	ns.owner.mu.RLock()
	defer ns.owner.mu.RUnlock()
	var tgs []*ThreadGroup
	for t := range ns.tids {
		// Count each thread group once by considering only its leader.
		if t == t.tg.leader {
			tgs = append(tgs, t.tg)
		}
	}
	return tgs
}

// UserNamespace returns the user namespace associated with PID namespace ns.
func (ns *PIDNamespace) UserNamespace() *auth.UserNamespace {
	return ns.userns
}

// A threadGroupNode defines the relationship between a thread group and the
// rest of the system. Conceptually, threadGroupNode is data belonging to the
// owning TaskSet, as if TaskSet contained a field `nodes
// map[*ThreadGroup]*threadGroupNode`. However, for practical reasons,
// threadGroupNode is embedded in the ThreadGroup it represents.
// (threadGroupNode is an anonymous field in ThreadGroup; this is to expose
// threadGroupEntry's methods on ThreadGroup to make it implement
// threadGroupLinker.)
type threadGroupNode struct {
	// pidns is the PID namespace containing the thread group and all of its
	// member tasks. The pidns pointer is immutable.
	pidns *PIDNamespace

	// eventQueue is notified whenever a event of interest to Task.Wait occurs
	// in a child of this thread group, or a ptrace tracee of a task in this
	// thread group. Events are defined in task_exit.go.
	//
	// Note that we cannot check and save this wait queue similarly to other
	// wait queues, as the queue will not be empty by the time of saving, due
	// to the wait sourced from Exec().
	eventQueue waiter.Queue `state:"nosave"`

	// leader is the thread group's leader, which is the oldest task in the
	// thread group; usually the last task in the thread group to call
	// execve(), or if no such task exists then the first task in the thread
	// group, which was created by a call to fork() or clone() without
	// CLONE_THREAD. Once a thread group has been made visible to the rest of
	// the system by TaskSet.newTask, leader is never nil.
	//
	// Note that it's possible for the leader to exit without causing the rest
	// of the thread group to exit; in such a case, leader will still be valid
	// and non-nil, but leader will not be in tasks.
	//
	// leader is protected by the TaskSet mutex.
	leader *Task

	// If execing is not nil, it is a task in the thread group that has killed
	// all other tasks so that it can become the thread group leader and
	// perform an execve. (execing may already be the thread group leader.)
	//
	// execing is analogous to Linux's signal_struct::group_exit_task.
	//
	// execing is protected by the TaskSet mutex.
	execing *Task

	// tasks is all tasks in the thread group that have not yet been reaped.
	//
	// tasks is protected by both the TaskSet mutex and the signal mutex:
	// Mutating tasks requires locking the TaskSet mutex for writing *and*
	// locking the signal mutex. Reading tasks requires locking the TaskSet
	// mutex *or* locking the signal mutex.
	tasks taskList

	// tasksCount is the number of tasks in the thread group that have not yet
	// been reaped; equivalently, tasksCount is the number of tasks in tasks.
	//
	// tasksCount is protected by both the TaskSet mutex and the signal mutex,
	// as with tasks.
	tasksCount int

	// liveTasks is the number of tasks in the thread group that have not yet
	// reached TaskExitZombie.
	//
	// liveTasks is protected by the TaskSet mutex (NOT the signal mutex).
	liveTasks int

	// activeTasks is the number of tasks in the thread group that have not yet
	// reached TaskExitInitiated.
	//
	// activeTasks is protected by both the TaskSet mutex and the signal mutex,
	// as with tasks.
	activeTasks int
}

// PIDNamespace returns the PID namespace containing tg.
func (tg *ThreadGroup) PIDNamespace() *PIDNamespace {
	return tg.pidns
}

// TaskSet returns the TaskSet containing tg.
func (tg *ThreadGroup) TaskSet() *TaskSet {
	return tg.pidns.owner
}

// Leader returns tg's leader.
func (tg *ThreadGroup) Leader() *Task {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	return tg.leader
}

// Count returns the number of non-exited threads in the group.
func (tg *ThreadGroup) Count() int {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	var count int
	for t := tg.tasks.Front(); t != nil; t = t.Next() {
		count++
	}
	return count
}

// MemberIDs returns a snapshot of the ThreadIDs (in PID namespace pidns) for
// all tasks in tg.
func (tg *ThreadGroup) MemberIDs(pidns *PIDNamespace) []ThreadID {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()

	var tasks []ThreadID
	for t := tg.tasks.Front(); t != nil; t = t.Next() {
		// Tasks that are not visible in pidns are silently omitted.
		if id, ok := pidns.tids[t]; ok {
			tasks = append(tasks, id)
		}
	}
	return tasks
}

// ID returns tg's leader's thread ID in its own PID namespace. If tg's leader
// is dead, ID returns 0.
func (tg *ThreadGroup) ID() ThreadID {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()
	return tg.pidns.tids[tg.leader]
}

// A taskNode defines the relationship between a task and the rest of the
// system. The comments on threadGroupNode also apply to taskNode.
type taskNode struct {
	// tg is the thread group that this task belongs to. The tg pointer is
	// immutable.
	tg *ThreadGroup `state:"wait"`

	// taskEntry links into tg.tasks. Note that this means that
	// Task.Next/Prev/SetNext/SetPrev refer to sibling tasks in the same thread
	// group. See threadGroupNode.tasks for synchronization info.
	taskEntry

	// parent is the task's parent. parent may be nil.
	//
	// parent is protected by the TaskSet mutex.
	parent *Task

	// children is this task's children.
	//
	// children is protected by the TaskSet mutex.
	children map[*Task]struct{}

	// If childPIDNamespace is not nil, all new tasks created by this task will
	// be members of childPIDNamespace rather than this one. (As a corollary,
	// this task becomes unable to create sibling tasks in the same thread
	// group.)
	//
	// childPIDNamespace is exclusive to the task goroutine.
	childPIDNamespace *PIDNamespace
}

// ThreadGroup returns the thread group containing t.
func (t *Task) ThreadGroup() *ThreadGroup {
	return t.tg
}

// PIDNamespace returns the PID namespace containing t.
func (t *Task) PIDNamespace() *PIDNamespace {
	return t.tg.pidns
}

// TaskSet returns the TaskSet containing t.
func (t *Task) TaskSet() *TaskSet {
	return t.tg.pidns.owner
}

// Timekeeper returns the system Timekeeper.
func (t *Task) Timekeeper() *Timekeeper {
	return t.k.timekeeper
}

// Parent returns t's parent.
func (t *Task) Parent() *Task {
	return t.parent
}

// ThreadID returns t's thread ID in its own PID namespace. If the task is
// dead, ThreadID returns 0.
func (t *Task) ThreadID() ThreadID {
	return t.tg.pidns.IDOfTask(t)
}
diff --git a/pkg/sentry/kernel/time/BUILD b/pkg/sentry/kernel/time/BUILD
new file mode 100644
index 000000000..84f31b2dc
--- /dev/null
+++ b/pkg/sentry/kernel/time/BUILD
@@ -0,0 +1,32 @@
package(licenses = ["notice"])  # Apache 2.0

load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//tools/go_stateify:defs.bzl", "go_stateify")

go_stateify(
    name = "time_state",
    srcs = [
        "time.go",
    ],
    out = "time_state.go",
    package = "time",
)

go_library(
    name = "time",
    srcs = [
        "context.go",
        "time.go",
        "time_state.go",
    ],
    importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time",
    visibility = ["//pkg/sentry:internal"],
    deps = [
        "//pkg/abi/linux",
        "//pkg/log",
        "//pkg/sentry/context",
        "//pkg/state",
        "//pkg/syserror",
        "//pkg/waiter",
    ],
)
diff --git a/pkg/sentry/kernel/time/context.go b/pkg/sentry/kernel/time/context.go
new file mode 100644
index 000000000..ac4dc01d8
--- /dev/null
+++ b/pkg/sentry/kernel/time/context.go
@@ -0,0 +1,44 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package time

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
)

// contextID is the time package's type for context.Context.Value keys.
type contextID int

const (
	// CtxRealtimeClock is a Context.Value key for the current real time.
	CtxRealtimeClock contextID = iota
)

// RealtimeClockFromContext returns the real time clock associated with context
// ctx, or nil if ctx carries no CtxRealtimeClock value.
func RealtimeClockFromContext(ctx context.Context) Clock {
	if v := ctx.Value(CtxRealtimeClock); v != nil {
		return v.(Clock)
	}
	return nil
}

// NowFromContext returns the current real time associated with context ctx.
//
// NowFromContext panics if ctx does not carry a CtxRealtimeClock value.
func NowFromContext(ctx context.Context) Time {
	if clk := RealtimeClockFromContext(ctx); clk != nil {
		return clk.Now()
	}
	panic("encountered context without RealtimeClock")
}
diff --git a/pkg/sentry/kernel/time/time.go b/pkg/sentry/kernel/time/time.go
new file mode 100644
index 000000000..c223c2f19
--- /dev/null
+++ b/pkg/sentry/kernel/time/time.go
@@ -0,0 +1,649 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package time defines the Timer type, which provides a periodic timer that
// works by sampling a user-provided clock.
package time

import (
	"fmt"
	"math"
	"sync"
	"time"

	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
	"gvisor.googlesource.com/gvisor/pkg/waiter"
)

// Events that may be generated by a Clock.
const (
	// ClockEventSet occurs when a Clock undergoes a discontinuous change.
	ClockEventSet waiter.EventMask = 1 << iota

	// ClockEventRateIncrease occurs when the rate at which a Clock advances
	// increases significantly, such that values returned by previous calls to
	// Clock.WallTimeUntil may be too large.
	ClockEventRateIncrease
)

// Time represents an instant in time with nanosecond precision.
//
// Time may represent time with respect to any clock and may not have any
// meaning in the real world.
type Time struct {
	// ns is nanoseconds since the zero time of the associated Clock's domain.
	ns int64
}

var (
	// MinTime is the lowest possible time that can be represented by Time.
	MinTime = Time{ns: math.MinInt64}

	// MaxTime is the highest possible time that can be represented by
	// Time.
	MaxTime = Time{ns: math.MaxInt64}

	// ZeroTime represents the zero time in an unspecified Clock's domain.
	ZeroTime = Time{ns: 0}
)

const (
	// MinDuration is the minimum duration representable by time.Duration.
	MinDuration = time.Duration(math.MinInt64)

	// MaxDuration is the maximum duration representable by time.Duration.
	MaxDuration = time.Duration(math.MaxInt64)
)

// FromNanoseconds returns a Time representing the point ns nanoseconds after
// an unspecified Clock's zero time.
func FromNanoseconds(ns int64) Time {
	return Time{ns}
}

// FromSeconds returns a Time representing the point s seconds after an
// unspecified Clock's zero time.
+func FromSeconds(s int64) Time { + if s > math.MaxInt64/time.Second.Nanoseconds() { + return MaxTime + } + return Time{s * 1e9} +} + +// FromUnix converts from Unix seconds and nanoseconds to Time, assuming a real +// time Unix clock domain. +func FromUnix(s int64, ns int64) Time { + if s > math.MaxInt64/time.Second.Nanoseconds() { + return MaxTime + } + t := s * 1e9 + if t > math.MaxInt64-ns { + return MaxTime + } + return Time{t + ns} +} + +// FromTimespec converts from Linux Timespec to Time. +func FromTimespec(ts linux.Timespec) Time { + return Time{ts.ToNsecCapped()} +} + +// FromTimeval converts a Linux Timeval to Time. +func FromTimeval(tv linux.Timeval) Time { + return Time{tv.ToNsecCapped()} +} + +// Nanoseconds returns nanoseconds elapsed since the zero time in t's Clock +// domain. If t represents walltime, this is nanoseconds since the Unix epoch. +func (t Time) Nanoseconds() int64 { + return t.ns +} + +// Seconds returns seconds elapsed since the zero time in t's Clock domain. If +// t represents walltime, this is seconds since Unix epoch. +func (t Time) Seconds() int64 { + return t.Nanoseconds() / time.Second.Nanoseconds() +} + +// Timespec converts Time to a Linux timespec. +func (t Time) Timespec() linux.Timespec { + return linux.NsecToTimespec(t.Nanoseconds()) +} + +// Unix returns the (seconds, nanoseconds) representation of t such that +// seconds*1e9 + nanoseconds = t. +func (t Time) Unix() (s int64, ns int64) { + s = t.ns / 1e9 + ns = t.ns % 1e9 + return +} + +// TimeT converts Time to a Linux time_t. +func (t Time) TimeT() linux.TimeT { + return linux.NsecToTimeT(t.Nanoseconds()) +} + +// Timeval converts Time to a Linux timeval. +func (t Time) Timeval() linux.Timeval { + return linux.NsecToTimeval(t.Nanoseconds()) +} + +// Add adds the duration of d to t. 
+func (t Time) Add(d time.Duration) Time { + if t.ns > 0 && d.Nanoseconds() > math.MaxInt64-int64(t.ns) { + return MaxTime + } + if t.ns < 0 && d.Nanoseconds() < math.MinInt64-int64(t.ns) { + return MinTime + } + return Time{int64(t.ns) + d.Nanoseconds()} +} + +// AddTime adds the duration of u to t. +func (t Time) AddTime(u Time) Time { + return t.Add(time.Duration(u.ns)) +} + +// Equal reports whether the two times represent the same instant in time. +func (t Time) Equal(u Time) bool { + return t.ns == u.ns +} + +// Before reports whether the instant t is before the instant u. +func (t Time) Before(u Time) bool { + return t.ns < u.ns +} + +// After reports whether the instant t is after the instant u. +func (t Time) After(u Time) bool { + return t.ns > u.ns +} + +// Sub returns the duration of t - u. +// +// N.B. This measure may not make sense for every Time returned by ktime.Clock. +// Callers who need wall time duration can use ktime.Clock.WallTimeUntil to +// estimate that wall time. +func (t Time) Sub(u Time) time.Duration { + dur := time.Duration(int64(t.ns)-int64(u.ns)) * time.Nanosecond + switch { + case u.Add(dur).Equal(t): + return dur + case t.Before(u): + return MinDuration + default: + return MaxDuration + } +} + +// IsMin returns whether t represents the lowest possible time instant. +func (t Time) IsMin() bool { + return t == MinTime +} + +// IsZero returns whether t represents the zero time instant in t's Clock domain. +func (t Time) IsZero() bool { + return t == ZeroTime +} + +// String returns the time represented in nanoseconds as a string. +func (t Time) String() string { + return fmt.Sprintf("%dns", t.Nanoseconds()) +} + +// A Clock is an abstract time source. +type Clock interface { + // Now returns the current time in nanoseconds according to the Clock. + Now() Time + + // WallTimeUntil returns the estimated wall time until Now will return a + // value greater than or equal to t, given that a recent call to Now + // returned now. 
If t has already passed, WallTimeUntil may return 0 or a + // negative value. + // + // WallTimeUntil must be abstract to support Clocks that do not represent + // wall time (e.g. thread group execution timers). Clocks that represent + // wall times may embed the WallRateClock type to obtain an appropriate + // trivial implementation of WallTimeUntil. + // + // WallTimeUntil is used to determine when associated Timers should next + // check for expirations. Returning too small a value may result in + // spurious Timer goroutine wakeups, while returning too large a value may + // result in late expirations. Implementations should usually err on the + // side of underestimating. + WallTimeUntil(t, now Time) time.Duration + + // Waitable methods may be used to subscribe to Clock events. Waiters will + // not be preserved by Save and must be re-established during restore. + // + // Since Clock events are transient, implementations of + // waiter.Waitable.Readiness should return 0. + waiter.Waitable +} + +// WallRateClock implements Clock.WallTimeUntil for Clocks that elapse at the +// same rate as wall time. +type WallRateClock struct{} + +// WallTimeUntil implements Clock.WallTimeUntil. +func (WallRateClock) WallTimeUntil(t, now Time) time.Duration { + return t.Sub(now) +} + +// NoClockEvents implements waiter.Waitable for Clocks that do not generate +// events. +type NoClockEvents struct{} + +// Readiness implements waiter.Waitable.Readiness. +func (NoClockEvents) Readiness(mask waiter.EventMask) waiter.EventMask { + return 0 +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (NoClockEvents) EventRegister(e *waiter.Entry, mask waiter.EventMask) { +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (NoClockEvents) EventUnregister(e *waiter.Entry) { +} + +// ClockEventsQueue implements waiter.Waitable by wrapping waiter.Queue and +// defining waiter.Waitable.Readiness as required by Clock. 
type ClockEventsQueue struct {
	waiter.Queue
}

// Readiness implements waiter.Waitable.Readiness.
func (ClockEventsQueue) Readiness(mask waiter.EventMask) waiter.EventMask {
	// Clock events are transient, so nothing is ever "ready".
	return 0
}

// A TimerListener receives expirations from a Timer.
type TimerListener interface {
	// Notify is called when its associated Timer expires. exp is the number of
	// expirations.
	//
	// Notify is called with the associated Timer's mutex locked, so Notify
	// must not take any locks that precede Timer.mu in lock order.
	//
	// Preconditions: exp > 0.
	Notify(exp uint64)

	// Destroy is called when the timer is destroyed.
	Destroy()
}

// Setting contains user-controlled mutable Timer properties.
type Setting struct {
	// Enabled is true if the timer is running.
	Enabled bool

	// Next is the time in nanoseconds of the next expiration.
	Next Time

	// Period is the time in nanoseconds between expirations. If Period is
	// zero, the timer will not automatically restart after expiring.
	//
	// Invariant: Period >= 0.
	Period time.Duration
}

// SettingFromSpec converts a (value, interval) pair to a Setting based on a
// reading from c. value is interpreted as a time relative to c.Now().
func SettingFromSpec(value time.Duration, interval time.Duration, c Clock) (Setting, error) {
	if value < 0 {
		return Setting{}, syserror.EINVAL
	}
	if value == 0 {
		// A zero value leaves the timer disarmed (Enabled remains false).
		return Setting{Period: interval}, nil
	}
	return Setting{
		Enabled: true,
		Next:    c.Now().Add(value),
		Period:  interval,
	}, nil
}

// SettingFromAbsSpec converts a (value, interval) pair to a Setting based on a
// reading from c. value is interpreted as an absolute time.
func SettingFromAbsSpec(value Time, interval time.Duration) (Setting, error) {
	if value.Before(ZeroTime) {
		return Setting{}, syserror.EINVAL
	}
	if value.IsZero() {
		// A zero value leaves the timer disarmed (Enabled remains false).
		return Setting{Period: interval}, nil
	}
	return Setting{
		Enabled: true,
		Next:    value,
		Period:  interval,
	}, nil
}

// SpecFromSetting converts a timestamp and a Setting to a (relative value,
// interval) pair, as used by most Linux syscalls that return a struct
// itimerval or struct itimerspec.
func SpecFromSetting(now Time, s Setting) (value, period time.Duration) {
	if !s.Enabled {
		return 0, s.Period
	}
	return s.Next.Sub(now), s.Period
}

// advancedTo returns an updated Setting and a number of expirations after
// the associated Clock indicates a time of now.
//
// Settings may be created by successive calls to advancedTo with decreasing
// values of now (i.e. time may appear to go backward). Supporting this is
// required to support non-monotonic clocks, as well as allowing
// Timer.clock.Now() to be called without holding Timer.mu.
func (s Setting) advancedTo(now Time) (Setting, uint64) {
	if !s.Enabled {
		return s, 0
	}
	if s.Next.After(now) {
		// Not due yet.
		return s, 0
	}
	if s.Period == 0 {
		// One-shot timer: a single expiration disables it.
		s.Enabled = false
		return s, 1
	}
	// Periodic timer: count the elapsed whole periods since s.Next
	// (inclusive) and advance s.Next past now accordingly.
	exp := 1 + uint64(now.Sub(s.Next).Nanoseconds())/uint64(s.Period)
	s.Next = s.Next.Add(time.Duration(uint64(s.Period) * exp))
	return s, exp
}

// Timer is an optionally-periodic timer driven by sampling a user-specified
// Clock. Timer's semantics support the requirements of Linux's interval timers
// (setitimer(2), timer_create(2), timerfd_create(2)).
//
// Timers should be created using NewTimer and must be cleaned up by calling
// Timer.Destroy when no longer used.
type Timer struct {
	// clock is the time source. clock is immutable.
	clock Clock

	// listener is notified of expirations. listener is immutable.
	listener TimerListener

	// mu protects the following mutable fields.
	mu sync.Mutex `state:"nosave"`

	// setting is the timer setting. setting is protected by mu.
	setting Setting

	// paused is true if the Timer is paused. paused is protected by mu.
	paused bool

	// kicker is used to wake the Timer goroutine. The kicker pointer is
	// immutable, but its state is protected by mu.
	kicker *time.Timer `state:"nosave"`

	// entry is registered with clock.EventRegister. entry is immutable.
	//
	// Per comment in Clock, entry must be re-registered after restore; per
	// comment in Timer.Load, this is done in Timer.Resume.
	entry waiter.Entry `state:"nosave"`

	// events is the channel that will be notified whenever entry receives an
	// event. It is also closed by Timer.Destroy to instruct the Timer
	// goroutine to exit.
	events chan struct{} `state:"nosave"`
}

// timerTickEvents are Clock events that require the Timer goroutine to Tick
// prematurely.
const timerTickEvents = ClockEventSet | ClockEventRateIncrease

// NewTimer returns a new Timer that will obtain time from clock and send
// expirations to listener. The Timer is initially stopped and has no first
// expiration or period configured.
func NewTimer(clock Clock, listener TimerListener) *Timer {
	t := &Timer{
		clock:    clock,
		listener: listener,
	}
	t.init()
	return t
}

// After waits for the duration to elapse according to clock and then sends a
// notification on the returned channel. The timer is started immediately and
// will fire exactly once. The second return value is the start time used with
// the duration.
//
// Callers must call Timer.Destroy.
func After(clock Clock, duration time.Duration) (*Timer, Time, <-chan struct{}) {
	notifier, tchan := NewChannelNotifier()
	t := NewTimer(clock, notifier)
	now := clock.Now()

	t.Swap(Setting{
		Enabled: true,
		Period:  0,
		Next:    now.Add(duration),
	})
	return t, now, tchan
}

// init initializes Timer state that is not preserved across save/restore. If
// init has already been called, calling it again is a no-op.
//
// Preconditions: t.mu must be locked, or the caller must have exclusive access
// to t.
func (t *Timer) init() {
	if t.kicker != nil {
		return
	}
	// If t.kicker is nil, the Timer goroutine can't be running, so we can't
	// race with it.
	t.kicker = time.NewTimer(0)
	t.entry, t.events = waiter.NewChannelEntry(nil)
	t.clock.EventRegister(&t.entry, timerTickEvents)
	go t.runGoroutine() // S/R-SAFE: synchronized by t.mu
}

// Destroy releases resources owned by the Timer. A Destroyed Timer must not be
// used again; in particular, a Destroyed Timer should not be Saved.
func (t *Timer) Destroy() {
	// Stop the Timer, ensuring that the Timer goroutine will not call
	// t.kicker.Reset, before calling t.kicker.Stop.
	t.mu.Lock()
	t.setting.Enabled = false
	t.mu.Unlock()
	t.kicker.Stop()
	// Unregister t.entry, ensuring that the Clock will not send to t.events,
	// before closing t.events to instruct the Timer goroutine to exit.
	t.clock.EventUnregister(&t.entry)
	close(t.events)
	t.listener.Destroy()
}

// runGoroutine is the Timer goroutine's main loop: it Ticks whenever the
// kicker fires or the Clock generates an event, and exits when t.events is
// closed by Destroy.
func (t *Timer) runGoroutine() {
	for {
		select {
		case <-t.kicker.C:
		case _, ok := <-t.events:
			if !ok {
				// Channel closed by Destroy.
				return
			}
		}
		t.Tick()
	}
}

// Tick requests that the Timer immediately check for expirations and
// re-evaluate when it should next check for expirations.
func (t *Timer) Tick() {
	// The clock is deliberately read before taking the lock; advancedTo
	// tolerates a stale (even backward) reading.
	now := t.clock.Now()
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.paused {
		return
	}
	s, exp := t.setting.advancedTo(now)
	t.setting = s
	if exp > 0 {
		t.listener.Notify(exp)
	}
	t.resetKickerLocked(now)
}

// Pause pauses the Timer, ensuring that it does not generate any further
// expirations until Resume is called. If the Timer is already paused, Pause
// has no effect.
func (t *Timer) Pause() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.paused = true
	// t.kicker may be nil if we were restored but never resumed.
	if t.kicker != nil {
		t.kicker.Stop()
	}
}

// Resume ends the effect of Pause. If the Timer is not paused, Resume has no
// effect.
func (t *Timer) Resume() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if !t.paused {
		return
	}
	t.paused = false

	// Lazily initialize the Timer. We can't call Timer.init until Timer.Resume
	// because save/restore will restore Timers before
	// kernel.Timekeeper.SetClocks() has been called, so if t.clock is backed
	// by a kernel.Timekeeper then the Timer goroutine will panic if it calls
	// t.clock.Now().
	t.init()

	// Kick the Timer goroutine in case it was already initialized, but the
	// Timer goroutine was sleeping.
	t.kicker.Reset(0)
}

// Get returns a snapshot of the Timer's current Setting and the time
// (according to the Timer's Clock) at which the snapshot was taken.
//
// Preconditions: The Timer must not be paused (since its Setting cannot
// be advanced to the current time while it is paused.)
func (t *Timer) Get() (Time, Setting) {
	now := t.clock.Now()
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.paused {
		panic(fmt.Sprintf("Timer.Get called on paused Timer %p", t))
	}
	s, exp := t.setting.advancedTo(now)
	t.setting = s
	if exp > 0 {
		t.listener.Notify(exp)
	}
	t.resetKickerLocked(now)
	return now, s
}

// Swap atomically changes the Timer's Setting and returns the Timer's previous
// Setting and the time (according to the Timer's Clock) at which the snapshot
// was taken. Setting s.Enabled to true starts the Timer, while setting
// s.Enabled to false stops it.
//
// Preconditions: The Timer must not be paused.
func (t *Timer) Swap(s Setting) (Time, Setting) {
	return t.SwapAnd(s, nil)
}

// SwapAnd atomically changes the Timer's Setting, calls f if it is not nil,
// and returns the Timer's previous Setting and the time (according to the
// Timer's Clock) at which the Setting was changed.
// Setting s.Enabled to true
// starts the timer, while setting s.Enabled to false stops it.
//
// Preconditions: The Timer must not be paused. f cannot call any Timer methods
// since it is called with the Timer mutex locked.
func (t *Timer) SwapAnd(s Setting, f func()) (Time, Setting) {
	now := t.clock.Now()
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.paused {
		panic(fmt.Sprintf("Timer.SwapAnd called on paused Timer %p", t))
	}
	// Deliver any expirations due under the old setting before replacing it.
	oldS, oldExp := t.setting.advancedTo(now)
	if oldExp > 0 {
		t.listener.Notify(oldExp)
	}
	if f != nil {
		f()
	}
	newS, newExp := s.advancedTo(now)
	t.setting = newS
	if newExp > 0 {
		t.listener.Notify(newExp)
	}
	t.resetKickerLocked(now)
	return now, oldS
}

// resetKickerLocked (re-)arms the kicker for the next expiration, if the
// timer is enabled.
//
// Preconditions: t.mu must be locked.
func (t *Timer) resetKickerLocked(now Time) {
	if t.setting.Enabled {
		// Clock.WallTimeUntil may return a negative value. This is fine;
		// time.when treats negative Durations as 0.
		t.kicker.Reset(t.clock.WallTimeUntil(t.setting.Next, now))
	}
	// We don't call t.kicker.Stop if !t.setting.Enabled because in most cases
	// resetKickerLocked will be called from the Timer goroutine itself, in
	// which case t.kicker has already fired and t.kicker.Stop will be an
	// expensive no-op (time.Timer.Stop => time.stopTimer => runtime.stopTimer
	// => runtime.deltimer).
}

// Clock returns the Clock used by t.
func (t *Timer) Clock() Clock {
	return t.clock
}

// ChannelNotifier is a TimerListener that sends a message on an empty struct
// channel.
//
// ChannelNotifier cannot be saved or loaded.
type ChannelNotifier struct {
	// tchan must be a buffered channel.
	tchan chan struct{}
}

// NewChannelNotifier creates a new channel notifier.
//
// If the notifier is used with a timer, Timer.Destroy will close the channel
// returned here.
func NewChannelNotifier() (TimerListener, <-chan struct{}) {
	// The buffer of one lets Notify be non-blocking: if a notification is
	// already pending, further notifications are coalesced with it.
	tchan := make(chan struct{}, 1)
	return &ChannelNotifier{tchan}, tchan
}

// Notify implements ktime.TimerListener.Notify.
func (c *ChannelNotifier) Notify(uint64) {
	// Non-blocking send; see NewChannelNotifier.
	select {
	case c.tchan <- struct{}{}:
	default:
	}
}

// Destroy implements ktime.TimerListener.Destroy and will close the channel.
func (c *ChannelNotifier) Destroy() {
	close(c.tchan)
}
diff --git a/pkg/sentry/kernel/timekeeper.go b/pkg/sentry/kernel/timekeeper.go
new file mode 100644
index 000000000..3f16c1676
--- /dev/null
+++ b/pkg/sentry/kernel/timekeeper.go
@@ -0,0 +1,270 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

import (
	"sync"
	"time"

	"gvisor.googlesource.com/gvisor/pkg/log"
	ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
	sentrytime "gvisor.googlesource.com/gvisor/pkg/sentry/time"
)

// Timekeeper manages all of the kernel clocks.
type Timekeeper struct {
	// clocks are the clock sources.
	//
	// These are not saved directly, as the new machine's clock may behave
	// differently.
	//
	// It is set only once, by SetClocks.
	clocks sentrytime.Clocks `state:"nosave"`

	// bootTime is the realtime when the system "booted". i.e., when
	// SetClocks was called in the initial (not restored) run.
	bootTime ktime.Time

	// monotonicOffset is the offset to apply to the monotonic clock output
	// from clocks.
	//
	// It is set only once, by SetClocks.
	monotonicOffset int64 `state:"nosave"`

	// restored indicates that this Timekeeper was restored from a state
	// file.
	restored bool `state:"nosave"`

	// saveMonotonic is the (offset) value of the monotonic clock at the
	// time of save.
	//
	// It is only valid if restored is true.
	//
	// It is only used in SetClocks after restore to compute the new
	// monotonicOffset.
	saveMonotonic int64

	// saveRealtime is the value of the realtime clock at the time of save.
	//
	// It is only valid if restored is true.
	//
	// It is only used in SetClocks after restore to compute the new
	// monotonicOffset.
	saveRealtime int64

	// params manages the parameter page.
	params *VDSOParamPage

	// mu protects destruction with stop and wg.
	mu sync.Mutex `state:"nosave"`

	// stop is used to tell the update goroutine to exit.
	stop chan struct{} `state:"nosave"`

	// wg is used to indicate that the update goroutine has exited.
	wg sync.WaitGroup `state:"nosave"`
}

// NewTimekeeper returns a Timekeeper that is automatically kept up-to-date.
// NewTimekeeper does not take ownership of paramPage.
//
// SetClocks must be called on the returned Timekeeper before it is usable.
func NewTimekeeper(platform platform.Platform, paramPage platform.FileRange) (*Timekeeper, error) {
	return &Timekeeper{
		params: NewVDSOParamPage(platform, paramPage),
	}, nil
}

// SetClocks sets the backing clock source.
//
// SetClocks must be called before the Timekeeper is used, and it may not be
// called more than once, as changing the clock source without extra correction
// could cause time discontinuities.
//
// It must also be called after Load.
+func (t *Timekeeper) SetClocks(c sentrytime.Clocks) {
+	// Update the params, marking them "not ready", as we may need to
+	// restart calibration on this new machine.
+	if t.restored {
+		if err := t.params.Write(func() vdsoParams {
+			return vdsoParams{}
+		}); err != nil {
+			panic("unable to reset VDSO params: " + err.Error())
+		}
+	}
+
+	if t.clocks != nil {
+		panic("SetClocks called on previously-initialized Timekeeper")
+	}
+
+	t.clocks = c
+
+	// Compute the offset of the monotonic clock from the base Clocks.
+	//
+	// In a fresh (not restored) sentry, monotonic time starts at zero.
+	//
+	// In a restored sentry, monotonic time jumps forward by approximately
+	// the same amount as real time. There are no guarantees here, we are
+	// just making a best-effort attempt to make it appear that the app
+	// was simply not scheduled for a long period, rather than that the
+	// real time clock was changed.
+	//
+	// If real time went backwards, it remains the same.
+	wantMonotonic := int64(0)
+
+	nowMonotonic, err := t.clocks.GetTime(sentrytime.Monotonic)
+	if err != nil {
+		panic("Unable to get current monotonic time: " + err.Error())
+	}
+
+	nowRealtime, err := t.clocks.GetTime(sentrytime.Realtime)
+	if err != nil {
+		panic("Unable to get current realtime: " + err.Error())
+	}
+
+	if t.restored {
+		wantMonotonic = t.saveMonotonic
+		elapsed := nowRealtime - t.saveRealtime
+		if elapsed > 0 {
+			wantMonotonic += elapsed
+		}
+	}
+
+	t.monotonicOffset = wantMonotonic - nowMonotonic
+
+	if !t.restored {
+		// Hold on to the initial "boot" time.
+		t.bootTime = ktime.FromNanoseconds(nowRealtime)
+	}
+
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.startUpdater()
+}
+
+// startUpdater starts an update goroutine that keeps the clocks updated.
+//
+// mu must be held.
+func (t *Timekeeper) startUpdater() {
+	if t.stop != nil {
+		// Timekeeper already started
+		return
+	}
+	t.stop = make(chan struct{})
+
+	// Keep the clocks up to date.
+ // + // Note that the Go runtime uses host CLOCK_MONOTONIC to service the + // timer, so it may run at a *slightly* different rate from the + // application CLOCK_MONOTONIC. That is fine, as we only need to update + // at approximately this rate. + timer := time.NewTicker(sentrytime.ApproxUpdateInterval) + t.wg.Add(1) + go func() { // S/R-SAFE: stopped during save. + for { + // Start with an update immediately, so the clocks are + // ready ASAP. + + // Call Update within a Write block to prevent the VDSO + // from using the old params between Update and + // Write. + if err := t.params.Write(func() vdsoParams { + monotonicParams, monotonicOk, realtimeParams, realtimeOk := t.clocks.Update() + + var p vdsoParams + if monotonicOk { + p.monotonicReady = 1 + p.monotonicBaseCycles = int64(monotonicParams.BaseCycles) + p.monotonicBaseRef = int64(monotonicParams.BaseRef) + t.monotonicOffset + p.monotonicFrequency = monotonicParams.Frequency + } + if realtimeOk { + p.realtimeReady = 1 + p.realtimeBaseCycles = int64(realtimeParams.BaseCycles) + p.realtimeBaseRef = int64(realtimeParams.BaseRef) + p.realtimeFrequency = realtimeParams.Frequency + } + + log.Debugf("Updating VDSO parameters: %+v", p) + + return p + }); err != nil { + log.Warningf("Unable to update VDSO parameter page: %v", err) + } + + select { + case <-timer.C: + case <-t.stop: + t.wg.Done() + return + } + } + }() +} + +// stopUpdater stops the update goroutine, blocking until it exits. +// +// mu must be held. +func (t *Timekeeper) stopUpdater() { + if t.stop == nil { + // Updater not running. + return + } + + close(t.stop) + t.wg.Wait() + t.stop = nil +} + +// Destroy destroys the Timekeeper, freeing all associated resources. +func (t *Timekeeper) Destroy() { + t.mu.Lock() + defer t.mu.Unlock() + + t.stopUpdater() +} + +// PauseUpdates stops clock parameter updates. This should only be used when +// Tasks are not running and thus cannot access the clock. 
+func (t *Timekeeper) PauseUpdates() { + t.mu.Lock() + defer t.mu.Unlock() + t.stopUpdater() +} + +// ResumeUpdates restarts clock parameter updates stopped by PauseUpdates. +func (t *Timekeeper) ResumeUpdates() { + t.mu.Lock() + defer t.mu.Unlock() + t.startUpdater() +} + +// GetTime returns the current time in nanoseconds. +func (t *Timekeeper) GetTime(c sentrytime.ClockID) (int64, error) { + if t.clocks == nil { + panic("Timekeeper used before initialized with SetClocks") + } + now, err := t.clocks.GetTime(c) + if err == nil && c == sentrytime.Monotonic { + now += t.monotonicOffset + } + return now, err +} + +// BootTime returns the system boot real time. +func (t *Timekeeper) BootTime() ktime.Time { + return t.bootTime +} diff --git a/pkg/sentry/kernel/timekeeper_state.go b/pkg/sentry/kernel/timekeeper_state.go new file mode 100644 index 000000000..aee983ac7 --- /dev/null +++ b/pkg/sentry/kernel/timekeeper_state.go @@ -0,0 +1,41 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/time" +) + +// beforeSave is invoked by stateify. +func (t *Timekeeper) beforeSave() { + if t.stop != nil { + panic("pauseUpdates must be called before Save") + } + + // N.B. we want the *offset* monotonic time. 
+ var err error + if t.saveMonotonic, err = t.GetTime(time.Monotonic); err != nil { + panic("unable to get current monotonic time: " + err.Error()) + } + + if t.saveRealtime, err = t.GetTime(time.Realtime); err != nil { + panic("unable to get current realtime: " + err.Error()) + } +} + +// afterLoad is invoked by stateify. +func (t *Timekeeper) afterLoad() { + t.restored = true +} diff --git a/pkg/sentry/kernel/timekeeper_test.go b/pkg/sentry/kernel/timekeeper_test.go new file mode 100644 index 000000000..08bacba4f --- /dev/null +++ b/pkg/sentry/kernel/timekeeper_test.go @@ -0,0 +1,156 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + sentrytime "gvisor.googlesource.com/gvisor/pkg/sentry/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// mockClocks is a sentrytime.Clocks that simply returns the times in the +// struct. +type mockClocks struct { + monotonic int64 + realtime int64 +} + +// Update implements sentrytime.Clocks.Update. It does nothing. 
+func (*mockClocks) Update() (monotonicParams sentrytime.Parameters, monotonicOk bool, realtimeParam sentrytime.Parameters, realtimeOk bool) {
+	return
+}
+
+// GetTime implements sentrytime.Clocks.GetTime.
+func (c *mockClocks) GetTime(id sentrytime.ClockID) (int64, error) {
+	switch id {
+	case sentrytime.Monotonic:
+		return c.monotonic, nil
+	case sentrytime.Realtime:
+		return c.realtime, nil
+	default:
+		return 0, syserror.EINVAL
+	}
+}
+
+// stateTestClocklessTimekeeper returns a test Timekeeper which has not had
+// SetClocks called.
+func stateTestClocklessTimekeeper(tb testing.TB) *Timekeeper {
+	ctx := contexttest.Context(tb)
+	p := platform.FromContext(ctx)
+	fr, err := p.Memory().Allocate(usermem.PageSize, usage.Anonymous)
+	if err != nil {
+		tb.Fatalf("failed to allocate memory: %v", err)
+	}
+	return &Timekeeper{
+		params: NewVDSOParamPage(p, fr),
+	}
+}
+
+// stateTestTimekeeper returns a test Timekeeper that has had SetClocks
+// called with freshly calibrated clocks.
+func stateTestTimekeeper(tb testing.TB) *Timekeeper {
+	t := stateTestClocklessTimekeeper(tb)
+	t.SetClocks(sentrytime.NewCalibratedClocks())
+	return t
+}
+
+// TestTimekeeperMonotonicZero tests that monotonic time starts at zero.
+func TestTimekeeperMonotonicZero(t *testing.T) {
+	c := &mockClocks{
+		monotonic: 100000,
+	}
+
+	tk := stateTestClocklessTimekeeper(t)
+	tk.SetClocks(c)
+	defer tk.Destroy()
+
+	now, err := tk.GetTime(sentrytime.Monotonic)
+	if err != nil {
+		t.Errorf("GetTime err got %v want nil", err)
+	}
+	if now != 0 {
+		t.Errorf("GetTime got %d want 0", now)
+	}
+
+	c.monotonic += 10
+
+	now, err = tk.GetTime(sentrytime.Monotonic)
+	if err != nil {
+		t.Errorf("GetTime err got %v want nil", err)
+	}
+	if now != 10 {
+		t.Errorf("GetTime got %d want 10", now)
+	}
+}
+
+// TestTimekeeperMonotonicForward tests that monotonic time jumps forward
+// after restore.
+func TestTimekeeperMonotonicForward(t *testing.T) { + c := &mockClocks{ + monotonic: 900000, + realtime: 600000, + } + + tk := stateTestClocklessTimekeeper(t) + tk.restored = true + tk.saveMonotonic = 100000 + tk.saveRealtime = 400000 + tk.SetClocks(c) + defer tk.Destroy() + + // The monotonic clock should jump ahead by 200000 to 300000. + // + // The new system monotonic time (900000) is irrelevant to what the app + // sees. + now, err := tk.GetTime(sentrytime.Monotonic) + if err != nil { + t.Errorf("GetTime err got %v want nil", err) + } + if now != 300000 { + t.Errorf("GetTime got %d want 300000", now) + } +} + +// TestTimekeeperMonotonicJumpBackwards tests that monotonic time does not jump +// backwards when realtime goes backwards. +func TestTimekeeperMonotonicJumpBackwards(t *testing.T) { + c := &mockClocks{ + monotonic: 900000, + realtime: 400000, + } + + tk := stateTestClocklessTimekeeper(t) + tk.restored = true + tk.saveMonotonic = 100000 + tk.saveRealtime = 600000 + tk.SetClocks(c) + defer tk.Destroy() + + // The monotonic clock should remain at 100000. + // + // The new system monotonic time (900000) is irrelevant to what the app + // sees and we don't want to jump the monotonic clock backwards like + // realtime did. + now, err := tk.GetTime(sentrytime.Monotonic) + if err != nil { + t.Errorf("GetTime err got %v want nil", err) + } + if now != 100000 { + t.Errorf("GetTime got %d want 100000", now) + } +} diff --git a/pkg/sentry/kernel/timer.go b/pkg/sentry/kernel/timer.go new file mode 100644 index 000000000..03a3310be --- /dev/null +++ b/pkg/sentry/kernel/timer.go @@ -0,0 +1,282 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "fmt" + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + sentrytime "gvisor.googlesource.com/gvisor/pkg/sentry/time" +) + +// timekeeperClock is a ktime.Clock that reads time from a +// kernel.Timekeeper-managed clock. +type timekeeperClock struct { + tk *Timekeeper + c sentrytime.ClockID + + // Implements ktime.Clock.WallTimeUntil. + ktime.WallRateClock `state:"nosave"` + + // Implements waiter.Waitable. (We have no ability to detect + // discontinuities from external changes to CLOCK_REALTIME). + ktime.NoClockEvents `state:"nosave"` +} + +// Now implements ktime.Clock.Now. +func (tc *timekeeperClock) Now() ktime.Time { + now, err := tc.tk.GetTime(tc.c) + if err != nil { + panic(fmt.Sprintf("timekeeperClock(ClockID=%v)).Now: %v", tc.c, err)) + } + return ktime.FromNanoseconds(now) +} + +// tgClock is a ktime.Clock that measures the time a thread group has spent +// executing. +type tgClock struct { + tg *ThreadGroup + + // If includeSys is true, the tgClock includes both time spent executing + // application code as well as time spent in the sentry. Otherwise, the + // tgClock includes only time spent executing application code. + includeSys bool + + // Implements waiter.Waitable. + ktime.ClockEventsQueue `state:"nosave"` +} + +// UserCPUClock returns a ktime.Clock that measures the time that a thread +// group has spent executing. 
+func (tg *ThreadGroup) UserCPUClock() ktime.Clock {
+	return tg.tm.virtClock
+}
+
+// CPUClock returns a ktime.Clock that measures the time that a thread group
+// has spent executing, including sentry time.
+func (tg *ThreadGroup) CPUClock() ktime.Clock {
+	return tg.tm.profClock
+}
+
+// Now implements ktime.Clock.Now.
+func (tgc *tgClock) Now() ktime.Time {
+	stats := tgc.tg.CPUStats()
+	if tgc.includeSys {
+		return ktime.FromNanoseconds((stats.UserTime + stats.SysTime).Nanoseconds())
+	}
+	return ktime.FromNanoseconds(stats.UserTime.Nanoseconds())
+}
+
+// WallTimeUntil implements ktime.Clock.WallTimeUntil.
+func (tgc *tgClock) WallTimeUntil(t, now ktime.Time) time.Duration {
+	// The assumption here is that the time spent in this process (no matter
+	// whether virtual or prof) should not exceed wall time * active tasks,
+	// since Task.exitThreadGroup stops accounting as it transitions to
+	// TaskExitInitiated.
+	tgc.tg.pidns.owner.mu.RLock()
+	n := tgc.tg.activeTasks
+	tgc.tg.pidns.owner.mu.RUnlock()
+	if n == 0 {
+		if t.Before(now) {
+			return 0
+		}
+		// The timer tick raced with thread group exit, after which no more
+		// tasks can enter the thread group. So tgc.Now() will never advance
+		// again. Return a large delay; the timer should be stopped long before
+		// it comes again anyway.
+		return time.Hour
+	}
+	// This is a lower bound on the amount of time that can elapse before an
+	// associated timer expires, so returning this value tends to result in a
+	// sequence of closely-spaced ticks just before timer expiry. To avoid
+	// this, round up to the nearest ClockTick; CPU usage measurements are
+	// limited to this resolution anyway.
+	remaining := time.Duration(int64(t.Sub(now))/int64(n)) * time.Nanosecond
+	return ((remaining + (linux.ClockTick - time.Nanosecond)) / linux.ClockTick) * linux.ClockTick
+}
+
+// taskClock is a ktime.Clock that measures the time that a task has spent
+// executing.
+type taskClock struct { + t *Task + + // If includeSys is true, the taskClock includes both time spent executing + // application code as well as time spent in the sentry. Otherwise, the + // taskClock includes only time spent executing application code. + includeSys bool + + // Implements waiter.Waitable. TimeUntil wouldn't change its estimation + // based on either of the clock events, so there's no event to be + // notified for. + ktime.NoClockEvents `state:"nosave"` + + // Implements ktime.Clock.WallTimeUntil. + // + // As an upper bound, a task's clock cannot advance faster than CPU + // time. It would have to execute at a rate of more than 1 task-second + // per 1 CPU-second, which isn't possible. + ktime.WallRateClock `state:"nosave"` +} + +// UserCPUClock returns a clock measuring the CPU time the task has spent +// executing application code. +func (t *Task) UserCPUClock() ktime.Clock { + return &taskClock{t: t, includeSys: false} +} + +// CPUClock returns a clock measuring the CPU time the task has spent executing +// application and "kernel" code. +func (t *Task) CPUClock() ktime.Clock { + return &taskClock{t: t, includeSys: true} +} + +// Now implements ktime.Clock.Now. +func (tc *taskClock) Now() ktime.Time { + stats := tc.t.CPUStats() + if tc.includeSys { + return ktime.FromNanoseconds((stats.UserTime + stats.SysTime).Nanoseconds()) + } + return ktime.FromNanoseconds(stats.UserTime.Nanoseconds()) +} + +// signalNotifier is a ktime.Listener that sends signals to a ThreadGroup. +type signalNotifier struct { + tg *ThreadGroup + signal linux.Signal + realTimer bool + includeSys bool +} + +// Notify implements ktime.TimerListener.Notify. +func (s *signalNotifier) Notify(exp uint64) { + // Since all signals sent using a signalNotifier are standard (not + // real-time) signals, we can ignore the number of expirations and send + // only a single signal. + if s.realTimer { + // real timer signal sent to leader. 
See kernel/time/itimer.c:it_real_fn
+		s.tg.SendSignal(sigPriv(s.signal))
+	} else {
+		s.tg.SendTimerSignal(sigPriv(s.signal), s.includeSys)
+	}
+}
+
+// Destroy implements ktime.TimerListener.Destroy.
+func (s *signalNotifier) Destroy() {}
+
+// TimerManager is a collection of supported process cpu timers.
+type TimerManager struct {
+	// Clocks used to drive thread group execution time timers.
+	virtClock *tgClock
+	profClock *tgClock
+
+	RealTimer      *ktime.Timer
+	VirtualTimer   *ktime.Timer
+	ProfTimer      *ktime.Timer
+	SoftLimitTimer *ktime.Timer
+	HardLimitTimer *ktime.Timer
+}
+
+// newTimerManager returns a new instance of TimerManager.
+func newTimerManager(tg *ThreadGroup, monotonicClock ktime.Clock) TimerManager {
+	virtClock := &tgClock{tg: tg, includeSys: false}
+	profClock := &tgClock{tg: tg, includeSys: true}
+	tm := TimerManager{
+		virtClock: virtClock,
+		profClock: profClock,
+		RealTimer: ktime.NewTimer(monotonicClock, &signalNotifier{
+			tg:         tg,
+			signal:     linux.SIGALRM,
+			realTimer:  true,
+			includeSys: false,
+		}),
+		VirtualTimer: ktime.NewTimer(virtClock, &signalNotifier{
+			tg:         tg,
+			signal:     linux.SIGVTALRM,
+			realTimer:  false,
+			includeSys: false,
+		}),
+		ProfTimer: ktime.NewTimer(profClock, &signalNotifier{
+			tg:         tg,
+			signal:     linux.SIGPROF,
+			realTimer:  false,
+			includeSys: true,
+		}),
+		SoftLimitTimer: ktime.NewTimer(profClock, &signalNotifier{
+			tg:         tg,
+			signal:     linux.SIGXCPU,
+			realTimer:  false,
+			includeSys: true,
+		}),
+		HardLimitTimer: ktime.NewTimer(profClock, &signalNotifier{
+			tg:         tg,
+			signal:     linux.SIGKILL,
+			realTimer:  false,
+			includeSys: true,
+		}),
+	}
+	tm.applyCPULimits(tg.Limits().Get(limits.CPU))
+	return tm
+}
+
+// destroy destroys all timers.
+func (tm *TimerManager) destroy() { + tm.RealTimer.Destroy() + tm.VirtualTimer.Destroy() + tm.ProfTimer.Destroy() + tm.SoftLimitTimer.Destroy() + tm.HardLimitTimer.Destroy() +} + +func (tm *TimerManager) applyCPULimits(l limits.Limit) { + tm.SoftLimitTimer.Swap(ktime.Setting{ + Enabled: l.Cur != limits.Infinity, + Next: ktime.FromNanoseconds((time.Duration(l.Cur) * time.Second).Nanoseconds()), + Period: time.Second, + }) + tm.HardLimitTimer.Swap(ktime.Setting{ + Enabled: l.Max != limits.Infinity, + Next: ktime.FromNanoseconds((time.Duration(l.Max) * time.Second).Nanoseconds()), + }) +} + +// kick is called when the number of threads in the thread group associated +// with tm increases. +func (tm *TimerManager) kick() { + tm.virtClock.Notify(ktime.ClockEventRateIncrease) + tm.profClock.Notify(ktime.ClockEventRateIncrease) +} + +// pause is to pause the timers and stop timer signal delivery. +func (tm *TimerManager) pause() { + tm.RealTimer.Pause() + tm.VirtualTimer.Pause() + tm.ProfTimer.Pause() + tm.SoftLimitTimer.Pause() + tm.HardLimitTimer.Pause() +} + +// resume is to resume the timers and continue timer signal delivery. +func (tm *TimerManager) resume() { + tm.RealTimer.Resume() + tm.VirtualTimer.Resume() + tm.ProfTimer.Resume() + tm.SoftLimitTimer.Resume() + tm.HardLimitTimer.Resume() +} diff --git a/pkg/sentry/kernel/uts_namespace.go b/pkg/sentry/kernel/uts_namespace.go new file mode 100644 index 000000000..58e9b4d1b --- /dev/null +++ b/pkg/sentry/kernel/uts_namespace.go @@ -0,0 +1,100 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" +) + +// UTSNamespace represents a UTS namespace, a holder of two system identifiers: +// the hostname and domain name. +type UTSNamespace struct { + // mu protects all fields below. + mu sync.Mutex `state:"nosave"` + hostName string + domainName string + + // userns is the user namespace associated with the UTSNamespace. + // Privileged operations on this UTSNamespace must have appropriate + // capabilities in userns. + // + // userns is immutable. + userns *auth.UserNamespace +} + +// NewUTSNamespace creates a new UTS namespace. +func NewUTSNamespace(hostName, domainName string, userns *auth.UserNamespace) *UTSNamespace { + return &UTSNamespace{ + hostName: hostName, + domainName: domainName, + userns: userns, + } +} + +// UTSNamespace returns the task's UTS namespace. +func (t *Task) UTSNamespace() *UTSNamespace { + t.mu.Lock() + defer t.mu.Unlock() + return t.utsns +} + +// HostName returns the host name of this UTS namespace. +func (u *UTSNamespace) HostName() string { + u.mu.Lock() + defer u.mu.Unlock() + return u.hostName +} + +// SetHostName sets the host name of this UTS namespace. +func (u *UTSNamespace) SetHostName(host string) { + u.mu.Lock() + defer u.mu.Unlock() + u.hostName = host +} + +// DomainName returns the domain name of this UTS namespace. +func (u *UTSNamespace) DomainName() string { + u.mu.Lock() + defer u.mu.Unlock() + return u.domainName +} + +// SetDomainName sets the domain name of this UTS namespace. 
+func (u *UTSNamespace) SetDomainName(domain string) { + u.mu.Lock() + defer u.mu.Unlock() + u.domainName = domain +} + +// UserNamespace returns the user namespace associated with this UTS namespace. +func (u *UTSNamespace) UserNamespace() *auth.UserNamespace { + u.mu.Lock() + defer u.mu.Unlock() + return u.userns +} + +// Clone makes a copy of this UTS namespace, associating the given user +// namespace. +func (u *UTSNamespace) Clone(userns *auth.UserNamespace) *UTSNamespace { + u.mu.Lock() + defer u.mu.Unlock() + return &UTSNamespace{ + hostName: u.hostName, + domainName: u.domainName, + userns: userns, + } +} diff --git a/pkg/sentry/kernel/vdso.go b/pkg/sentry/kernel/vdso.go new file mode 100644 index 000000000..0bacbea49 --- /dev/null +++ b/pkg/sentry/kernel/vdso.go @@ -0,0 +1,145 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// vdsoParams are the parameters exposed to the VDSO. +// +// They are exposed to the VDSO via a parameter page managed by VDSOParamPage, +// which also includes a sequence counter. 
+type vdsoParams struct { + monotonicReady uint64 + monotonicBaseCycles int64 + monotonicBaseRef int64 + monotonicFrequency uint64 + + realtimeReady uint64 + realtimeBaseCycles int64 + realtimeBaseRef int64 + realtimeFrequency uint64 +} + +// VDSOParamPage manages a VDSO parameter page. +// +// Its memory layout looks like: +// +// type page struct { +// // seq is a sequence counter that protects the fields below. +// seq uint64 +// vdsoParams +// } +// +// Everything in the struct is 8 bytes for easy alignment. +// +// It must be kept in sync with params in vdso/vdso_time.cc. +type VDSOParamPage struct { + // The parameter page is fr, allocated from platform.Memory(). + platform platform.Platform + fr platform.FileRange + + // seq is the current sequence count written to the page. + // + // A write is in progress if bit 1 of the counter is set. + // + // Timekeeper's updater goroutine may call Write before equality is + // checked in state_test_util tests, causing this field to change across + // save / restore. + seq uint64 +} + +// NewVDSOParamPage returns a VDSOParamPage. +// +// Preconditions: +// +// * fr is a single page allocated from platform.Memory(). VDSOParamPage does +// not take ownership of fr; it must remain allocated for the lifetime of the +// VDSOParamPage. +// +// * VDSOParamPage must be the only writer to fr. +// +// * platform.Memory().MapInternal(fr) must return a single safemem.Block. +func NewVDSOParamPage(platform platform.Platform, fr platform.FileRange) *VDSOParamPage { + return &VDSOParamPage{platform: platform, fr: fr} +} + +// access returns a mapping of the param page. 
+func (v *VDSOParamPage) access() (safemem.Block, error) { + bs, err := v.platform.Memory().MapInternal(v.fr, usermem.ReadWrite) + if err != nil { + return safemem.Block{}, err + } + if bs.NumBlocks() != 1 { + panic(fmt.Sprintf("Multiple blocks (%d) in VDSO param BlockSeq", bs.NumBlocks())) + } + return bs.Head(), nil +} + +// incrementSeq increments the sequence counter in the param page. +func (v *VDSOParamPage) incrementSeq(paramPage safemem.Block) error { + next := v.seq + 1 + old, err := safemem.SwapUint64(paramPage, next) + if err != nil { + return err + } + + if old != v.seq { + return fmt.Errorf("unexpected VDSOParamPage seq value: got %d expected %d. Application may hang or get incorrect time from the VDSO.", old, v.seq) + } + + v.seq = next + return nil +} + +// Write updates the VDSO parameters. +// +// Write starts a write block, calls f to get the new parameters, writes +// out the new parameters, then ends the write block. +func (v *VDSOParamPage) Write(f func() vdsoParams) error { + paramPage, err := v.access() + if err != nil { + return err + } + + // Write begin. + next := v.seq + 1 + if next%2 != 1 { + panic("Out-of-order sequence count") + } + + err = v.incrementSeq(paramPage) + if err != nil { + return err + } + + // Get the new params. + p := f() + buf := binary.Marshal(nil, usermem.ByteOrder, p) + + // Skip the sequence counter. + if _, err := safemem.Copy(paramPage.DropFirst(8), safemem.BlockFromSafeSlice(buf)); err != nil { + panic(fmt.Sprintf("Unable to get set VDSO parameters: %v", err)) + } + + // Write end. + return v.incrementSeq(paramPage) +} diff --git a/pkg/sentry/kernel/version.go b/pkg/sentry/kernel/version.go new file mode 100644 index 000000000..a9e84673f --- /dev/null +++ b/pkg/sentry/kernel/version.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kernel + +// Version defines the application-visible system version. +type Version struct { + // Operating system name (e.g. "Linux"). + Sysname string + + // Operating system release (e.g. "3.11.10-amd64"). + Release string + + // Operating system version. On Linux this takes the shape + // "#VERSION CONFIG_FLAGS TIMESTAMP" + // where: + // - VERSION is a sequence counter incremented on every successful build + // - CONFIG_FLAGS is a space-separated list of major enabled kernel features + // (e.g. "SMP" and "PREEMPT") + // - TIMESTAMP is the build timestamp as returned by `date` + Version string +} diff --git a/pkg/sentry/limits/BUILD b/pkg/sentry/limits/BUILD new file mode 100644 index 000000000..06c3e72b0 --- /dev/null +++ b/pkg/sentry/limits/BUILD @@ -0,0 +1,39 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "limits_state", + srcs = [ + "limits.go", + ], + out = "limits_state.go", + package = "limits", +) + +go_library( + name = "limits", + srcs = [ + "context.go", + "limits.go", + "limits_state.go", + "linux.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/limits", + visibility = ["//:sandbox"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/context", + "//pkg/state", + ], +) + +go_test( + name = "limits_test", + size = "small", + srcs = [ + "limits_test.go", + ], + embed = [":limits"], +) diff --git a/pkg/sentry/limits/context.go b/pkg/sentry/limits/context.go new file 
mode 100644 index 000000000..75e97bf92 --- /dev/null +++ b/pkg/sentry/limits/context.go @@ -0,0 +1,35 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package limits + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" +) + +// contextID is the limit package's type for context.Context.Value keys. +type contextID int + +const ( + // CtxLimits is a Context.Value key for a LimitSet. + CtxLimits contextID = iota +) + +// FromContext returns the limits that apply to ctx. +func FromContext(ctx context.Context) *LimitSet { + if v := ctx.Value(CtxLimits); v != nil { + return v.(*LimitSet) + } + return nil +} diff --git a/pkg/sentry/limits/limits.go b/pkg/sentry/limits/limits.go new file mode 100644 index 000000000..4230ba958 --- /dev/null +++ b/pkg/sentry/limits/limits.go @@ -0,0 +1,128 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package limits provides resource limits. +package limits + +import ( + "sync" + "syscall" +) + +// LimitType defines a type of resource limit. +type LimitType int + +// Set of constants defining the different types of resource limits. +const ( + CPU LimitType = iota + FileSize + Data + Stack + Core + Rss + ProcessCount + NumberOfFiles + MemoryPagesLocked + AS + Locks + SignalsPending + MessageQueueBytes + Nice + RealTimePriority + Rttime +) + +// Infinity is a constant representing a resource with no limit. +const Infinity = ^uint64(0) + +// Limit specifies a system limit. +type Limit struct { + // Cur specifies the current limit. + Cur uint64 + // Max specifies the maximum settable limit. + Max uint64 +} + +// LimitSet represents the Limits that correspond to each LimitType. +type LimitSet struct { + mu sync.Mutex `state:"nosave"` + data map[LimitType]Limit +} + +// NewLimitSet creates a new, empty LimitSet. +func NewLimitSet() *LimitSet { + return &LimitSet{ + data: make(map[LimitType]Limit), + } +} + +// GetCopy returns a clone of the LimitSet. +func (l *LimitSet) GetCopy() *LimitSet { + l.mu.Lock() + defer l.mu.Unlock() + copyData := make(map[LimitType]Limit) + for k, v := range l.data { + copyData[k] = v + } + return &LimitSet{ + data: copyData, + } +} + +// Get returns the resource limit associated with LimitType t. +// If no limit is provided, it defaults to an infinite limit.Infinity. +func (l *LimitSet) Get(t LimitType) Limit { + l.mu.Lock() + defer l.mu.Unlock() + s, ok := l.data[t] + if !ok { + return Limit{Cur: Infinity, Max: Infinity} + } + return s +} + +// GetCapped returns the current value for the limit, capped as specified. +func (l *LimitSet) GetCapped(t LimitType, max uint64) uint64 { + s := l.Get(t) + if s.Cur == Infinity || s.Cur > max { + return max + } + return s.Cur +} + +// SetUnchecked assigns value v to resource of LimitType t. 
+func (l *LimitSet) SetUnchecked(t LimitType, v Limit) { + l.mu.Lock() + defer l.mu.Unlock() + l.data[t] = v +} + +// Set assigns value v to resource of LimitType t and returns the old value. +func (l *LimitSet) Set(t LimitType, v Limit) (Limit, error) { + l.mu.Lock() + defer l.mu.Unlock() + // If a limit is already set, make sure the new limit doesn't + // exceed the previous max limit. + if _, ok := l.data[t]; ok { + if l.data[t].Max < v.Max { + return Limit{}, syscall.EPERM + } + if v.Cur > v.Max { + return Limit{}, syscall.EINVAL + } + } + old := l.data[t] + l.data[t] = v + return old, nil +} diff --git a/pkg/sentry/limits/limits_test.go b/pkg/sentry/limits/limits_test.go new file mode 100644 index 000000000..dd6f80750 --- /dev/null +++ b/pkg/sentry/limits/limits_test.go @@ -0,0 +1,37 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package limits + +import ( + "syscall" + "testing" +) + +func TestSet(t *testing.T) { + ls := NewLimitSet() + ls.Set(1, Limit{Cur: 50, Max: 50}) + if _, err := ls.Set(1, Limit{Cur: 20, Max: 50}); err != nil { + t.Fatalf("Tried to lower Limit to valid new value: got %v, wanted nil", err) + } + if _, err := ls.Set(1, Limit{Cur: 20, Max: 60}); err != syscall.EPERM { + t.Fatalf("Tried to raise limit.Max to invalid higher value: got %v, wanted syscall.EPERM", err) + } + if _, err := ls.Set(1, Limit{Cur: 60, Max: 50}); err != syscall.EINVAL { + t.Fatalf("Tried to raise limit.Cur to invalid higher value: got %v, wanted syscall.EINVAL", err) + } + if _, err := ls.Set(1, Limit{Cur: 11, Max: 10}); err != syscall.EINVAL { + t.Fatalf("Tried to set new limit with Cur > Max: got %v, wanted syscall.EINVAL", err) + } +} diff --git a/pkg/sentry/limits/linux.go b/pkg/sentry/limits/linux.go new file mode 100644 index 000000000..8e6a24341 --- /dev/null +++ b/pkg/sentry/limits/linux.go @@ -0,0 +1,100 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package limits + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +// FromLinuxResource maps linux resources to sentry LimitTypes. 
+var FromLinuxResource = map[int]LimitType{ + linux.RLIMIT_CPU: CPU, + linux.RLIMIT_FSIZE: FileSize, + linux.RLIMIT_DATA: Data, + linux.RLIMIT_STACK: Stack, + linux.RLIMIT_CORE: Core, + linux.RLIMIT_RSS: Rss, + linux.RLIMIT_NPROC: ProcessCount, + linux.RLIMIT_NOFILE: NumberOfFiles, + linux.RLIMIT_MEMLOCK: MemoryPagesLocked, + linux.RLIMIT_AS: AS, + linux.RLIMIT_LOCKS: Locks, + linux.RLIMIT_SIGPENDING: SignalsPending, + linux.RLIMIT_MSGQUEUE: MessageQueueBytes, + linux.RLIMIT_NICE: Nice, + linux.RLIMIT_RTPRIO: RealTimePriority, + linux.RLIMIT_RTTIME: Rttime, +} + +// FromLinux maps linux rlimit values to sentry Limits, being careful to handle +// infinities. +func FromLinux(rl uint64) uint64 { + if rl == linux.RLimInfinity { + return Infinity + } + return rl +} + +// ToLinux maps sentry Limits to linux rlimit values, being careful to handle +// infinities. +func ToLinux(l uint64) uint64 { + if l == Infinity { + return linux.RLimInfinity + } + return l +} + +// NewLinuxLimitSet returns a LimitSet whose values match the default rlimits +// in Linux. +func NewLinuxLimitSet() (*LimitSet, error) { + ls := NewLimitSet() + for rlt, rl := range linux.InitRLimits { + lt, ok := FromLinuxResource[rlt] + if !ok { + return nil, fmt.Errorf("unknown rlimit type %v", rlt) + } + ls.SetUnchecked(lt, Limit{ + Cur: FromLinux(rl.Cur), + Max: FromLinux(rl.Max), + }) + } + return ls, nil +} + +// NewLinuxDistroLimitSet returns a new LimitSet whose values are typical +// for a booted Linux distro. +// +// Many Linux init systems adjust the default Linux limits to values more +// expected by the rest of the userspace. NewLinuxDistroLimitSet returns a +// LimitSet with sensible defaults for applications that aren't starting +// their own init system. 
+func NewLinuxDistroLimitSet() (*LimitSet, error) { + ls, err := NewLinuxLimitSet() + if err != nil { + return nil, err + } + + // Adjust ProcessCount to a lower value because GNU bash allocates 16 + // bytes per proc and OOMs if this number is set too high. Value was + // picked arbitrarily. + // + // 1,048,576 ought to be enough for anyone. + l := ls.Get(ProcessCount) + l.Cur = 1 << 20 + ls.Set(ProcessCount, l) + return ls, nil +} diff --git a/pkg/sentry/loader/BUILD b/pkg/sentry/loader/BUILD new file mode 100644 index 000000000..917ec8cc8 --- /dev/null +++ b/pkg/sentry/loader/BUILD @@ -0,0 +1,59 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_embed_data", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_embed_data( + name = "vdso_bin", + src = "//vdso:vdso.so", + package = "loader", + var = "vdsoBin", +) + +go_stateify( + name = "loader_state", + srcs = [ + "vdso.go", + "vdso_state.go", + ], + out = "loader_state.go", + package = "loader", +) + +go_library( + name = "loader", + srcs = [ + "elf.go", + "interpreter.go", + "loader.go", + "loader_state.go", + "vdso.go", + "vdso_state.go", + ":vdso_bin", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/loader", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi", + "//pkg/abi/linux", + "//pkg/binary", + "//pkg/cpuid", + "//pkg/log", + "//pkg/refs", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/limits", + "//pkg/sentry/memmap", + "//pkg/sentry/mm", + "//pkg/sentry/platform", + "//pkg/sentry/safemem", + "//pkg/sentry/uniqueid", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/loader/elf.go b/pkg/sentry/loader/elf.go new file mode 100644 index 000000000..d23dc1096 --- /dev/null +++ b/pkg/sentry/loader/elf.go @@ -0,0 +1,637 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loader + +import ( + "bytes" + "debug/elf" + "fmt" + "io" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/cpuid" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +const ( + // elfMagic identifies an ELF file. + elfMagic = "\x7fELF" + + // maxTotalPhdrSize is the maximum combined size of all program + // headers. Linux limits this to one page. + maxTotalPhdrSize = usermem.PageSize +) + +var ( + // header64Size is the size of elf.Header64. + header64Size = int(binary.Size(elf.Header64{})) + + // Prog64Size is the size of elf.Prog64. 
+ prog64Size = int(binary.Size(elf.Prog64{})) +) + +func progFlagsAsPerms(f elf.ProgFlag) usermem.AccessType { + var p usermem.AccessType + if f&elf.PF_R == elf.PF_R { + p.Read = true + } + if f&elf.PF_W == elf.PF_W { + p.Write = true + } + if f&elf.PF_X == elf.PF_X { + p.Execute = true + } + return p +} + +// elfInfo contains the metadata needed to load an ELF binary. +type elfInfo struct { + // os is the target OS of the ELF. + os abi.OS + + // arch is the target architecture of the ELF. + arch arch.Arch + + // entry is the program entry point. + entry usermem.Addr + + // phdrs are the program headers. + phdrs []elf.ProgHeader + + // phdrSize is the size of a single program header in the ELF. + phdrSize int + + // phdrOff is the offset of the program headers in the file. + phdrOff uint64 + + // sharedObject is true if the ELF represents a shared object. + sharedObject bool +} + +// parseHeader parse the ELF header, verifying that this is a supported ELF +// file and returning the ELF program headers. +// +// This is similar to elf.NewFile, except that it is more strict about what it +// accepts from the ELF, and it doesn't parse unnecessary parts of the file. +// +// ctx may be nil if f does not need it. +func parseHeader(ctx context.Context, f *fs.File) (elfInfo, error) { + // Check ident first; it will tell us the endianness of the rest of the + // structs. + var ident [elf.EI_NIDENT]byte + _, err := readFull(ctx, f, usermem.BytesIOSequence(ident[:]), 0) + if err != nil { + log.Infof("Error reading ELF ident: %v", err) + // The entire ident array always exists. + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = syserror.ENOEXEC + } + return elfInfo{}, err + } + + // Only some callers pre-check the ELF magic. 
+ if !bytes.Equal(ident[:len(elfMagic)], []byte(elfMagic)) { + log.Infof("File is not an ELF") + return elfInfo{}, syserror.ENOEXEC + } + + // We only support 64-bit, little endian binaries + if class := elf.Class(ident[elf.EI_CLASS]); class != elf.ELFCLASS64 { + log.Infof("Unsupported ELF class: %v", class) + return elfInfo{}, syserror.ENOEXEC + } + if endian := elf.Data(ident[elf.EI_DATA]); endian != elf.ELFDATA2LSB { + log.Infof("Unsupported ELF endianness: %v", endian) + return elfInfo{}, syserror.ENOEXEC + } + byteOrder := binary.LittleEndian + + if version := elf.Version(ident[elf.EI_VERSION]); version != elf.EV_CURRENT { + log.Infof("Unsupported ELF version: %v", version) + return elfInfo{}, syserror.ENOEXEC + } + // EI_OSABI is ignored by Linux, which is the only OS supported. + os := abi.Linux + + var hdr elf.Header64 + hdrBuf := make([]byte, header64Size) + _, err = readFull(ctx, f, usermem.BytesIOSequence(hdrBuf), 0) + if err != nil { + log.Infof("Error reading ELF header: %v", err) + // The entire header always exists. + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = syserror.ENOEXEC + } + return elfInfo{}, err + } + binary.Unmarshal(hdrBuf, byteOrder, &hdr) + + // We only support amd64. 
+ if machine := elf.Machine(hdr.Machine); machine != elf.EM_X86_64 { + log.Infof("Unsupported ELF machine %d", machine) + return elfInfo{}, syserror.ENOEXEC + } + a := arch.AMD64 + + var sharedObject bool + elfType := elf.Type(hdr.Type) + switch elfType { + case elf.ET_EXEC: + sharedObject = false + case elf.ET_DYN: + sharedObject = true + default: + log.Infof("Unsupported ELF type %v", elfType) + return elfInfo{}, syserror.ENOEXEC + } + + if int(hdr.Phentsize) != prog64Size { + log.Infof("Unsupported phdr size %d", hdr.Phentsize) + return elfInfo{}, syserror.ENOEXEC + } + totalPhdrSize := prog64Size * int(hdr.Phnum) + if totalPhdrSize < prog64Size { + log.Warningf("No phdrs or total phdr size overflows: prog64Size: %d phnum: %d", prog64Size, int(hdr.Phnum)) + return elfInfo{}, syserror.ENOEXEC + } + if totalPhdrSize > maxTotalPhdrSize { + log.Infof("Too many phdrs (%d): total size %d > %d", hdr.Phnum, totalPhdrSize, maxTotalPhdrSize) + return elfInfo{}, syserror.ENOEXEC + } + + phdrBuf := make([]byte, totalPhdrSize) + _, err = readFull(ctx, f, usermem.BytesIOSequence(phdrBuf), int64(hdr.Phoff)) + if err != nil { + log.Infof("Error reading ELF phdrs: %v", err) + // If phdrs were specified, they should all exist. + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = syserror.ENOEXEC + } + return elfInfo{}, err + } + + phdrs := make([]elf.ProgHeader, hdr.Phnum) + for i := range phdrs { + var prog64 elf.Prog64 + binary.Unmarshal(phdrBuf[:prog64Size], byteOrder, &prog64) + phdrBuf = phdrBuf[prog64Size:] + phdrs[i] = elf.ProgHeader{ + Type: elf.ProgType(prog64.Type), + Flags: elf.ProgFlag(prog64.Flags), + Off: prog64.Off, + Vaddr: prog64.Vaddr, + Paddr: prog64.Paddr, + Filesz: prog64.Filesz, + Memsz: prog64.Memsz, + Align: prog64.Align, + } + } + + return elfInfo{ + os: os, + arch: a, + entry: usermem.Addr(hdr.Entry), + phdrs: phdrs, + phdrOff: hdr.Phoff, + phdrSize: prog64Size, + sharedObject: sharedObject, + }, nil +} + +// mapSegment maps a phdr into the Task. 
offset is the offset to apply to +// phdr.Vaddr. +func mapSegment(ctx context.Context, m *mm.MemoryManager, f *fs.File, phdr *elf.ProgHeader, offset usermem.Addr) error { + // Alignment of vaddr and offset must match. We'll need to map on the + // page boundary. + adjust := usermem.Addr(phdr.Vaddr).PageOffset() + if adjust != usermem.Addr(phdr.Off).PageOffset() { + ctx.Infof("Alignment of vaddr %#x != off %#x", phdr.Vaddr, phdr.Off) + return syserror.ENOEXEC + } + + addr, ok := offset.AddLength(phdr.Vaddr) + if !ok { + // If offset != 0 we should have ensured this would fit. + ctx.Warningf("Computed segment load address overflows: %#x + %#x", phdr.Vaddr, offset) + return syserror.ENOEXEC + } + addr -= usermem.Addr(adjust) + + fileOffset := phdr.Off - adjust + fileSize := phdr.Filesz + adjust + if fileSize < phdr.Filesz { + ctx.Infof("Computed segment file size overflows: %#x + %#x", phdr.Filesz, adjust) + return syserror.ENOEXEC + } + memSize := phdr.Memsz + adjust + if memSize < phdr.Memsz { + ctx.Infof("Computed segment mem size overflows: %#x + %#x", phdr.Memsz, adjust) + return syserror.ENOEXEC + } + ms, ok := usermem.Addr(fileSize).RoundUp() + if !ok { + ctx.Infof("fileSize %#x too large", fileSize) + return syserror.ENOEXEC + } + mapSize := uint64(ms) + + prot := progFlagsAsPerms(phdr.Flags) + mopts := memmap.MMapOpts{ + Length: mapSize, + Offset: fileOffset, + Addr: addr, + Fixed: true, + // Linux will happily allow conflicting segments to map over + // one another. + Unmap: true, + Private: true, + Perms: prot, + MaxPerms: usermem.AnyAccess, + } + if err := f.ConfigureMMap(ctx, &mopts); err != nil { + ctx.Infof("File is not memory-mappable: %v", err) + return err + } + if _, err := m.MMap(ctx, mopts); err != nil { + ctx.Infof("Error mapping PT_LOAD segment %+v at %#x: %v", phdr, addr, err) + return err + } + + // We need to clear the end of the last page that exceeds fileSize so + // we don't map part of the file beyond fileSize. 
+ // + // Note that Linux *does not* clear the portion of the first page + // before phdr.Off. + if mapSize > fileSize { + zeroAddr, ok := addr.AddLength(fileSize) + if !ok { + panic(fmt.Sprintf("successfully mmaped address overflows? %#x + %#x", addr, fileSize)) + } + zeroSize := int64(mapSize - fileSize) + if zeroSize < 0 { + panic(fmt.Sprintf("zeroSize too big? %#x", uint64(zeroSize))) + } + if _, err := m.ZeroOut(ctx, zeroAddr, zeroSize, usermem.IOOpts{IgnorePermissions: true}); err != nil { + ctx.Warningf("Failed to zero end of page [%#x, %#x): %v", zeroAddr, zeroAddr+usermem.Addr(zeroSize), err) + return err + } + } + + // Allocate more anonymous pages if necessary. + if mapSize < memSize { + anonAddr, ok := addr.AddLength(mapSize) + if !ok { + panic(fmt.Sprintf("anonymous memory doesn't fit in pre-sized range? %#x + %#x", addr, mapSize)) + } + anonSize, ok := usermem.Addr(memSize - mapSize).RoundUp() + if !ok { + ctx.Infof("extra anon pages too large: %#x", memSize-mapSize) + return syserror.ENOEXEC + } + + if _, err := m.MMap(ctx, memmap.MMapOpts{ + Length: uint64(anonSize), + Addr: anonAddr, + // Fixed without Unmap will fail the mmap if something is + // already at addr. + Fixed: true, + Private: true, + Perms: progFlagsAsPerms(phdr.Flags), + MaxPerms: usermem.AnyAccess, + }); err != nil { + ctx.Infof("Error mapping PT_LOAD segment %v anonymous memory: %v", phdr, err) + return err + } + } + + return nil +} + +// loadedELF describes an ELF that has been successfully loaded. +type loadedELF struct { + // os is the target OS of the ELF. + os abi.OS + + // arch is the target architecture of the ELF. + arch arch.Arch + + // entry is the entry point of the ELF. + entry usermem.Addr + + // start is the start of the ELF. + start usermem.Addr + + // end is the end of the ELF. + end usermem.Addr + + // interpreter is the path to the ELF interpreter. + interpreter string + + // phdrAddr is the address of the ELF program headers. 
+ phdrAddr usermem.Addr + + // phdrSize is the size of a single program header in the ELF. + phdrSize int + + // phdrNum is the number of program headers. + phdrNum int + + // auxv contains a subset of ELF-specific auxiliary vector entries: + // * AT_PHDR + // * AT_PHENT + // * AT_PHNUM + // * AT_BASE + // * AT_ENTRY + auxv arch.Auxv +} + +// loadParsedELF loads f into mm. +// +// info is the parsed elfInfo from the header. +// +// It does not load the ELF interpreter, or return any auxv entries. +// +// Preconditions: +// * f is an ELF file +func loadParsedELF(ctx context.Context, m *mm.MemoryManager, f *fs.File, info elfInfo, sharedLoadOffset usermem.Addr) (loadedELF, error) { + first := true + var start, end usermem.Addr + var interpreter string + for _, phdr := range info.phdrs { + switch phdr.Type { + case elf.PT_LOAD: + vaddr := usermem.Addr(phdr.Vaddr) + if first { + first = false + start = vaddr + } + if vaddr < end { + ctx.Infof("PT_LOAD headers out-of-order. %#x < %#x", vaddr, end) + return loadedELF{}, syserror.ENOEXEC + } + var ok bool + end, ok = vaddr.AddLength(phdr.Memsz) + if !ok { + ctx.Infof("PT_LOAD header size overflows. %#x + %#x", vaddr, phdr.Memsz) + return loadedELF{}, syserror.ENOEXEC + } + + case elf.PT_INTERP: + if phdr.Filesz > syscall.PathMax { + ctx.Infof("PT_INTERP path too big: %v", phdr.Filesz) + return loadedELF{}, syserror.ENOEXEC + } + + path := make([]byte, phdr.Filesz) + _, err := readFull(ctx, f, usermem.BytesIOSequence(path), int64(phdr.Off)) + if err != nil { + ctx.Infof("Error reading PT_INTERP path: %v", err) + // If an interpreter was specified, it should exist. + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = syserror.ENOEXEC + } + return loadedELF{}, syserror.ENOEXEC + } + + if path[len(path)-1] != 0 { + ctx.Infof("PT_INTERP path not NUL-terminated: %v", path) + return loadedELF{}, syserror.ENOEXEC + } + + // Strip NUL-terminator from string. 
+ interpreter = string(path[:len(path)-1]) + } + } + + // Shared objects don't have fixed load addresses. We need to pick a + // base address big enough to fit all segments, so we first create a + // mapping for the total size just to find a region that is big enough. + // + // It is safe to unmap it immediately with racing with another mapping + // because we are the only one in control of the MemoryManager. + // + // Note that the vaddr of the first PT_LOAD segment is ignored when + // choosing the load address (even if it is non-zero). The vaddr does + // become an offset from that load address. + var offset usermem.Addr + if info.sharedObject { + totalSize := end - start + totalSize, ok := totalSize.RoundUp() + if !ok { + ctx.Infof("ELF PT_LOAD segments too big") + return loadedELF{}, syserror.ENOEXEC + } + + var err error + offset, err = m.MMap(ctx, memmap.MMapOpts{ + Length: uint64(totalSize), + Addr: sharedLoadOffset, + Private: true, + }) + if err != nil { + ctx.Infof("Error allocating address space for shared object: %v", err) + return loadedELF{}, err + } + if err := m.MUnmap(ctx, offset, uint64(totalSize)); err != nil { + panic(fmt.Sprintf("Failed to unmap base address: %v", err)) + } + + start, ok = start.AddLength(uint64(offset)) + if !ok { + panic(fmt.Sprintf("Start %#x + offset %#x overflows?", start, offset)) + } + + end, ok = end.AddLength(uint64(offset)) + if !ok { + panic(fmt.Sprintf("End %#x + offset %#x overflows?", end, offset)) + } + + info.entry, ok = info.entry.AddLength(uint64(offset)) + if !ok { + ctx.Infof("Entrypoint %#x + offset %#x overflows? Is the entrypoint within a segment?", info.entry, offset) + return loadedELF{}, err + } + } + + // Map PT_LOAD segments. + for _, phdr := range info.phdrs { + switch phdr.Type { + case elf.PT_LOAD: + if phdr.Memsz == 0 { + // No need to load segments with size 0, but + // they exist in some binaries. 
+ continue + } + + if err := mapSegment(ctx, m, f, &phdr, offset); err != nil { + ctx.Infof("Failed to map PT_LOAD segment: %+v", phdr) + return loadedELF{}, err + } + } + } + + // This assumes that the first segment contains the ELF headers. This + // may not be true in a malformed ELF, but Linux makes the same + // assumption. + phdrAddr, ok := start.AddLength(info.phdrOff) + if !ok { + ctx.Warningf("ELF start address %#x + phdr offset %#x overflows", start, info.phdrOff) + phdrAddr = 0 + } + + return loadedELF{ + os: info.os, + arch: info.arch, + entry: info.entry, + start: start, + end: end, + interpreter: interpreter, + phdrAddr: phdrAddr, + phdrSize: info.phdrSize, + phdrNum: len(info.phdrs), + }, nil +} + +// loadInitialELF loads f into mm. +// +// It creates an arch.Context for the ELF and prepares the mm for this arch. +// +// It does not load the ELF interpreter, or return any auxv entries. +// +// Preconditions: +// * f is an ELF file +// * f is the first ELF loaded into m +func loadInitialELF(ctx context.Context, m *mm.MemoryManager, fs *cpuid.FeatureSet, f *fs.File) (loadedELF, arch.Context, error) { + info, err := parseHeader(ctx, f) + if err != nil { + ctx.Infof("Failed to parse initial ELF: %v", err) + return loadedELF{}, nil, err + } + + // Create the arch.Context now so we can prepare the mmap layout before + // mapping anything. + ac := arch.New(info.arch, fs) + + l, err := m.SetMmapLayout(ac, limits.FromContext(ctx)) + if err != nil { + ctx.Warningf("Failed to set mmap layout: %v", err) + return loadedELF{}, nil, err + } + + // PIELoadAddress tries to move the ELF out of the way of the default + // mmap base to ensure that the initial brk has sufficient space to + // grow. + le, err := loadParsedELF(ctx, m, f, info, ac.PIELoadAddress(l)) + return le, ac, err +} + +// loadInterpreterELF loads f into mm. +// +// The interpreter must be for the same OS/Arch as the initial ELF. +// +// It does not return any auxv entries. 
+// +// Preconditions: +// * f is an ELF file +func loadInterpreterELF(ctx context.Context, m *mm.MemoryManager, f *fs.File, initial loadedELF) (loadedELF, error) { + info, err := parseHeader(ctx, f) + if err != nil { + if err == syserror.ENOEXEC { + // Bad interpreter. + err = syserror.ELIBBAD + } + return loadedELF{}, err + } + + if info.os != initial.os { + ctx.Infof("Initial ELF OS %v and interpreter ELF OS %v differ", initial.os, info.os) + return loadedELF{}, syserror.ELIBBAD + } + if info.arch != initial.arch { + ctx.Infof("Initial ELF arch %v and interpreter ELF arch %v differ", initial.arch, info.arch) + return loadedELF{}, syserror.ELIBBAD + } + + // The interpreter is not given a load offset, as its location does not + // affect brk. + return loadParsedELF(ctx, m, f, info, 0) +} + +// loadELF loads f into the Task address space. +// +// If loadELF returns ErrSwitchFile it should be called again with the returned +// path and argv. +// +// Preconditions: +// * f is an ELF file +func loadELF(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals uint, fs *cpuid.FeatureSet, f *fs.File) (loadedELF, arch.Context, error) { + bin, ac, err := loadInitialELF(ctx, m, fs, f) + if err != nil { + ctx.Infof("Error loading binary: %v", err) + return loadedELF{}, nil, err + } + + var interp loadedELF + if bin.interpreter != "" { + d, i, err := openPath(ctx, mounts, root, wd, maxTraversals, bin.interpreter) + if err != nil { + ctx.Infof("Error opening interpreter %s: %v", bin.interpreter, err) + return loadedELF{}, nil, err + } + defer i.DecRef() + // We don't need the Dirent. + d.DecRef() + + interp, err = loadInterpreterELF(ctx, m, i, bin) + if err != nil { + ctx.Infof("Error loading interpreter: %v", err) + return loadedELF{}, nil, err + } + + if interp.interpreter != "" { + // No recursive interpreters! 
+ ctx.Infof("Interpreter requires an interpreter") + return loadedELF{}, nil, syserror.ENOEXEC + } + } + + // ELF-specific auxv entries. + bin.auxv = arch.Auxv{ + arch.AuxEntry{linux.AT_PHDR, bin.phdrAddr}, + arch.AuxEntry{linux.AT_PHENT, usermem.Addr(bin.phdrSize)}, + arch.AuxEntry{linux.AT_PHNUM, usermem.Addr(bin.phdrNum)}, + arch.AuxEntry{linux.AT_ENTRY, bin.entry}, + } + if bin.interpreter != "" { + bin.auxv = append(bin.auxv, arch.AuxEntry{linux.AT_BASE, interp.start}) + + // Start in the interpreter. + // N.B. AT_ENTRY above contains the *original* entry point. + bin.entry = interp.entry + } + + return bin, ac, nil +} diff --git a/pkg/sentry/loader/interpreter.go b/pkg/sentry/loader/interpreter.go new file mode 100644 index 000000000..b8ecbe92f --- /dev/null +++ b/pkg/sentry/loader/interpreter.go @@ -0,0 +1,105 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loader + +import ( + "bytes" + "io" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +const ( + // interpreterScriptMagic identifies an interpreter script. + interpreterScriptMagic = "#!" + + // interpMaxLineLength is the maximum length for the first line of an + // interpreter script. + // + // From execve(2): "A maximum line length of 127 characters is allowed + // for the first line in a #! 
executable shell script." + interpMaxLineLength = 127 +) + +// parseInterpreterScript returns the interpreter path and argv. +func parseInterpreterScript(ctx context.Context, filename string, f *fs.File, argv, envv []string) (newpath string, newargv []string, err error) { + line := make([]byte, interpMaxLineLength) + n, err := readFull(ctx, f, usermem.BytesIOSequence(line), 0) + // Short read is OK. + if err != nil && err != io.ErrUnexpectedEOF { + if err == io.EOF { + err = syserror.ENOEXEC + } + return "", []string{}, err + } + line = line[:n] + + if !bytes.Equal(line[:2], []byte(interpreterScriptMagic)) { + return "", []string{}, syserror.ENOEXEC + } + // Ignore #!. + line = line[2:] + + // Ignore everything after newline. + // Linux silently truncates the remainder of the line if it exceeds + // interpMaxLineLength. + i := bytes.IndexByte(line, '\n') + if i > 0 { + line = line[:i] + } + + // Skip any whitespace before the interpreter. + line = bytes.TrimLeft(line, " \t") + + // Linux only looks for a space or tab delimiting the interpreter and + // arg. + // + // execve(2): "On Linux, the entire string following the interpreter + // name is passed as a single argument to the interpreter, and this + // string can include white space." + interp := line + var arg []byte + i = bytes.IndexAny(line, " \t") + if i >= 0 { + interp = line[:i] + if i+1 < len(line) { + arg = line[i+1:] + } + } + + // Build the new argument list: + // + // 1. The interpreter. + newargv = append(newargv, string(interp)) + + // 2. The optional interpreter argument. + if len(arg) > 0 { + newargv = append(newargv, string(arg)) + } + + // 3. The original arguments. The original argv[0] is replaced with the + // full script filename. + if len(argv) > 0 { + argv[0] = filename + } else { + argv = []string{filename} + } + newargv = append(newargv, argv...) 
+ + return string(interp), newargv, nil +} diff --git a/pkg/sentry/loader/loader.go b/pkg/sentry/loader/loader.go new file mode 100644 index 000000000..94c281b72 --- /dev/null +++ b/pkg/sentry/loader/loader.go @@ -0,0 +1,277 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package loader loads a binary into a MemoryManager. +package loader + +import ( + "bytes" + "crypto/rand" + "io" + "path" + + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/cpuid" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// readFull behaves like io.ReadFull for an *fs.File. +func readFull(ctx context.Context, f *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + var total int64 + for dst.NumBytes() > 0 { + n, err := f.Preadv(ctx, dst, offset+total) + total += n + if err == io.EOF && total != 0 { + return total, io.ErrUnexpectedEOF + } else if err != nil { + return total, err + } + dst = dst.DropFirst64(n) + } + return total, nil +} + +// openPath opens name for loading. +// +// openPath returns the fs.Dirent and an *fs.File for name, which is not +// installed in the Task FDMap. 
The caller takes ownership of both.
//
// name must be a readable, executable, regular file.
func openPath(ctx context.Context, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals uint, name string) (*fs.Dirent, *fs.File, error) {
	// N.B. the mount namespace parameter is named mounts, not mm, to avoid
	// shadowing the imported mm package.
	d, err := mounts.FindInode(ctx, root, wd, name, maxTraversals)
	if err != nil {
		return nil, nil, err
	}
	// Drop our lookup reference on error paths; on success we IncRef again
	// below so the caller receives an owned reference.
	defer d.DecRef()

	perms := fs.PermMask{
		// TODO: Linux requires only execute permission,
		// not read. However, our backing filesystems may prevent us
		// from reading the file without read permission.
		//
		// Additionally, a task with a non-readable executable has
		// additional constraints on access via ptrace and procfs.
		Read:    true,
		Execute: true,
	}
	if err := d.Inode.CheckPermission(ctx, perms); err != nil {
		return nil, nil, err
	}

	// If they claim it's a directory, then make sure.
	//
	// N.B. we reject directories below, but we must first reject
	// non-directories passed as directories.
	if len(name) > 0 && name[len(name)-1] == '/' && !fs.IsDir(d.Inode.StableAttr) {
		return nil, nil, syserror.ENOTDIR
	}

	// No exec-ing directories, pipes, etc!
	if !fs.IsRegular(d.Inode.StableAttr) {
		ctx.Infof("Error regularing %s: %v", name, d.Inode.StableAttr)
		return nil, nil, syserror.EACCES
	}

	// Create a new file.
	file, err := d.Inode.GetFile(ctx, d, fs.FileFlags{Read: true})
	if err != nil {
		return nil, nil, err
	}

	// We must be able to read at arbitrary offsets.
	if !file.Flags().Pread {
		file.DecRef()
		ctx.Infof("%s cannot be read at an offset: %+v", name, file.Flags())
		return nil, nil, syserror.EACCES
	}

	// Grab a reference for the caller.
	d.IncRef()
	return d, file, nil
}

// allocStack allocates and maps a stack into any available part of the address space.
+func allocStack(ctx context.Context, m *mm.MemoryManager, a arch.Context) (*arch.Stack, error) { + ar, err := m.MapStack(ctx) + if err != nil { + return nil, err + } + return &arch.Stack{a, m, ar.End}, nil +} + +const ( + // maxLoaderAttempts is the maximum number of attempts to try to load + // an interpreter scripts, to prevent loops. 6 (inital + 5 changes) is + // what the Linux kernel allows (fs/exec.c:search_binary_handler). + maxLoaderAttempts = 6 +) + +// loadPath resolves filename to a binary and loads it. +// +// It returns: +// * loadedELF, description of the loaded binary +// * arch.Context matching the binary arch +// * fs.Dirent of the binary file +// * Possibly updated argv +func loadPath(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals uint, fs *cpuid.FeatureSet, filename string, argv, envv []string) (loadedELF, arch.Context, *fs.Dirent, []string, error) { + for i := 0; i < maxLoaderAttempts; i++ { + d, f, err := openPath(ctx, mounts, root, wd, maxTraversals, filename) + if err != nil { + ctx.Infof("Error opening %s: %v", filename, err) + return loadedELF{}, nil, nil, nil, err + } + defer f.DecRef() + // We will return d in the successful case, but defer a DecRef + // for intermediate loops and failure cases. + defer d.DecRef() + + // Check the header. Is this an ELF or interpreter script? + var hdr [4]uint8 + // N.B. We assume that reading from a regular file cannot block. + _, err = readFull(ctx, f, usermem.BytesIOSequence(hdr[:]), 0) + // Allow unexpected EOF, as a valid executable could be only three + // bytes (e.g., #!a). 
+ if err != nil && err != io.ErrUnexpectedEOF { + if err == io.EOF { + err = syserror.ENOEXEC + } + return loadedELF{}, nil, nil, nil, err + } + + switch { + case bytes.Equal(hdr[:], []byte(elfMagic)): + loaded, ac, err := loadELF(ctx, m, mounts, root, wd, maxTraversals, fs, f) + if err != nil { + ctx.Infof("Error loading ELF: %v", err) + return loadedELF{}, nil, nil, nil, err + } + // An ELF is always terminal. Hold on to d. + d.IncRef() + return loaded, ac, d, argv, err + case bytes.Equal(hdr[:2], []byte(interpreterScriptMagic)): + newpath, newargv, err := parseInterpreterScript(ctx, filename, f, argv, envv) + if err != nil { + ctx.Infof("Error loading interpreter script: %v", err) + return loadedELF{}, nil, nil, nil, err + } + filename = newpath + argv = newargv + default: + ctx.Infof("Unknown magic: %v", hdr) + return loadedELF{}, nil, nil, nil, syserror.ENOEXEC + } + } + + return loadedELF{}, nil, nil, nil, syserror.ELOOP +} + +// Load loads filename into a MemoryManager. +// +// If Load returns ErrSwitchFile it should be called again with the returned +// path and argv. +// +// Preconditions: +// * The Task MemoryManager is empty. +// * Load is called on the Task goroutine. +func Load(ctx context.Context, m *mm.MemoryManager, mounts *fs.MountNamespace, root, wd *fs.Dirent, maxTraversals uint, fs *cpuid.FeatureSet, filename string, argv, envv []string, extraAuxv []arch.AuxEntry, vdso *VDSO) (abi.OS, arch.Context, string, error) { + // Load the binary itself. + loaded, ac, d, argv, err := loadPath(ctx, m, mounts, root, wd, maxTraversals, fs, filename, argv, envv) + if err != nil { + ctx.Infof("Failed to load %s: %v", filename, err) + return 0, nil, "", err + } + defer d.DecRef() + + // Load the VDSO. + vdsoAddr, err := loadVDSO(ctx, m, vdso, loaded) + if err != nil { + ctx.Infof("Error loading VDSO: %v", err) + return 0, nil, "", err + } + + // Setup the heap. brk starts at the next page after the end of the + // binary. 
Userspace can assume that the remainder of the page after
	// loaded.end is available for its use.
	e, ok := loaded.end.RoundUp()
	if !ok {
		ctx.Warningf("brk overflows: %#x", loaded.end)
		return 0, nil, "", syserror.ENOEXEC
	}
	m.BrkSetup(ctx, e)

	// Allocate our stack.
	stack, err := allocStack(ctx, m, ac)
	if err != nil {
		ctx.Infof("Failed to allocate stack: %v", err)
		return 0, nil, "", err
	}

	// Push the original filename to the stack, for AT_EXECFN.
	execfn, err := stack.Push(filename)
	if err != nil {
		ctx.Infof("Failed to push exec filename: %v", err)
		return 0, nil, "", err
	}

	// Push 16 random bytes on the stack which AT_RANDOM will point to.
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		ctx.Infof("Failed to read random bytes: %v", err)
		return 0, nil, "", err
	}
	random, err := stack.Push(b)
	if err != nil {
		ctx.Infof("Failed to push random bytes: %v", err)
		return 0, nil, "", err
	}

	// Add generic auxv entries.
	auxv := append(loaded.auxv, arch.Auxv{
		arch.AuxEntry{linux.AT_CLKTCK, linux.CLOCKS_PER_SEC},
		arch.AuxEntry{linux.AT_EXECFN, execfn},
		arch.AuxEntry{linux.AT_RANDOM, random},
		arch.AuxEntry{linux.AT_PAGESZ, usermem.PageSize},
		arch.AuxEntry{linux.AT_SYSINFO_EHDR, vdsoAddr},
	}...)
	auxv = append(auxv, extraAuxv...)

	// Lay out argv, envv, and auxv on the stack.
	sl, err := stack.Load(argv, envv, auxv)
	if err != nil {
		ctx.Infof("Failed to load stack: %v", err)
		return 0, nil, "", err
	}

	// Record the stack layout in the MemoryManager (used by e.g. procfs).
	m.SetArgvStart(sl.ArgvStart)
	m.SetArgvEnd(sl.ArgvEnd)
	m.SetEnvvStart(sl.EnvvStart)
	m.SetEnvvEnd(sl.EnvvEnd)
	m.SetAuxv(auxv)
	m.SetExecutable(d)

	// Point the arch context at the binary entry point and the new stack.
	ac.SetIP(uintptr(loaded.entry))
	ac.SetStack(uintptr(stack.Bottom))

	// The task comm is the basename, truncated to fit in TASK_COMM_LEN
	// (which includes the terminating NUL), matching Linux.
	name := path.Base(filename)
	if len(name) > linux.TASK_COMM_LEN-1 {
		name = name[:linux.TASK_COMM_LEN-1]
	}

	return loaded.os, ac, name, nil
}
diff --git a/pkg/sentry/loader/vdso.go b/pkg/sentry/loader/vdso.go
new file mode 100644
index 000000000..ce4f6f5d9
--- /dev/null
+++ b/pkg/sentry/loader/vdso.go
@@ -0,0 +1,382 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package loader + +import ( + "debug/elf" + "fmt" + "io" + + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/uniqueid" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// byteReaderFileOperations implements fs.FileOperations for reading +// from a []byte source. +type byteReader struct { + fsutil.NoopRelease + fsutil.PipeSeek + fsutil.NotDirReaddir + fsutil.NoFsync + fsutil.NoopFlush + fsutil.NoMMap + fsutil.NoIoctl + waiter.AlwaysReady + data []byte +} + +type fileContext struct { + context.Context +} + +func (f *fileContext) Value(key interface{}) interface{} { + switch key { + case uniqueid.CtxGlobalUniqueID: + return uint64(0) + default: + return f.Context.Value(key) + } +} + +func newByteReaderFile(data []byte) *fs.File { + dirent := fs.NewTransientDirent(nil) + flags := fs.FileFlags{Read: true, Pread: true} + return fs.NewFile(&fileContext{Context: context.Background()}, dirent, flags, &byteReader{ + data: data, + }) +} + +func (b *byteReader) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) { + if offset < 0 { + return 0, syserror.EINVAL + } + if offset >= int64(len(b.data)) { + return 0, io.EOF + } + n, err := dst.CopyOut(ctx, b.data[offset:]) + return int64(n), err +} + +func (b *byteReader) Write(ctx context.Context, file *fs.File, src usermem.IOSequence, offset 
int64) (int64, error) { + panic("Write not supported") +} + +// validateVDSO checks that the VDSO can be loaded by loadVDSO. +// +// VDSOs are special (see below). Since we are going to map the VDSO directly +// rather than using a normal loading process, we require that the PT_LOAD +// segments have the same layout in the ELF as they expect to have in memory. +// +// Namely, this means that we must verify: +// * PT_LOAD file offsets are equivalent to the memory offset from the first +// segment. +// * No extra zeroed space (memsz) is required. +// * PT_LOAD segments are in order. +// * No two PT_LOAD segments occupy parts of the same page. +// * PT_LOAD segments don't extend beyond the end of the file. +// +// ctx may be nil if f does not need it. +func validateVDSO(ctx context.Context, f *fs.File, size uint64) (elfInfo, error) { + info, err := parseHeader(ctx, f) + if err != nil { + log.Infof("Unable to parse VDSO header: %v", err) + return elfInfo{}, err + } + + var first *elf.ProgHeader + var prev *elf.ProgHeader + var prevEnd usermem.Addr + for i, phdr := range info.phdrs { + if phdr.Type != elf.PT_LOAD { + continue + } + + if first == nil { + first = &info.phdrs[i] + if phdr.Off != 0 { + log.Warningf("First PT_LOAD segment has non-zero file offset") + return elfInfo{}, syserror.ENOEXEC + } + } + + memoryOffset := phdr.Vaddr - first.Vaddr + if memoryOffset != phdr.Off { + log.Warningf("PT_LOAD segment memory offset %#x != file offset %#x", memoryOffset, phdr.Off) + return elfInfo{}, syserror.ENOEXEC + } + + // memsz larger than filesz means that extra zeroed space should be + // provided at the end of the segment. Since we are mapping the ELF + // directly, we don't want to just overwrite part of the ELF with + // zeroes. 
+ if phdr.Memsz != phdr.Filesz { + log.Warningf("PT_LOAD segment memsz %#x != filesz %#x", phdr.Memsz, phdr.Filesz) + return elfInfo{}, syserror.ENOEXEC + } + + start := usermem.Addr(memoryOffset) + end, ok := start.AddLength(phdr.Memsz) + if !ok { + log.Warningf("PT_LOAD segment size overflows: %#x + %#x", start, end) + return elfInfo{}, syserror.ENOEXEC + } + if uint64(end) > size { + log.Warningf("PT_LOAD segment end %#x extends beyond end of file %#x", end, size) + return elfInfo{}, syserror.ENOEXEC + } + + if prev != nil { + if start < prevEnd { + log.Warningf("PT_LOAD segments out of order") + return elfInfo{}, syserror.ENOEXEC + } + + // We mprotect entire pages, so each segment must be in + // its own page. + prevEndPage := prevEnd.RoundDown() + startPage := start.RoundDown() + if prevEndPage >= startPage { + log.Warningf("PT_LOAD segments share a page: %#x", prevEndPage) + return elfInfo{}, syserror.ENOEXEC + } + } + prev = &info.phdrs[i] + prevEnd = end + } + + return info, nil +} + +// VDSO describes a VDSO. +// +// NOTE: to support multiple architectures or operating systems, this +// would need to contain a VDSO for each. +type VDSO struct { + // ParamPage is the VDSO parameter page. This page should be updated to + // inform the VDSO for timekeeping data. + ParamPage *mm.SpecialMappable + + // vdso is the VDSO ELF itself. + vdso *mm.SpecialMappable + + // os is the operating system targeted by the VDSO. + os abi.OS + + // arch is the architecture targeted by the VDSO. + arch arch.Arch + + // phdrs are the VDSO ELF phdrs. + phdrs []elf.ProgHeader `state:".([]elfProgHeader)"` +} + +// PrepareVDSO validates the system VDSO and returns a VDSO, containing the +// param page for updating by the kernel. +func PrepareVDSO(p platform.Platform) (*VDSO, error) { + vdsoFile := newByteReaderFile(vdsoBin) + + // First make sure the VDSO is valid. vdsoFile does not use ctx, so a + // nil context can be passed. 
+ info, err := validateVDSO(nil, vdsoFile, uint64(len(vdsoBin))) + if err != nil { + return nil, err + } + + // Then copy it into a VDSO mapping. + size, ok := usermem.Addr(len(vdsoBin)).RoundUp() + if !ok { + return nil, fmt.Errorf("VDSO size overflows? %#x", len(vdsoBin)) + } + + vdso, err := p.Memory().Allocate(uint64(size), usage.System) + if err != nil { + return nil, fmt.Errorf("unable to allocate VDSO memory: %v", err) + } + + ims, err := p.Memory().MapInternal(vdso, usermem.ReadWrite) + if err != nil { + p.Memory().DecRef(vdso) + return nil, fmt.Errorf("unable to map VDSO memory: %v", err) + } + + _, err = safemem.CopySeq(ims, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(vdsoBin))) + if err != nil { + p.Memory().DecRef(vdso) + return nil, fmt.Errorf("unable to copy VDSO into memory: %v", err) + } + + // Finally, allocate a param page for this VDSO. + paramPage, err := p.Memory().Allocate(usermem.PageSize, usage.System) + if err != nil { + p.Memory().DecRef(vdso) + return nil, fmt.Errorf("unable to allocate VDSO param page: %v", err) + } + + return &VDSO{ + ParamPage: mm.NewSpecialMappable("[vvar]", p, paramPage), + // TODO: Don't advertise the VDSO, as some applications may + // not be able to handle multiple [vdso] hints. + vdso: mm.NewSpecialMappable("", p, vdso), + phdrs: info.phdrs, + }, nil +} + +// loadVDSO loads the VDSO into m. +// +// VDSOs are special. +// +// VDSOs are fully position independent. However, instead of loading a VDSO +// like a normal ELF binary, mapping only the PT_LOAD segments, the Linux +// kernel simply directly maps the entire file into process memory, with very +// little real ELF parsing. +// +// NOTE: This means that userspace can, and unfortunately does, +// depend on parts of the ELF that would normally not be mapped. To maintain +// compatibility with such binaries, we load the VDSO much like Linux. +// +// loadVDSO takes a reference on the VDSO and parameter page FrameRegions. 
+func loadVDSO(ctx context.Context, m *mm.MemoryManager, v *VDSO, bin loadedELF) (usermem.Addr, error) { + if v == nil { + // Should be used only by tests. + ctx.Warningf("No VDSO provided, skipping VDSO mapping") + return 0, nil + } + + if v.os != bin.os { + ctx.Warningf("Binary ELF OS %v and VDSO ELF OS %v differ", bin.os, v.os) + return 0, syserror.ENOEXEC + } + if v.arch != bin.arch { + ctx.Warningf("Binary ELF arch %v and VDSO ELF arch %v differ", bin.arch, v.arch) + return 0, syserror.ENOEXEC + } + + // Reserve address space for the VDSO and its parameter page, which is + // mapped just before the VDSO. + mapSize := v.vdso.Length() + v.ParamPage.Length() + addr, err := m.MMap(ctx, memmap.MMapOpts{ + Length: mapSize, + Private: true, + }) + if err != nil { + ctx.Infof("Unable to reserve VDSO address space: %v", err) + return 0, err + } + + // Now map the param page. + _, err = m.MMap(ctx, memmap.MMapOpts{ + Length: v.ParamPage.Length(), + MappingIdentity: v.ParamPage, + Mappable: v.ParamPage, + Addr: addr, + Fixed: true, + Unmap: true, + Private: true, + Perms: usermem.Read, + MaxPerms: usermem.Read, + }) + if err != nil { + ctx.Infof("Unable to map VDSO param page: %v", err) + return 0, err + } + + // Now map the VDSO itself. + vdsoAddr, ok := addr.AddLength(v.ParamPage.Length()) + if !ok { + panic(fmt.Sprintf("Part of mapped range overflows? %#x + %#x", addr, v.ParamPage.Length())) + } + _, err = m.MMap(ctx, memmap.MMapOpts{ + Length: v.vdso.Length(), + MappingIdentity: v.vdso, + Mappable: v.vdso, + Addr: vdsoAddr, + Fixed: true, + Unmap: true, + Private: true, + Perms: usermem.Read, + MaxPerms: usermem.AnyAccess, + }) + if err != nil { + ctx.Infof("Unable to map VDSO: %v", err) + return 0, err + } + + vdsoEnd, ok := vdsoAddr.AddLength(v.vdso.Length()) + if !ok { + panic(fmt.Sprintf("VDSO mapping overflows? %#x + %#x", vdsoAddr, v.vdso.Length())) + } + + // Set additional protections for the individual segments. 
+ var first *elf.ProgHeader + for i, phdr := range v.phdrs { + if phdr.Type != elf.PT_LOAD { + continue + } + + if first == nil { + first = &v.phdrs[i] + } + + memoryOffset := phdr.Vaddr - first.Vaddr + segAddr, ok := vdsoAddr.AddLength(memoryOffset) + if !ok { + ctx.Warningf("PT_LOAD segment address overflows: %#x + %#x", segAddr, memoryOffset) + return 0, syserror.ENOEXEC + } + segPage := segAddr.RoundDown() + segSize := usermem.Addr(phdr.Memsz) + segSize, ok = segSize.AddLength(segAddr.PageOffset()) + if !ok { + ctx.Warningf("PT_LOAD segment memsize %#x + offset %#x overflows", phdr.Memsz, segAddr.PageOffset()) + return 0, syserror.ENOEXEC + } + segSize, ok = segSize.RoundUp() + if !ok { + ctx.Warningf("PT_LOAD segment size overflows: %#x", phdr.Memsz+segAddr.PageOffset()) + return 0, syserror.ENOEXEC + } + segEnd, ok := segPage.AddLength(uint64(segSize)) + if !ok { + ctx.Warningf("PT_LOAD segment range overflows: %#x + %#x", segAddr, segSize) + return 0, syserror.ENOEXEC + } + if segEnd > vdsoEnd { + ctx.Warningf("PT_LOAD segment ends beyond VDSO: %#x > %#x", segEnd, vdsoEnd) + return 0, syserror.ENOEXEC + } + + perms := progFlagsAsPerms(phdr.Flags) + if perms != usermem.Read { + if err := m.MProtect(segPage, uint64(segSize), perms, false); err != nil { + ctx.Warningf("Unable to set PT_LOAD segment protections %+v at [%#x, %#x): %v", perms, segAddr, segEnd, err) + return 0, syserror.ENOEXEC + } + } + } + + return vdsoAddr, nil +} diff --git a/pkg/sentry/loader/vdso_state.go b/pkg/sentry/loader/vdso_state.go new file mode 100644 index 000000000..92004ad9e --- /dev/null +++ b/pkg/sentry/loader/vdso_state.go @@ -0,0 +1,47 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loader + +import ( + "debug/elf" +) + +type elfProgHeader struct { + Type elf.ProgType + Flags elf.ProgFlag + Off uint64 + Vaddr uint64 + Paddr uint64 + Filesz uint64 + Memsz uint64 + Align uint64 +} + +// savePhdrs is invoked by stateify. +func (v *VDSO) savePhdrs() []elfProgHeader { + s := make([]elfProgHeader, 0, len(v.phdrs)) + for _, h := range v.phdrs { + s = append(s, elfProgHeader(h)) + } + return s +} + +// loadPhdrs is invoked by stateify. +func (v *VDSO) loadPhdrs(s []elfProgHeader) { + v.phdrs = make([]elf.ProgHeader, 0, len(s)) + for _, h := range s { + v.phdrs = append(v.phdrs, elf.ProgHeader(h)) + } +} diff --git a/pkg/sentry/memmap/BUILD b/pkg/sentry/memmap/BUILD new file mode 100644 index 000000000..7525fea45 --- /dev/null +++ b/pkg/sentry/memmap/BUILD @@ -0,0 +1,71 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "memmap_state", + srcs = [ + "mappable_range.go", + "mapping_set.go", + "mapping_set_impl.go", + ], + out = "memmap_state.go", + package = "memmap", +) + +go_template_instance( + name = "mappable_range", + out = "mappable_range.go", + package = "memmap", + prefix = "Mappable", + template = "//pkg/segment:generic_range", + types = { + "T": "uint64", + }, +) + +go_template_instance( + name = "mapping_set_impl", + out = "mapping_set_impl.go", + package = "memmap", + prefix = "Mapping", + template = 
"//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "MappableRange", + "Value": "MappingsOfRange", + "Functions": "mappingSetFunctions", + }, +) + +go_library( + name = "memmap", + srcs = [ + "mappable_range.go", + "mapping_set.go", + "mapping_set_impl.go", + "memmap.go", + "memmap_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/memmap", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/log", + "//pkg/refs", + "//pkg/sentry/context", + "//pkg/sentry/platform", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + ], +) + +go_test( + name = "memmap_test", + size = "small", + srcs = ["mapping_set_test.go"], + embed = [":memmap"], + deps = ["//pkg/sentry/usermem"], +) diff --git a/pkg/sentry/memmap/mapping_set.go b/pkg/sentry/memmap/mapping_set.go new file mode 100644 index 000000000..0cd42ffbf --- /dev/null +++ b/pkg/sentry/memmap/mapping_set.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memmap + +import ( + "fmt" + "math" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// MappingSet maps offsets into a Mappable to mappings of those offsets. It is +// used to implement Mappable.AddMapping and RemoveMapping for Mappables that +// may need to call MappingSpace.Invalidate. 
+// +// type MappingSet <generated by go_generics> + +// MappingsOfRange is the value type of MappingSet, and represents the set of +// all mappings of the corresponding MappableRange. +// +// Using a map offers O(1) lookups in RemoveMapping and +// mappingSetFunctions.Merge. +type MappingsOfRange map[MappingOfRange]struct{} + +// MappingOfRange represents a mapping of a MappableRange. +type MappingOfRange struct { + MappingSpace MappingSpace + AddrRange usermem.AddrRange +} + +func (r MappingOfRange) invalidate(opts InvalidateOpts) { + r.MappingSpace.Invalidate(r.AddrRange, opts) +} + +// String implements fmt.Stringer.String. +func (r MappingOfRange) String() string { + return fmt.Sprintf("%#v", r.AddrRange) +} + +// mappingSetFunctions implements segment.Functions for MappingSet. +type mappingSetFunctions struct{} + +// MinKey implements segment.Functions.MinKey. +func (mappingSetFunctions) MinKey() uint64 { + return 0 +} + +// MaxKey implements segment.Functions.MaxKey. +func (mappingSetFunctions) MaxKey() uint64 { + return math.MaxUint64 +} + +// ClearValue implements segment.Functions.ClearValue. +func (mappingSetFunctions) ClearValue(v *MappingsOfRange) { + *v = MappingsOfRange{} +} + +// Merge implements segment.Functions.Merge. +// +// Since each value is a map of MappingOfRanges, values can only be merged if +// all MappingOfRanges in each map have an exact pair in the other map, forming +// one contiguous region. +func (mappingSetFunctions) Merge(r1 MappableRange, val1 MappingsOfRange, r2 MappableRange, val2 MappingsOfRange) (MappingsOfRange, bool) { + if len(val1) != len(val2) { + return nil, false + } + + merged := make(MappingsOfRange, len(val1)) + + // Each MappingOfRange in val1 must have a matching region in val2, forming + // one contiguous region. + for k1 := range val1 { + // We expect val2 to to contain a key that forms a contiguous + // region with k1. 
+ k2 := MappingOfRange{ + MappingSpace: k1.MappingSpace, + AddrRange: usermem.AddrRange{ + Start: k1.AddrRange.End, + End: k1.AddrRange.End + usermem.Addr(r2.Length()), + }, + } + if _, ok := val2[k2]; !ok { + return nil, false + } + + // OK. Add it to the merged map. + merged[MappingOfRange{ + MappingSpace: k1.MappingSpace, + AddrRange: usermem.AddrRange{ + Start: k1.AddrRange.Start, + End: k2.AddrRange.End, + }, + }] = struct{}{} + } + + return merged, true +} + +// Split implements segment.Functions.Split. +func (mappingSetFunctions) Split(r MappableRange, val MappingsOfRange, split uint64) (MappingsOfRange, MappingsOfRange) { + if split <= r.Start || split >= r.End { + panic(fmt.Sprintf("split is not within range %v", r)) + } + + m1 := make(MappingsOfRange, len(val)) + m2 := make(MappingsOfRange, len(val)) + + // split is a value in MappableRange, we need the offset into the + // corresponding MappingsOfRange. + offset := usermem.Addr(split - r.Start) + for k := range val { + k1 := MappingOfRange{ + MappingSpace: k.MappingSpace, + AddrRange: usermem.AddrRange{ + Start: k.AddrRange.Start, + End: k.AddrRange.Start + offset, + }, + } + m1[k1] = struct{}{} + + k2 := MappingOfRange{ + MappingSpace: k.MappingSpace, + AddrRange: usermem.AddrRange{ + Start: k.AddrRange.Start + offset, + End: k.AddrRange.End, + }, + } + m2[k2] = struct{}{} + } + + return m1, m2 +} + +// subsetMapping returns the MappingOfRange that maps subsetRange, given that +// ms maps wholeRange beginning at addr. +// +// For instance, suppose wholeRange = [0x0, 0x2000) and addr = 0x4000, +// indicating that ms maps addresses [0x4000, 0x6000) to MappableRange [0x0, +// 0x2000). Then for subsetRange = [0x1000, 0x2000), subsetMapping returns a +// MappingOfRange for which AddrRange = [0x5000, 0x6000). 
+func subsetMapping(wholeRange, subsetRange MappableRange, ms MappingSpace, addr usermem.Addr) MappingOfRange { + if !wholeRange.IsSupersetOf(subsetRange) { + panic(fmt.Sprintf("%v is not a superset of %v", wholeRange, subsetRange)) + } + + offset := subsetRange.Start - wholeRange.Start + start := addr + usermem.Addr(offset) + return MappingOfRange{ + MappingSpace: ms, + AddrRange: usermem.AddrRange{ + Start: start, + End: start + usermem.Addr(subsetRange.Length()), + }, + } +} + +// AddMapping adds the given mapping and returns the set of MappableRanges that +// previously had no mappings. +// +// Preconditions: As for Mappable.AddMapping. +func (s *MappingSet) AddMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64) []MappableRange { + mr := MappableRange{offset, offset + uint64(ar.Length())} + var mapped []MappableRange + seg, gap := s.Find(mr.Start) + for { + switch { + case seg.Ok() && seg.Start() < mr.End: + seg = s.Isolate(seg, mr) + seg.Value()[subsetMapping(mr, seg.Range(), ms, ar.Start)] = struct{}{} + seg, gap = seg.NextNonEmpty() + + case gap.Ok() && gap.Start() < mr.End: + gapMR := gap.Range().Intersect(mr) + mapped = append(mapped, gapMR) + // Insert a set and continue from the above case. + seg, gap = s.Insert(gap, gapMR, make(MappingsOfRange)), MappingGapIterator{} + + default: + return mapped + } + } +} + +// RemoveMapping removes the given mapping and returns the set of +// MappableRanges that now have no mappings. +// +// Preconditions: As for Mappable.RemoveMapping. +func (s *MappingSet) RemoveMapping(ms MappingSpace, ar usermem.AddrRange, offset uint64) []MappableRange { + mr := MappableRange{offset, offset + uint64(ar.Length())} + var unmapped []MappableRange + + seg := s.FindSegment(mr.Start) + if !seg.Ok() { + panic(fmt.Sprintf("MappingSet.RemoveMapping(%v): no segment containing %#x: %v", mr, mr.Start, s)) + } + for seg.Ok() && seg.Start() < mr.End { + // Ensure this segment is limited to our range. 
+ seg = s.Isolate(seg, mr) + + // Remove this part of the mapping. + mappings := seg.Value() + delete(mappings, subsetMapping(mr, seg.Range(), ms, ar.Start)) + + if len(mappings) == 0 { + unmapped = append(unmapped, seg.Range()) + seg = s.Remove(seg).NextSegment() + } else { + seg = seg.NextSegment() + } + } + s.MergeAdjacent(mr) + return unmapped +} + +// Invalidate calls MappingSpace.Invalidate for all mappings of offsets in mr. +func (s *MappingSet) Invalidate(mr MappableRange, opts InvalidateOpts) { + for seg := s.LowerBoundSegment(mr.Start); seg.Ok() && seg.Start() < mr.End; seg = seg.NextSegment() { + segMR := seg.Range() + for m := range seg.Value() { + region := subsetMapping(segMR, segMR.Intersect(mr), m.MappingSpace, m.AddrRange.Start) + region.invalidate(opts) + } + } +} + +// InvalidateAll calls MappingSpace.Invalidate for all mappings of s. +func (s *MappingSet) InvalidateAll(opts InvalidateOpts) { + for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + for m := range seg.Value() { + m.invalidate(opts) + } + } +} diff --git a/pkg/sentry/memmap/mapping_set_test.go b/pkg/sentry/memmap/mapping_set_test.go new file mode 100644 index 000000000..10668d404 --- /dev/null +++ b/pkg/sentry/memmap/mapping_set_test.go @@ -0,0 +1,186 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package memmap + +import ( + "reflect" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type testMappingSpace struct { + // Ideally we'd store the full ranges that were invalidated, rather + // than individual calls to Invalidate, as they are an implementation + // detail, but this is the simplest way for now. + inv []usermem.AddrRange +} + +func (n *testMappingSpace) reset() { + n.inv = []usermem.AddrRange{} +} + +func (n *testMappingSpace) Invalidate(ar usermem.AddrRange, opts InvalidateOpts) { + n.inv = append(n.inv, ar) +} + +func TestAddRemoveMapping(t *testing.T) { + set := MappingSet{} + ms := &testMappingSpace{} + + mapped := set.AddMapping(ms, usermem.AddrRange{0x10000, 0x12000}, 0x1000) + if got, want := mapped, []MappableRange{{0x1000, 0x3000}}; !reflect.DeepEqual(got, want) { + t.Errorf("AddMapping: got %+v, wanted %+v", got, want) + } + + // Mappings (usermem.AddrRanges => memmap.MappableRange): + // [0x10000, 0x12000) => [0x1000, 0x3000) + t.Log(&set) + + mapped = set.AddMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000) + if len(mapped) != 0 { + t.Errorf("AddMapping: got %+v, wanted []", mapped) + } + + // Mappings: + // [0x10000, 0x11000) => [0x1000, 0x2000) + // [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000) + t.Log(&set) + + mapped = set.AddMapping(ms, usermem.AddrRange{0x30000, 0x31000}, 0x4000) + if got, want := mapped, []MappableRange{{0x4000, 0x5000}}; !reflect.DeepEqual(got, want) { + t.Errorf("AddMapping: got %+v, wanted %+v", got, want) + } + + // Mappings: + // [0x10000, 0x11000) => [0x1000, 0x2000) + // [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000) + // [0x30000, 0x31000) => [0x4000, 0x5000) + t.Log(&set) + + mapped = set.AddMapping(ms, usermem.AddrRange{0x12000, 0x15000}, 0x3000) + if got, want := mapped, []MappableRange{{0x3000, 0x4000}, {0x5000, 0x6000}}; !reflect.DeepEqual(got, want) { + t.Errorf("AddMapping: got %+v, wanted %+v", got, want) + } + + // Mappings: + // 
[0x10000, 0x11000) => [0x1000, 0x2000) + // [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000) + // [0x12000, 0x13000) => [0x3000, 0x4000) + // [0x13000, 0x14000) and [0x30000, 0x31000) => [0x4000, 0x5000) + // [0x14000, 0x15000) => [0x5000, 0x6000) + t.Log(&set) + + unmapped := set.RemoveMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0x1000) + if got, want := unmapped, []MappableRange{{0x1000, 0x2000}}; !reflect.DeepEqual(got, want) { + t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want) + } + + // Mappings: + // [0x11000, 0x12000) and [0x20000, 0x21000) => [0x2000, 0x3000) + // [0x12000, 0x13000) => [0x3000, 0x4000) + // [0x13000, 0x14000) and [0x30000, 0x31000) => [0x4000, 0x5000) + // [0x14000, 0x15000) => [0x5000, 0x6000) + t.Log(&set) + + unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000) + if len(unmapped) != 0 { + t.Errorf("RemoveMapping: got %+v, wanted []", unmapped) + } + + // Mappings: + // [0x11000, 0x13000) => [0x2000, 0x4000) + // [0x13000, 0x14000) and [0x30000, 0x31000) => [0x4000, 0x5000) + // [0x14000, 0x15000) => [0x5000, 0x6000) + t.Log(&set) + + unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x11000, 0x15000}, 0x2000) + if got, want := unmapped, []MappableRange{{0x2000, 0x4000}, {0x5000, 0x6000}}; !reflect.DeepEqual(got, want) { + t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want) + } + + // Mappings: + // [0x30000, 0x31000) => [0x4000, 0x5000) + t.Log(&set) + + unmapped = set.RemoveMapping(ms, usermem.AddrRange{0x30000, 0x31000}, 0x4000) + if got, want := unmapped, []MappableRange{{0x4000, 0x5000}}; !reflect.DeepEqual(got, want) { + t.Errorf("RemoveMapping: got %+v, wanted %+v", got, want) + } +} + +func TestInvalidateWholeMapping(t *testing.T) { + set := MappingSet{} + ms := &testMappingSpace{} + + set.AddMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0) + // Mappings: + // [0x10000, 0x11000) => [0, 0x1000) + t.Log(&set) + set.Invalidate(MappableRange{0, 0x1000}, InvalidateOpts{}) + 
 if got, want := ms.inv, []usermem.AddrRange{{0x10000, 0x11000}}; !reflect.DeepEqual(got, want) {
+ t.Errorf("Invalidate: got %+v, wanted %+v", got, want)
+ }
+}
+
+func TestInvalidatePartialMapping(t *testing.T) {
+ set := MappingSet{}
+ ms := &testMappingSpace{}
+
+ set.AddMapping(ms, usermem.AddrRange{0x10000, 0x13000}, 0)
+ // Mappings:
+ // [0x10000, 0x13000) => [0, 0x3000)
+ t.Log(&set)
+ set.Invalidate(MappableRange{0x1000, 0x2000}, InvalidateOpts{})
+ if got, want := ms.inv, []usermem.AddrRange{{0x11000, 0x12000}}; !reflect.DeepEqual(got, want) {
+ t.Errorf("Invalidate: got %+v, wanted %+v", got, want)
+ }
+}
+
+func TestInvalidateMultipleMappings(t *testing.T) {
+ set := MappingSet{}
+ ms := &testMappingSpace{}
+
+ set.AddMapping(ms, usermem.AddrRange{0x10000, 0x11000}, 0)
+ set.AddMapping(ms, usermem.AddrRange{0x20000, 0x21000}, 0x2000)
+ // Mappings:
+ // [0x10000, 0x11000) => [0, 0x1000)
+ // [0x20000, 0x21000) => [0x2000, 0x3000)
+ t.Log(&set)
+ set.Invalidate(MappableRange{0, 0x3000}, InvalidateOpts{})
+ if got, want := ms.inv, []usermem.AddrRange{{0x10000, 0x11000}, {0x20000, 0x21000}}; !reflect.DeepEqual(got, want) {
+ t.Errorf("Invalidate: got %+v, wanted %+v", got, want)
+ }
+}
+
+func TestInvalidateOverlappingMappings(t *testing.T) {
+ set := MappingSet{}
+ ms1 := &testMappingSpace{}
+ ms2 := &testMappingSpace{}
+
+ set.AddMapping(ms1, usermem.AddrRange{0x10000, 0x12000}, 0)
+ set.AddMapping(ms2, usermem.AddrRange{0x20000, 0x22000}, 0x1000)
+ // Mappings:
+ // ms1:[0x10000, 0x12000) => [0, 0x2000)
+ // ms2:[0x20000, 0x22000) => [0x1000, 0x3000)
+ t.Log(&set)
+ set.Invalidate(MappableRange{0x1000, 0x2000}, InvalidateOpts{})
+ if got, want := ms1.inv, []usermem.AddrRange{{0x11000, 0x12000}}; !reflect.DeepEqual(got, want) {
+ t.Errorf("Invalidate: ms1: got %+v, wanted %+v", got, want)
+ }
+ if got, want := ms2.inv, []usermem.AddrRange{{0x20000, 0x21000}}; !reflect.DeepEqual(got, want) {
+ t.Errorf("Invalidate: ms2: got %+v, wanted %+v", got, want)
+ 
} +} diff --git a/pkg/sentry/memmap/memmap.go b/pkg/sentry/memmap/memmap.go new file mode 100644 index 000000000..14fed55bc --- /dev/null +++ b/pkg/sentry/memmap/memmap.go @@ -0,0 +1,297 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package memmap defines semantics for memory mappings. +package memmap + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// Mappable represents a memory-mappable object, a mutable mapping from uint64 +// offsets to (platform.File, uint64 File offset) pairs. +// +// See mm/mm.go for Mappable's place in the lock order. +// +// Preconditions: For all Mappable methods, usermem.AddrRanges and +// MappableRanges must be non-empty (Length() != 0), and usermem.Addrs and +// Mappable offsets must be page-aligned. +type Mappable interface { + // AddMapping notifies the Mappable of a mapping from addresses ar in ms to + // offsets [offset, offset+ar.Length()) in this Mappable. + // + // Preconditions: offset+ar.Length() does not overflow. + AddMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64) error + + // RemoveMapping notifies the Mappable of the removal of a mapping from + // addresses ar in ms to offsets [offset, offset+ar.Length()) in this + // Mappable. 
+ // + // Preconditions: offset+ar.Length() does not overflow. The removed mapping + // must exist. + RemoveMapping(ctx context.Context, ms MappingSpace, ar usermem.AddrRange, offset uint64) + + // CopyMapping notifies the Mappable of an attempt to copy a mapping in ms + // from srcAR to dstAR. For most Mappables, this is equivalent to + // AddMapping. + // + // CopyMapping is only called when a mapping is copied within a given + // MappingSpace; it is analogous to Linux's vm_operations_struct::mremap. + // + // Preconditions: offset+dstAR.Length() does not overflow. The mapping at + // srcAR must exist. + CopyMapping(ctx context.Context, ms MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error + + // Translate returns the Mappable's current mappings for at least the range + // of offsets specified by required, and at most the range of offsets + // specified by optional. at is the set of access types that may be + // performed using the returned Translations. If not all required offsets + // are translated, it returns a non-nil error explaining why. Returned + // translations, and any mappings returned by platform.File.MapInternal for + // translated platform.Files, are valid until invalidated by a call back to + // MappingSpace.Invalidate or until the caller removes its mapping of the + // translated range. + // + // Preconditions: required.Length() > 0. optional.IsSupersetOf(required). + // required and optional must be page-aligned. The caller must have + // established a mapping for all of the queried offsets via a previous call + // to AddMapping. The caller is responsible for ensuring that calls to + // Translate synchronize with invalidation. + // + // Postconditions: See CheckTranslateResult. + Translate(ctx context.Context, required, optional MappableRange, at usermem.AccessType) ([]Translation, error) + + // InvalidateUnsavable requests that the Mappable invalidate Translations + // that cannot be preserved across save/restore. 
+ // + // Invariant: InvalidateUnsavable never races with concurrent calls to any + // other Mappable methods. + InvalidateUnsavable(ctx context.Context) error +} + +// Translations are returned by Mappable.Translate. +type Translation struct { + // Source is the translated range in the Mappable. + Source MappableRange + + // File is the mapped file. When the Translation is invalidated, pages + // mapped by File.MapInto must be unmapped, and pages mapped by + // File.MapInternal become invalid. + File platform.File + + // Offset is the offset into File at which this Translation begins. + Offset uint64 +} + +// CheckTranslateResult returns an error if (ts, terr) does not satisfy all +// postconditions for Mappable.Translate(required, optional). +// +// Preconditions: As for Mappable.Translate. +func CheckTranslateResult(required, optional MappableRange, ts []Translation, terr error) error { + // Verify that the inputs to Mappable.Translate were valid. + if !required.WellFormed() || required.Length() <= 0 { + panic(fmt.Sprintf("invalid required range: %v", required)) + } + if !usermem.Addr(required.Start).IsPageAligned() || !usermem.Addr(required.End).IsPageAligned() { + panic(fmt.Sprintf("unaligned required range: %v", required)) + } + if !optional.IsSupersetOf(required) { + panic(fmt.Sprintf("optional range %v is not a superset of required range %v", optional, required)) + } + if !usermem.Addr(optional.Start).IsPageAligned() || !usermem.Addr(optional.End).IsPageAligned() { + panic(fmt.Sprintf("unaligned optional range: %v", optional)) + } + + // The first Translation must include required.Start. 
+ if len(ts) != 0 && !ts[0].Source.Contains(required.Start) { + return fmt.Errorf("first Translation %+v does not cover start of required range %v", ts[0], required) + } + for i, t := range ts { + if !t.Source.WellFormed() || t.Source.Length() <= 0 { + return fmt.Errorf("Translation %+v has invalid Source", t) + } + if !usermem.Addr(t.Source.Start).IsPageAligned() || !usermem.Addr(t.Source.End).IsPageAligned() { + return fmt.Errorf("Translation %+v has unaligned Source", t) + } + if t.File == nil { + return fmt.Errorf("Translation %+v has nil File", t) + } + if !usermem.Addr(t.Offset).IsPageAligned() { + return fmt.Errorf("Translation %+v has unaligned Offset", t) + } + // Translations must be contiguous and in increasing order of + // Translation.Source. + if i > 0 && ts[i-1].Source.End != t.Source.Start { + return fmt.Errorf("Translations %+v and %+v are not contiguous", ts[i-1], t) + } + // At least part of each Translation must be required. + if t.Source.Intersect(required).Length() == 0 { + return fmt.Errorf("Translation %+v lies entirely outside required range %v", t, required) + } + // Translations must be constrained to the optional range. + if !optional.IsSupersetOf(t.Source) { + return fmt.Errorf("Translation %+v lies outside optional range %v", t, optional) + } + } + // If the set of Translations does not cover the entire required range, + // Translate must return a non-nil error explaining why. + if terr == nil { + if len(ts) == 0 { + return fmt.Errorf("no Translations and no error") + } + if t := ts[len(ts)-1]; !t.Source.Contains(required.End - 1) { + return fmt.Errorf("last Translation %+v does not reach end of required range %v, but Translate returned no error", t, required) + } + } + return nil +} + +// BusError may be returned by implementations of Mappable.Translate for errors +// that should result in SIGBUS delivery if they cause application page fault +// handling to fail. +type BusError struct { + // Err is the original error. 
+ Err error +} + +// Error implements error.Error. +func (b *BusError) Error() string { + return fmt.Sprintf("BusError: %v", b.Err.Error()) +} + +// MappableRange represents a range of uint64 offsets into a Mappable. +// +// type MappableRange <generated using go_generics> + +// String implements fmt.Stringer.String. +func (mr MappableRange) String() string { + return fmt.Sprintf("[%#x, %#x)", mr.Start, mr.End) +} + +// MappingSpace represents a mutable mapping from usermem.Addrs to (Mappable, +// uint64 offset) pairs. +type MappingSpace interface { + // Invalidate is called to notify the MappingSpace that values returned by + // previous calls to Mappable.Translate for offsets mapped by addresses in + // ar are no longer valid. + // + // Invalidate must not take any locks preceding mm.MemoryManager.activeMu + // in the lock order. + // + // Preconditions: ar.Length() != 0. ar must be page-aligned. + Invalidate(ar usermem.AddrRange, opts InvalidateOpts) +} + +// InvalidateOpts holds options to MappingSpace.Invalidate. +type InvalidateOpts struct { + // InvalidatePrivate is true if private pages in the invalidated region + // should also be discarded, causing their data to be lost. + InvalidatePrivate bool +} + +// MappingIdentity controls the lifetime of a Mappable, and provides +// information about the Mappable for /proc/[pid]/maps. It is distinct from +// Mappable because all Mappables that are coherent must compare equal to +// support the implementation of shared futexes, but different +// MappingIdentities may represent the same Mappable, in the same way that +// multiple fs.Files may represent the same fs.Inode. (This similarity is not +// coincidental; fs.File implements MappingIdentity, and some +// fs.InodeOperations implement Mappable.) +type MappingIdentity interface { + // MappingIdentity is reference-counted. + refs.RefCounter + + // MappedName returns the application-visible name shown in + // /proc/[pid]/maps. 
+ MappedName(ctx context.Context) string + + // DeviceID returns the device number shown in /proc/[pid]/maps. + DeviceID() uint64 + + // InodeID returns the inode number shown in /proc/[pid]/maps. + InodeID() uint64 + + // Msync has the same semantics as fs.FileOperations.Fsync(ctx, + // int64(mr.Start), int64(mr.End-1), fs.SyncData). + // (fs.FileOperations.Fsync() takes an inclusive end, but mr.End is + // exclusive, hence mr.End-1.) It is defined rather than Fsync so that + // implementors don't need to depend on the fs package for fs.SyncType. + Msync(ctx context.Context, mr MappableRange) error +} + +// MMapOpts specifies a request to create a memory mapping. +type MMapOpts struct { + // Length is the length of the mapping. + Length uint64 + + // MappingIdentity controls the lifetime of Mappable, and provides + // properties of the mapping shown in /proc/[pid]/maps. If MMapOpts is used + // to successfully create a memory mapping, a reference is taken on + // MappingIdentity. + MappingIdentity MappingIdentity + + // Mappable is the Mappable to be mapped. If Mappable is nil, the mapping + // is anonymous. If Mappable is not nil, it must remain valid as long as a + // reference is held on MappingIdentity. + Mappable Mappable + + // Offset is the offset into Mappable to map. If Mappable is nil, Offset is + // ignored. + Offset uint64 + + // Addr is the suggested address for the mapping. + Addr usermem.Addr + + // Fixed specifies whether this is a fixed mapping (it must be located at + // Addr). + Fixed bool + + // Unmap specifies whether existing mappings in the range being mapped may + // be replaced. If Unmap is true, Fixed must be true. + Unmap bool + + // Perms is the set of permissions to the applied to this mapping. + Perms usermem.AccessType + + // MaxPerms limits the set of permissions that may ever apply to this + // mapping. If Mappable is not nil, all memmap.Translations returned by + // Mappable.Translate must support all accesses in MaxPerms. 
+ // + // Preconditions: MaxAccessType should be an effective AccessType, as + // access cannot be limited beyond effective AccessTypes. + MaxPerms usermem.AccessType + + // Private is true if writes to the mapping should be propagated to a copy + // that is exclusive to the MemoryManager. + Private bool + + // GrowsDown is true if the mapping should be automatically expanded + // downward on guard page faults. + GrowsDown bool + + // Precommit is true if the platform should eagerly commit resources to the + // mapping (see platform.AddressSpace.MapFile). + Precommit bool + + // Hint is the name used for the mapping in /proc/[pid]/maps. If Hint is + // empty, MappingIdentity.MappedName() will be used instead. + // + // TODO: Replace entirely with MappingIdentity? + Hint string +} diff --git a/pkg/sentry/memutil/BUILD b/pkg/sentry/memutil/BUILD new file mode 100644 index 000000000..a387a0c9f --- /dev/null +++ b/pkg/sentry/memutil/BUILD @@ -0,0 +1,14 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "memutil", + srcs = [ + "memutil.go", + "memutil_unsafe.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/memutil", + visibility = ["//pkg/sentry:internal"], + deps = ["@org_golang_x_sys//unix:go_default_library"], +) diff --git a/pkg/sentry/memutil/memutil.go b/pkg/sentry/memutil/memutil.go new file mode 100644 index 000000000..4f245cf3c --- /dev/null +++ b/pkg/sentry/memutil/memutil.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package memutil contains the utility functions for memory operations. +package memutil diff --git a/pkg/sentry/memutil/memutil_unsafe.go b/pkg/sentry/memutil/memutil_unsafe.go new file mode 100644 index 000000000..32c27eb2f --- /dev/null +++ b/pkg/sentry/memutil/memutil_unsafe.go @@ -0,0 +1,35 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package memutil + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// CreateMemFD creates a memfd file and returns the fd. 
+func CreateMemFD(name string, flags int) (fd int, err error) { + p, err := syscall.BytePtrFromString(name) + if err != nil { + return -1, err + } + r0, _, e0 := syscall.Syscall(unix.SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e0 != 0 { + return -1, e0 + } + return int(r0), nil +} diff --git a/pkg/sentry/mm/BUILD b/pkg/sentry/mm/BUILD new file mode 100644 index 000000000..39bde2be3 --- /dev/null +++ b/pkg/sentry/mm/BUILD @@ -0,0 +1,155 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "mm_state", + srcs = [ + "aio_context.go", + "aio_context_state.go", + "file_refcount_set.go", + "io_list.go", + "mm.go", + "pma_set.go", + "save_restore.go", + "special_mappable.go", + "vma_set.go", + ], + out = "mm_state.go", + package = "mm", +) + +go_template_instance( + name = "file_refcount_set", + out = "file_refcount_set.go", + imports = { + "platform": "gvisor.googlesource.com/gvisor/pkg/sentry/platform", + }, + package = "mm", + prefix = "fileRefcount", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "platform.FileRange", + "Value": "int32", + "Functions": "fileRefcountSetFunctions", + }, +) + +go_template_instance( + name = "vma_set", + out = "vma_set.go", + consts = { + "minDegree": "8", + }, + imports = { + "usermem": "gvisor.googlesource.com/gvisor/pkg/sentry/usermem", + }, + package = "mm", + prefix = "vma", + template = "//pkg/segment:generic_set", + types = { + "Key": "usermem.Addr", + "Range": "usermem.AddrRange", + "Value": "vma", + "Functions": "vmaSetFunctions", + }, +) + +go_template_instance( + name = "pma_set", + out = "pma_set.go", + consts = { + "minDegree": "8", + }, + imports = { + "usermem": "gvisor.googlesource.com/gvisor/pkg/sentry/usermem", + }, + package = "mm", + prefix = "pma", + template 
= "//pkg/segment:generic_set", + types = { + "Key": "usermem.Addr", + "Range": "usermem.AddrRange", + "Value": "pma", + "Functions": "pmaSetFunctions", + }, +) + +go_template_instance( + name = "io_list", + out = "io_list.go", + package = "mm", + prefix = "io", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*ioResult", + }, +) + +go_library( + name = "mm", + srcs = [ + "address_space.go", + "aio_context.go", + "aio_context_state.go", + "debug.go", + "file_refcount_set.go", + "io.go", + "io_list.go", + "lifecycle.go", + "metadata.go", + "mm.go", + "mm_state.go", + "pma.go", + "pma_set.go", + "proc_pid_maps.go", + "save_restore.go", + "special_mappable.go", + "syscalls.go", + "vma.go", + "vma_set.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/mm", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/atomicbitops", + "//pkg/log", + "//pkg/refs", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/proc/seqfile", + "//pkg/sentry/limits", + "//pkg/sentry/memmap", + "//pkg/sentry/platform", + "//pkg/sentry/platform/safecopy", + "//pkg/sentry/safemem", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/sync", + "//pkg/syserror", + "//pkg/tcpip/buffer", + ], +) + +go_test( + name = "mm_test", + size = "small", + srcs = ["mm_test.go"], + embed = [":mm"], + deps = [ + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/context/contexttest", + "//pkg/sentry/limits", + "//pkg/sentry/memmap", + "//pkg/sentry/platform", + "//pkg/sentry/usermem", + "//pkg/syserror", + ], +) diff --git a/pkg/sentry/mm/README.md b/pkg/sentry/mm/README.md new file mode 100644 index 000000000..067733475 --- /dev/null +++ b/pkg/sentry/mm/README.md @@ -0,0 +1,279 @@ +This package provides an emulation of Linux semantics for application virtual +memory mappings. 
+ +For completeness, this document also describes aspects of the memory management +subsystem defined outside this package. + +# Background + +We begin by describing semantics for virtual memory in Linux. + +A virtual address space is defined as a collection of mappings from virtual +addresses to physical memory. However, userspace applications do not configure +mappings to physical memory directly. Instead, applications configure memory +mappings from virtual addresses to offsets into a file using the `mmap` system +call.[^mmap-anon] For example, a call to: + + mmap( + /* addr = */ 0x400000, + /* length = */ 0x1000, + PROT_READ | PROT_WRITE, + MAP_SHARED, + /* fd = */ 3, + /* offset = */ 0); + +creates a mapping of length 0x1000 bytes, starting at virtual address (VA) +0x400000, to offset 0 in the file represented by file descriptor (FD) 3. Within +the Linux kernel, virtual memory mappings are represented by *virtual memory +areas* (VMAs). Supposing that FD 3 represents file /tmp/foo, the state of the +virtual memory subsystem after the `mmap` call may be depicted as: + + VMA: VA:0x400000 -> /tmp/foo:0x0 + +Establishing a virtual memory area does not necessarily establish a mapping to a +physical address, because Linux has not necessarily provisioned physical memory +to store the file's contents. Thus, if the application attempts to read the +contents of VA 0x400000, it may incur a *page fault*, a CPU exception that +forces the kernel to create such a mapping to service the read. + +For a file, doing so consists of several logical phases: + +1. The kernel allocates physical memory to store the contents of the required + part of the file, and copies file contents to the allocated memory. 
Supposing + that the kernel chooses the physical memory at physical address (PA) + 0x2fb000, the resulting state of the system is: + + VMA: VA:0x400000 -> /tmp/foo:0x0 + Filemap: /tmp/foo:0x0 -> PA:0x2fb000 + + (In Linux the state of the mapping from file offset to physical memory is + stored in `struct address_space`, but to avoid confusion with other notions + of address space we will refer to this system as filemap, named after Linux + kernel source file `mm/filemap.c`.) + +2. The kernel stores the effective mapping from virtual to physical address in a + *page table entry* (PTE) in the application's *page tables*, which are used + by the CPU's virtual memory hardware to perform address translation. The + resulting state of the system is: + + VMA: VA:0x400000 -> /tmp/foo:0x0 + Filemap: /tmp/foo:0x0 -> PA:0x2fb000 + PTE: VA:0x400000 -----------------> PA:0x2fb000 + + The PTE is required for the application to actually use the contents of the + mapped file as virtual memory. However, the PTE is derived from the VMA and + filemap state, both of which are independently mutable, such that mutations + to either will affect the PTE. For example: + + - The application may remove the VMA using the `munmap` system call. This + breaks the mapping from VA:0x400000 to /tmp/foo:0x0, and consequently the + mapping from VA:0x400000 to PA:0x2fb000. However, it does not necessarily + break the mapping from /tmp/foo:0x0 to PA:0x2fb000, so a future mapping of + the same file offset may reuse this physical memory. + + - The application may invalidate the file's contents by passing a length of 0 + to the `ftruncate` system call. This breaks the mapping from /tmp/foo:0x0 + to PA:0x2fb000, and consequently the mapping from VA:0x400000 to + PA:0x2fb000. 
However, it does not break the mapping from VA:0x400000 to + /tmp/foo:0x0, so future changes to the file's contents may again be made + visible at VA:0x400000 after another page fault results in the allocation + of a new physical address. + + Note that, in order to correctly break the mapping from VA:0x400000 to + PA:0x2fb000 in the latter case, filemap must also store a *reverse mapping* + from /tmp/foo:0x0 to VA:0x400000 so that it can locate and remove the PTE. + +[^mmap-anon]: Memory mappings to non-files are discussed in later sections. + +## Private Mappings + +The preceding example considered VMAs created using the `MAP_SHARED` flag, which +means that PTEs derived from the mapping should always use physical memory that +represents the current state of the mapped file.[^mmap-dev-zero] Applications +can alternatively pass the `MAP_PRIVATE` flag to create a *private mapping*. +Private mappings are *copy-on-write*. + +Suppose that the application instead created a private mapping in the previous +example. In Linux, the state of the system after a read page fault would be: + + VMA: VA:0x400000 -> /tmp/foo:0x0 (private) + Filemap: /tmp/foo:0x0 -> PA:0x2fb000 + PTE: VA:0x400000 -----------------> PA:0x2fb000 (read-only) + +Now suppose the application attempts to write to VA:0x400000. For a shared +mapping, the write would be propagated to PA:0x2fb000, and the kernel would be +responsible for ensuring that the write is later propagated to the mapped file. +For a private mapping, the write incurs another page fault since the PTE is +marked read-only. In response, the kernel allocates physical memory to store the +mapping's *private copy* of the file's contents, copies file contents to the +allocated memory, and changes the PTE to map to the private copy. 
Supposing that +the kernel chooses the physical memory at physical address (PA) 0x5ea000, the +resulting state of the system is: + + VMA: VA:0x400000 -> /tmp/foo:0x0 (private) + Filemap: /tmp/foo:0x0 -> PA:0x2fb000 + PTE: VA:0x400000 -----------------> PA:0x5ea000 + +Note that the filemap mapping from /tmp/foo:0x0 to PA:0x2fb000 may still exist, +but is now irrelevant to this mapping. + +[^mmap-dev-zero]: Modulo files with special mmap semantics such as `/dev/zero`. + +## Anonymous Mappings + +Instead of passing a file to the `mmap` system call, applications can instead +request an *anonymous* mapping by passing the `MAP_ANONYMOUS` flag. +Semantically, an anonymous mapping is essentially a mapping to an ephemeral file +initially filled with zero bytes. Practically speaking, this is how shared +anonymous mappings are implemented, but private anonymous mappings do not result +in the creation of an ephemeral file; since there would be no way to modify the +contents of the underlying file through a private mapping, all private anonymous +mappings use a single shared page filled with zero bytes until copy-on-write +occurs. + +# Virtual Memory in the Sentry + +The sentry implements application virtual memory atop a host kernel, introducing +an additional level of indirection to the above. + +Consider the same scenario as in the previous section. Since the sentry handles +application system calls, the effect of an application `mmap` system call is to +create a VMA in the sentry (as opposed to the host kernel): + + Sentry VMA: VA:0x400000 -> /tmp/foo:0x0 + +When the application first incurs a page fault on this address, the host kernel +delivers information about the page fault to the sentry in a platform-dependent +manner, and the sentry handles the fault: + +1. The sentry allocates memory to store the contents of the required part of the + file, and copies file contents to the allocated memory. 
However, since the + sentry is implemented atop a host kernel, it does not configure mappings to + physical memory directly. Instead, mappable "memory" in the sentry is + represented by a host file descriptor and offset, since (as noted in + "Background") this is the memory mapping primitive provided by the host + kernel. In general, memory is allocated from a temporary host file using the + `filemem` package. Supposing that the sentry allocates offset 0x3000 from + host file "memory-file", the resulting state is: + + Sentry VMA: VA:0x400000 -> /tmp/foo:0x0 + Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000 + +2. The sentry stores the effective mapping from virtual address to host file in + a host VMA by invoking the `mmap` system call: + + Sentry VMA: VA:0x400000 -> /tmp/foo:0x0 + Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000 + Host VMA: VA:0x400000 -----------------> host:memory-file:0x3000 + +3. The sentry returns control to the application, which immediately incurs the + page fault again.[^mmap-populate] However, since a host VMA now exists for + the faulting virtual address, the host kernel now handles the page fault as + described in "Background": + + Sentry VMA: VA:0x400000 -> /tmp/foo:0x0 + Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000 + Host VMA: VA:0x400000 -----------------> host:memory-file:0x3000 + Host filemap: host:memory-file:0x3000 -> PA:0x2fb000 + Host PTE: VA:0x400000 --------------------------------------------> PA:0x2fb000 + +Thus, from an implementation standpoint, host VMAs serve the same purpose in the +sentry that PTEs do in Linux. As in Linux, sentry VMA and filemap state is +independently mutable, and the desired state of host VMAs is derived from that +state. + +[^mmap-populate]: The sentry could force the host kernel to establish PTEs when + it creates the host VMA by passing the `MAP_POPULATE` flag to + the `mmap` system call, but usually does not. 
This is because, + to reduce the number of page faults that require handling by + the sentry and (correspondingly) the number of host `mmap` + system calls, the sentry usually creates host VMAs that are + much larger than the single faulting page. + +## Private Mappings + +The sentry implements private mappings consistently with Linux. Before +copy-on-write, the private mapping example given in the Background results in: + + Sentry VMA: VA:0x400000 -> /tmp/foo:0x0 (private) + Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000 + Host VMA: VA:0x400000 -----------------> host:memory-file:0x3000 (read-only) + Host filemap: host:memory-file:0x3000 -> PA:0x2fb000 + Host PTE: VA:0x400000 --------------------------------------------> PA:0x2fb000 (read-only) + +When the application attempts to write to this address, the host kernel delivers +information about the resulting page fault to the sentry. Analogous to Linux, +the sentry allocates memory to store the mapping's private copy of the file's +contents, copies file contents to the allocated memory, and changes the host VMA +to map to the private copy. Supposing that the sentry chooses the offset 0x4000 +in host file `memory-file` to store the private copy, the state of the system +after copy-on-write is: + + Sentry VMA: VA:0x400000 -> /tmp/foo:0x0 (private) + Sentry filemap: /tmp/foo:0x0 -> host:memory-file:0x3000 + Host VMA: VA:0x400000 -----------------> host:memory-file:0x4000 + Host filemap: host:memory-file:0x4000 -> PA:0x5ea000 + Host PTE: VA:0x400000 --------------------------------------------> PA:0x5ea000 + +However, this highlights an important difference between Linux and the sentry. +In Linux, page tables are concrete (architecture-dependent) data structures +owned by the kernel. Conversely, the sentry has the ability to create and +destroy host VMAs using host system calls, but it does not have direct access to +their state. 
Thus, as written, if the application invokes the `munmap` system +call to remove the sentry VMA, it is non-trivial for the sentry to determine +that it should deallocate `host:memory-file:0x4000`. This implies that the +sentry must retain information about the host VMAs that it has created. + +## Anonymous Mappings + +The sentry implements anonymous mappings consistently with Linux, except that +there is no shared zero page. + +# Implementation Constructs + +In Linux: + +- A virtual address space is represented by `struct mm_struct`. + +- VMAs are represented by `struct vm_area_struct`, stored in `struct + mm_struct::mmap`. + +- Mappings from file offsets to physical memory are stored in `struct + address_space`. + +- Reverse mappings from file offsets to virtual mappings are stored in `struct + address_space::i_mmap`. + +- Physical memory pages are represented by a pointer to `struct page` or an + index called a *page frame number* (PFN), represented by `pfn_t`. + +- PTEs are represented by architecture-dependent type `pte_t`, stored in a table + hierarchy rooted at `struct mm_struct::pgd`. + +In the sentry: + +- A virtual address space is represented by type [`mm.MemoryManager`][mm]. + +- Sentry VMAs are represented by type [`mm.vma`][mm], stored in + `mm.MemoryManager.vmas`. + +- Mappings from sentry file offsets to host file offsets are abstracted through + interface method [`memmap.Mappable.Translate`][memmap]. + +- Reverse mappings from sentry file offsets to virtual mappings are abstracted + through interface methods [`memmap.Mappable.AddMapping` and + `memmap.Mappable.RemoveMapping`][memmap]. + +- Host files that may be mapped into host VMAs are represented by type + [`platform.File`][platform]. + +- Host VMAs are represented in the sentry by type [`mm.pma`][mm] ("platform + mapping area"), stored in `mm.MemoryManager.pmas`. 
+ +- Creation and destruction of host VMAs is abstracted through interface methods + [`platform.AddressSpace.MapFile` and `platform.AddressSpace.Unmap`][platform]. + +[filemem]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/platform/filemem/filemem.go +[memmap]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/memmap/memmap.go +[mm]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/mm/mm.go +[platform]: https://gvisor.googlesource.com/gvisor/+/master/pkg/sentry/platform/platform.go diff --git a/pkg/sentry/mm/address_space.go b/pkg/sentry/mm/address_space.go new file mode 100644 index 000000000..4dd67b1ea --- /dev/null +++ b/pkg/sentry/mm/address_space.go @@ -0,0 +1,223 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mm + +import ( + "fmt" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/atomicbitops" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// AddressSpace returns the platform.AddressSpace bound to mm. +// +// Preconditions: The caller must have called mm.Activate(). +func (mm *MemoryManager) AddressSpace() platform.AddressSpace { + if atomic.LoadInt32(&mm.active) == 0 { + panic("trying to use inactive address space?") + } + return mm.as +} + +// Activate ensures this MemoryManager has a platform.AddressSpace. +// +// The caller must not hold any locks when calling Activate. 
+// +// When this MemoryManager is no longer needed by a task, it should call +// Deactivate to release the reference. +func (mm *MemoryManager) Activate() error { + // Fast path: the MemoryManager already has an active + // platform.AddressSpace, and we just need to indicate that we need it too. + if atomicbitops.IncUnlessZeroInt32(&mm.active) { + return nil + } + + for { + // Slow path: may need to synchronize with other goroutines changing + // mm.active to or from zero. + mm.activeMu.Lock() + // Inline Unlock instead of using a defer for performance since this + // method is commonly in the hot-path. + + // Check if we raced with another goroutine performing activation. + if atomic.LoadInt32(&mm.active) > 0 { + // This can't race; Deactivate can't decrease mm.active from 1 to 0 + // without holding activeMu. + atomic.AddInt32(&mm.active, 1) + mm.activeMu.Unlock() + return nil + } + + // Do we have a context? If so, then we never unmapped it. This can + // only be the case if !mm.p.CooperativelySchedulesAddressSpace(). + if mm.as != nil { + atomic.StoreInt32(&mm.active, 1) + mm.activeMu.Unlock() + return nil + } + + // Get a new address space. We must force unmapping by passing nil to + // NewAddressSpace if requested. (As in the nil interface object, not a + // typed nil.) + mappingsID := (interface{})(mm) + if mm.unmapAllOnActivate { + mappingsID = nil + } + as, c, err := mm.p.NewAddressSpace(mappingsID) + if err != nil { + mm.activeMu.Unlock() + return err + } + if as == nil { + // AddressSpace is unavailable, we must wait. + // + // activeMu must not be held while waiting, as the user + // of the address space we are waiting on may attempt + // to take activeMu. + // + // Don't call UninterruptibleSleepStart to register the + // wait to allow the watchdog stuck task to trigger in + // case a process is starved waiting for the address + // space. + mm.activeMu.Unlock() + <-c + continue + } + + // Okay, we could restore all mappings at this point. 
+ // But forget that. Let's just let them fault in. + mm.as = as + + // Unmapping is done, if necessary. + mm.unmapAllOnActivate = false + + // Now that mm.as has been assigned, we can set mm.active to a non-zero value + // to enable the fast path. + atomic.StoreInt32(&mm.active, 1) + + mm.activeMu.Unlock() + return nil + } +} + +// Deactivate releases a reference to the MemoryManager. +func (mm *MemoryManager) Deactivate() error { + // Fast path: this is not the last goroutine to deactivate the + // MemoryManager. + if atomicbitops.DecUnlessOneInt32(&mm.active) { + return nil + } + + mm.activeMu.Lock() + // Same as Activate. + + // Still active? + if atomic.AddInt32(&mm.active, -1) > 0 { + mm.activeMu.Unlock() + return nil + } + + // Can we hold on to the address space? + if !mm.p.CooperativelySchedulesAddressSpace() { + mm.activeMu.Unlock() + return nil + } + + // Release the address space. + if err := mm.as.Release(); err != nil { + atomic.StoreInt32(&mm.active, 1) + mm.activeMu.Unlock() + return err + } + + // Lost it. + mm.as = nil + mm.activeMu.Unlock() + return nil +} + +// mapASLocked maps addresses in ar into mm.as. If precommit is true, mappings +// for all addresses in ar should be precommitted. +// +// Preconditions: mm.activeMu must be locked. mm.as != nil. ar.Length() != 0. +// ar must be page-aligned. pseg.Range().Contains(ar.Start). +func (mm *MemoryManager) mapASLocked(pseg pmaIterator, ar usermem.AddrRange, precommit bool) error { + // By default, map entire pmas at a time, under the assumption that there + // is no cost to mapping more of a pma than necessary. + mapAR := usermem.AddrRange{0, ^usermem.Addr(usermem.PageSize - 1)} + if precommit { + // When explicitly precommitting, only map ar, since overmapping may + // incur unexpected resource usage. + mapAR = ar + } else if mapUnit := mm.p.MapUnit(); mapUnit != 0 { + // Limit the range we map to ar, aligned to mapUnit. 
+ mapMask := usermem.Addr(mapUnit - 1) + mapAR.Start = ar.Start &^ mapMask + // If rounding ar.End up overflows, just keep the existing mapAR.End. + if end := (ar.End + mapMask) &^ mapMask; end >= ar.End { + mapAR.End = end + } + } + if checkInvariants { + if !mapAR.IsSupersetOf(ar) { + panic(fmt.Sprintf("mapAR %#v is not a superset of ar %#v", mapAR, ar)) + } + } + + for { + pma := pseg.ValuePtr() + pmaAR := pseg.Range() + pmaMapAR := pmaAR.Intersect(mapAR) + perms := pma.vmaEffectivePerms + if pma.needCOW { + perms.Write = false + } + if err := pma.file.MapInto(mm.as, pmaMapAR.Start, pseg.fileRangeOf(pmaMapAR), perms, precommit); err != nil { + return err + } + // Since this checks ar.End and not mapAR.End, we will never map a pma + // that is not required. + if ar.End <= pmaAR.End { + return nil + } + pseg = pseg.NextSegment() + } +} + +// unmapASLocked removes all AddressSpace mappings for addresses in ar. +// +// Preconditions: mm.activeMu must be locked. +func (mm *MemoryManager) unmapASLocked(ar usermem.AddrRange) { + if mm.as == nil { + // No AddressSpace? Force all mappings to be unmapped on the next + // Activate. + mm.unmapAllOnActivate = true + return + } + + // unmapASLocked doesn't require vmas or pmas to exist for ar, so it can be + // passed ranges that include addresses that can't be mapped by the + // application. + ar = ar.Intersect(mm.applicationAddrRange()) + + // Note that this AddressSpace may or may not be active. If the + // platform does not require cooperative sharing of AddressSpaces, they + // are retained between Deactivate/Activate calls. Despite not being + // active, it is still valid to perform operations on these address + // spaces. + mm.as.Unmap(ar.Start, uint64(ar.Length())) +} diff --git a/pkg/sentry/mm/aio_context.go b/pkg/sentry/mm/aio_context.go new file mode 100644 index 000000000..992bde5a5 --- /dev/null +++ b/pkg/sentry/mm/aio_context.go @@ -0,0 +1,377 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mm + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// aioManager creates and manages asynchronous I/O contexts. +type aioManager struct { + // mu protects below. + mu sync.Mutex `state:"nosave"` + + // aioContexts is the set of asynchronous I/O contexts. + contexts map[uint64]*AIOContext +} + +func (a *aioManager) destroy() { + a.mu.Lock() + defer a.mu.Unlock() + + for _, ctx := range a.contexts { + ctx.destroy() + } +} + +// newAIOContext creates a new context for asynchronous I/O. +// +// Returns false if 'id' is currently in use. +func (a *aioManager) newAIOContext(events uint32, id uint64) bool { + a.mu.Lock() + defer a.mu.Unlock() + + if _, ok := a.contexts[id]; ok { + return false + } + + a.contexts[id] = &AIOContext{ + done: make(chan struct{}, 1), + maxOutstanding: events, + } + return true +} + +// destroyAIOContext destroys an asynchronous I/O context. +// +// False is returned if the context does not exist. 
+func (a *aioManager) destroyAIOContext(id uint64) bool { + a.mu.Lock() + defer a.mu.Unlock() + ctx, ok := a.contexts[id] + if !ok { + return false + } + delete(a.contexts, id) + ctx.destroy() + return true +} + +// lookupAIOContext looks up the given context. +// +// Returns false if context does not exist. +func (a *aioManager) lookupAIOContext(id uint64) (*AIOContext, bool) { + a.mu.Lock() + defer a.mu.Unlock() + ctx, ok := a.contexts[id] + return ctx, ok +} + +// ioResult is a completed I/O operation. +type ioResult struct { + data interface{} + ioEntry +} + +// AIOContext is a single asynchronous I/O context. +type AIOContext struct { + // done is the notification channel used for all requests. + done chan struct{} `state:"nosave"` + + // mu protects below. + mu sync.Mutex `state:"nosave"` + + // results is the set of completed requests. + results ioList + + // maxOutstanding is the maximum number of outstanding entries; this value + // is immutable. + maxOutstanding uint32 + + // outstanding is the number of requests outstanding; this will effectively + // be the number of entries in the result list or that are expected to be + // added to the result list. + outstanding uint32 + + // dead is set when the context is destroyed. + dead bool `state:"zerovalue"` +} + +// destroy marks the context dead. +func (ctx *AIOContext) destroy() { + ctx.mu.Lock() + defer ctx.mu.Unlock() + ctx.dead = true + if ctx.outstanding == 0 { + close(ctx.done) + } +} + +// Prepare reserves space for a new request, returning true if available. +// Returns false if the context is busy. +func (ctx *AIOContext) Prepare() bool { + ctx.mu.Lock() + defer ctx.mu.Unlock() + if ctx.outstanding >= ctx.maxOutstanding { + return false + } + ctx.outstanding++ + return true +} + +// PopRequest pops a completed request if available, this function does not do +// any blocking. Returns false if no request is available. 
+func (ctx *AIOContext) PopRequest() (interface{}, bool) { + ctx.mu.Lock() + defer ctx.mu.Unlock() + + // Is there anything ready? + if e := ctx.results.Front(); e != nil { + ctx.results.Remove(e) + ctx.outstanding-- + if ctx.outstanding == 0 && ctx.dead { + close(ctx.done) + } + return e.data, true + } + return nil, false +} + +// FinishRequest finishes a pending request. It queues up the data +// and notifies listeners. +func (ctx *AIOContext) FinishRequest(data interface{}) { + ctx.mu.Lock() + defer ctx.mu.Unlock() + + // Push to the list and notify opportunistically. The channel notify + // here is guaranteed to be safe because outstanding must be non-zero. + // The done channel is only closed when outstanding reaches zero. + ctx.results.PushBack(&ioResult{data: data}) + + select { + case ctx.done <- struct{}{}: + default: + } +} + +// WaitChannel returns a channel that is notified when an AIO request is +// completed. +// +// The boolean return value indicates whether or not the context is active. +func (ctx *AIOContext) WaitChannel() (chan struct{}, bool) { + ctx.mu.Lock() + defer ctx.mu.Unlock() + if ctx.outstanding == 0 && ctx.dead { + return nil, false + } + return ctx.done, true +} + +// aioMappable implements memmap.MappingIdentity and memmap.Mappable for AIO +// ring buffers. +type aioMappable struct { + refs.AtomicRefCount + + p platform.Platform + fr platform.FileRange +} + +var aioRingBufferSize = uint64(usermem.Addr(linux.AIORingSize).MustRoundUp()) + +func newAIOMappable(p platform.Platform) (*aioMappable, error) { + fr, err := p.Memory().Allocate(aioRingBufferSize, usage.Anonymous) + if err != nil { + return nil, err + } + return &aioMappable{p: p, fr: fr}, nil +} + +// DecRef implements refs.RefCounter.DecRef. +func (m *aioMappable) DecRef() { + m.AtomicRefCount.DecRefWithDestructor(func() { + m.p.Memory().DecRef(m.fr) + }) +} + +// MappedName implements memmap.MappingIdentity.MappedName. 
+func (m *aioMappable) MappedName(ctx context.Context) string { + return "[aio]" +} + +// DeviceID implements memmap.MappingIdentity.DeviceID. +func (m *aioMappable) DeviceID() uint64 { + return 0 +} + +// InodeID implements memmap.MappingIdentity.InodeID. +func (m *aioMappable) InodeID() uint64 { + return 0 +} + +// Msync implements memmap.MappingIdentity.Msync. +func (m *aioMappable) Msync(ctx context.Context, mr memmap.MappableRange) error { + // Linux: aio_ring_fops.fsync == NULL + return syserror.EINVAL +} + +// AddMapping implements memmap.Mappable.AddMapping. +func (m *aioMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error { + // Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap() + // sets VM_DONTEXPAND). + if offset != 0 || uint64(ar.Length()) != aioRingBufferSize { + return syserror.EFAULT + } + return nil +} + +// RemoveMapping implements memmap.Mappable.RemoveMapping. +func (m *aioMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) { +} + +// CopyMapping implements memmap.Mappable.CopyMapping. +func (m *aioMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error { + // Don't allow mappings to be expanded (in Linux, fs/aio.c:aio_ring_mmap() + // sets VM_DONTEXPAND). + if offset != 0 || uint64(dstAR.Length()) != aioRingBufferSize { + return syserror.EFAULT + } + // Require that the mapping correspond to a live AIOContext. Compare + // Linux's fs/aio.c:aio_ring_mremap(). + mm, ok := ms.(*MemoryManager) + if !ok { + return syserror.EINVAL + } + am := &mm.aioManager + am.mu.Lock() + defer am.mu.Unlock() + oldID := uint64(srcAR.Start) + aioCtx, ok := am.contexts[oldID] + if !ok { + return syserror.EINVAL + } + aioCtx.mu.Lock() + defer aioCtx.mu.Unlock() + if aioCtx.dead { + return syserror.EINVAL + } + // Use the new ID for the AIOContext. 
+ am.contexts[uint64(dstAR.Start)] = aioCtx + delete(am.contexts, oldID) + return nil +} + +// Translate implements memmap.Mappable.Translate. +func (m *aioMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) { + var err error + if required.End > m.fr.Length() { + err = &memmap.BusError{syserror.EFAULT} + } + if source := optional.Intersect(memmap.MappableRange{0, m.fr.Length()}); source.Length() != 0 { + return []memmap.Translation{ + { + Source: source, + File: m.p.Memory(), + Offset: m.fr.Start + source.Start, + }, + }, err + } + return nil, err +} + +// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable. +func (m *aioMappable) InvalidateUnsavable(ctx context.Context) error { + return nil +} + +// NewAIOContext creates a new context for asynchronous I/O. +// +// NewAIOContext is analogous to Linux's fs/aio.c:ioctx_alloc(). +func (mm *MemoryManager) NewAIOContext(ctx context.Context, events uint32) (uint64, error) { + // libaio get_ioevents() expects context "handle" to be a valid address. + // libaio peeks inside looking for a magic number. This function allocates + // a page per context and keeps it set to zeroes to ensure it will not + // match AIO_RING_MAGIC and make libaio happy. + m, err := newAIOMappable(mm.p) + if err != nil { + return 0, err + } + defer m.DecRef() + addr, err := mm.MMap(ctx, memmap.MMapOpts{ + Length: aioRingBufferSize, + MappingIdentity: m, + Mappable: m, + // TODO: Linux does "do_mmap_pgoff(..., PROT_READ | + // PROT_WRITE, ...)" in fs/aio.c:aio_setup_ring(); why do we make this + // mapping read-only? + Perms: usermem.Read, + MaxPerms: usermem.Read, + }) + if err != nil { + return 0, err + } + id := uint64(addr) + if !mm.aioManager.newAIOContext(events, id) { + mm.MUnmap(ctx, addr, aioRingBufferSize) + return 0, syserror.EINVAL + } + return id, nil +} + +// DestroyAIOContext destroys an asynchronous I/O context. 
It returns false if +// the context does not exist. +func (mm *MemoryManager) DestroyAIOContext(ctx context.Context, id uint64) bool { + if _, ok := mm.LookupAIOContext(ctx, id); !ok { + return false + } + + // Only unmaps after it assured that the address is a valid aio context to + // prevent random memory from being unmapped. + // + // Note: It's possible to unmap this address and map something else into + // the same address. Then it would be unmapping memory that it doesn't own. + // This is, however, the way Linux implements AIO. Keeps the same [weird] + // semantics in case anyone relies on it. + mm.MUnmap(ctx, usermem.Addr(id), aioRingBufferSize) + + return mm.aioManager.destroyAIOContext(id) +} + +// LookupAIOContext looks up the given context. It returns false if the context +// does not exist. +func (mm *MemoryManager) LookupAIOContext(ctx context.Context, id uint64) (*AIOContext, bool) { + aioCtx, ok := mm.aioManager.lookupAIOContext(id) + if !ok { + return nil, false + } + + // Protect against 'ids' that are inaccessible (Linux also reads 4 bytes + // from id). + var buf [4]byte + _, err := mm.CopyIn(ctx, usermem.Addr(id), buf[:], usermem.IOOpts{}) + if err != nil { + return nil, false + } + + return aioCtx, true +} diff --git a/pkg/sentry/mm/aio_context_state.go b/pkg/sentry/mm/aio_context_state.go new file mode 100644 index 000000000..1a5e56f8e --- /dev/null +++ b/pkg/sentry/mm/aio_context_state.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package mm + +// afterLoad is invoked by stateify. +func (a *AIOContext) afterLoad() { + a.done = make(chan struct{}, 1) +} diff --git a/pkg/sentry/mm/debug.go b/pkg/sentry/mm/debug.go new file mode 100644 index 000000000..56d0490f0 --- /dev/null +++ b/pkg/sentry/mm/debug.go @@ -0,0 +1,98 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mm + +import ( + "bytes" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" +) + +const ( + // If checkInvariants is true, perform runtime checks for invariants + // expected by the mm package. This is normally disabled since MM is a + // significant hot path in general, and some such checks (notably + // memmap.CheckTranslateResult) are very expensive. + checkInvariants = false + + // If logIOErrors is true, log I/O errors that originate from MM before + // converting them to EFAULT. + logIOErrors = false +) + +// String implements fmt.Stringer.String. +func (mm *MemoryManager) String() string { + return mm.DebugString(context.Background()) +} + +// DebugString returns a string containing information about mm for debugging. 
+func (mm *MemoryManager) DebugString(ctx context.Context) string { + mm.mappingMu.RLock() + defer mm.mappingMu.RUnlock() + mm.activeMu.RLock() + defer mm.activeMu.RUnlock() + return mm.debugStringLocked(ctx) +} + +// Preconditions: mm.mappingMu and mm.activeMu must be locked. +func (mm *MemoryManager) debugStringLocked(ctx context.Context) string { + var b bytes.Buffer + b.WriteString("VMAs:\n") + for vseg := mm.vmas.FirstSegment(); vseg.Ok(); vseg = vseg.NextSegment() { + b.Write(mm.vmaMapsEntryLocked(ctx, vseg)) + } + b.WriteString("PMAs:\n") + for pseg := mm.pmas.FirstSegment(); pseg.Ok(); pseg = pseg.NextSegment() { + b.Write(pseg.debugStringEntryLocked()) + } + return string(b.Bytes()) +} + +// Preconditions: mm.activeMu must be locked. +func (pseg pmaIterator) debugStringEntryLocked() []byte { + var b bytes.Buffer + + fmt.Fprintf(&b, "%08x-%08x ", pseg.Start(), pseg.End()) + + pma := pseg.ValuePtr() + if pma.vmaEffectivePerms.Read { + b.WriteByte('r') + } else { + b.WriteByte('-') + } + if pma.vmaEffectivePerms.Write { + if pma.needCOW { + b.WriteByte('c') + } else { + b.WriteByte('w') + } + } else { + b.WriteByte('-') + } + if pma.vmaEffectivePerms.Execute { + b.WriteByte('x') + } else { + b.WriteByte('-') + } + if pma.private { + b.WriteByte('p') + } else { + b.WriteByte('s') + } + + fmt.Fprintf(&b, " %08x %T\n", pma.off, pma.file) + return b.Bytes() +} diff --git a/pkg/sentry/mm/io.go b/pkg/sentry/mm/io.go new file mode 100644 index 000000000..cac81a59d --- /dev/null +++ b/pkg/sentry/mm/io.go @@ -0,0 +1,604 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mm + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// There are two supported ways to copy data to/from application virtual +// memory: +// +// 1. Internally-mapped copying: Determine the platform.File that backs the +// copied-to/from virtual address, obtain a mapping of its pages, and read or +// write to the mapping. +// +// 2. AddressSpace copying: If platform.Platform.SupportsAddressSpaceIO() is +// true, AddressSpace permissions are applicable, and an AddressSpace is +// available, copy directly through the AddressSpace, handling faults as +// needed. +// +// (Given that internally-mapped copying requires that backing memory is always +// implemented using a host file descriptor, we could also preadv/pwritev to it +// instead. But this would incur a host syscall for each use of the mapped +// page, whereas mmap is a one-time cost.) +// +// The fixed overhead of internally-mapped copying is expected to be higher +// than that of AddressSpace copying since the former always needs to translate +// addresses, whereas the latter only needs to do so when faults occur. 
+// However, the throughput of internally-mapped copying is expected to be +// somewhat higher than that of AddressSpace copying due to the high cost of +// page faults and because implementations of the latter usually rely on +// safecopy, which doesn't use AVX registers. So we prefer to use AddressSpace +// copying (when available) for smaller copies, and switch to internally-mapped +// copying once a size threshold is exceeded. +const ( + // copyMapMinBytes is the size threshold for switching to internally-mapped + // copying in CopyOut, CopyIn, and ZeroOut. + copyMapMinBytes = 32 << 10 // 32 KB + + // rwMapMinBytes is the size threshold for switching to internally-mapped + // copying in CopyOutFrom and CopyInTo. It's lower than copyMapMinBytes + // since AddressSpace copying in this case requires additional buffering; + // see CopyOutFrom for details. + rwMapMinBytes = 512 +) + +// checkIORange is similar to usermem.Addr.ToRange, but applies bounds checks +// consistent with Linux's arch/x86/include/asm/uaccess.h:access_ok(). +// +// Preconditions: length >= 0. +func (mm *MemoryManager) checkIORange(addr usermem.Addr, length int64) (usermem.AddrRange, bool) { + // Note that access_ok() constrains end even if length == 0. + ar, ok := addr.ToRange(uint64(length)) + return ar, (ok && ar.End <= mm.layout.MaxAddr) +} + +// checkIOVec applies bound checks consistent with Linux's +// arch/x86/include/asm/uaccess.h:access_ok() to ars. +func (mm *MemoryManager) checkIOVec(ars usermem.AddrRangeSeq) bool { + for !ars.IsEmpty() { + ar := ars.Head() + if _, ok := mm.checkIORange(ar.Start, int64(ar.Length())); !ok { + return false + } + ars = ars.Tail() + } + return true +} + +func (mm *MemoryManager) asioEnabled(opts usermem.IOOpts) bool { + return mm.haveASIO && !opts.IgnorePermissions && opts.AddressSpaceActive +} + +// translateIOError converts errors to EFAULT, as is usually reported for all +// I/O errors originating from MM in Linux. 
+func translateIOError(ctx context.Context, err error) error { + if err == nil { + return nil + } + if logIOErrors { + ctx.Debugf("MM I/O error: %v", err) + } + return syserror.EFAULT +} + +// CopyOut implements usermem.IO.CopyOut. +func (mm *MemoryManager) CopyOut(ctx context.Context, addr usermem.Addr, src []byte, opts usermem.IOOpts) (int, error) { + ar, ok := mm.checkIORange(addr, int64(len(src))) + if !ok { + return 0, syserror.EFAULT + } + + if len(src) == 0 { + return 0, nil + } + + // Do AddressSpace IO if applicable. + if mm.asioEnabled(opts) && len(src) < copyMapMinBytes { + return mm.asCopyOut(ctx, addr, src) + } + + // Go through internal mappings. + n64, err := mm.withInternalMappings(ctx, ar, usermem.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) { + n, err := safemem.CopySeq(ims, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(src))) + return n, translateIOError(ctx, err) + }) + return int(n64), err +} + +func (mm *MemoryManager) asCopyOut(ctx context.Context, addr usermem.Addr, src []byte) (int, error) { + var done int + for { + n, err := mm.as.CopyOut(addr+usermem.Addr(done), src[done:]) + done += n + if err == nil { + return done, nil + } + if f, ok := err.(platform.SegmentationFault); ok { + ar, _ := addr.ToRange(uint64(len(src))) + if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Write); err != nil { + return done, err + } + continue + } + return done, translateIOError(ctx, err) + } +} + +// CopyIn implements usermem.IO.CopyIn. +func (mm *MemoryManager) CopyIn(ctx context.Context, addr usermem.Addr, dst []byte, opts usermem.IOOpts) (int, error) { + ar, ok := mm.checkIORange(addr, int64(len(dst))) + if !ok { + return 0, syserror.EFAULT + } + + if len(dst) == 0 { + return 0, nil + } + + // Do AddressSpace IO if applicable. + if mm.asioEnabled(opts) && len(dst) < copyMapMinBytes { + return mm.asCopyIn(ctx, addr, dst) + } + + // Go through internal mappings. 
	n64, err := mm.withInternalMappings(ctx, ar, usermem.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
		n, err := safemem.CopySeq(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(dst)), ims)
		return n, translateIOError(ctx, err)
	})
	return int(n64), err
}

// asCopyIn copies len(dst) bytes from addr into dst using AddressSpace I/O,
// retrying after faulting pages in whenever the AddressSpace reports a
// segmentation fault.
func (mm *MemoryManager) asCopyIn(ctx context.Context, addr usermem.Addr, dst []byte) (int, error) {
	var done int
	for {
		n, err := mm.as.CopyIn(addr+usermem.Addr(done), dst[done:])
		done += n
		if err == nil {
			return done, nil
		}
		if f, ok := err.(platform.SegmentationFault); ok {
			ar, _ := addr.ToRange(uint64(len(dst)))
			if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Read); err != nil {
				return done, err
			}
			continue
		}
		return done, translateIOError(ctx, err)
	}
}

// ZeroOut implements usermem.IO.ZeroOut.
func (mm *MemoryManager) ZeroOut(ctx context.Context, addr usermem.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
	ar, ok := mm.checkIORange(addr, toZero)
	if !ok {
		return 0, syserror.EFAULT
	}

	if toZero == 0 {
		return 0, nil
	}

	// Do AddressSpace IO if applicable.
	if mm.asioEnabled(opts) && toZero < copyMapMinBytes {
		return mm.asZeroOut(ctx, addr, toZero)
	}

	// Go through internal mappings.
	return mm.withInternalMappings(ctx, ar, usermem.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) {
		n, err := safemem.ZeroSeq(dsts)
		return n, translateIOError(ctx, err)
	})
}

// asZeroOut zeroes toZero bytes starting at addr using AddressSpace I/O,
// retrying after faulting pages in whenever the AddressSpace reports a
// segmentation fault.
func (mm *MemoryManager) asZeroOut(ctx context.Context, addr usermem.Addr, toZero int64) (int64, error) {
	var done int64
	for {
		n, err := mm.as.ZeroOut(addr+usermem.Addr(done), uintptr(toZero-done))
		done += int64(n)
		if err == nil {
			return done, nil
		}
		if f, ok := err.(platform.SegmentationFault); ok {
			ar, _ := addr.ToRange(uint64(toZero))
			if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.Write); err != nil {
				return done, err
			}
			continue
		}
		return done, translateIOError(ctx, err)
	}
}

// CopyOutFrom implements usermem.IO.CopyOutFrom.
func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars usermem.AddrRangeSeq, src safemem.Reader, opts usermem.IOOpts) (int64, error) {
	if !mm.checkIOVec(ars) {
		return 0, syserror.EFAULT
	}

	if ars.NumBytes() == 0 {
		return 0, nil
	}

	// Do AddressSpace IO if applicable.
	if mm.asioEnabled(opts) && ars.NumBytes() < rwMapMinBytes {
		// We have to introduce a buffered copy, instead of just passing a
		// safemem.BlockSeq representing addresses in the AddressSpace to src.
		// This is because usermem.IO.CopyOutFrom() guarantees that it calls
		// src.ReadToBlocks() at most once, which is incompatible with handling
		// faults between calls. In the future, this is probably best resolved
		// by introducing a CopyOutFrom variant or option that allows it to
		// call src.ReadToBlocks() any number of times.
		//
		// This issue applies to CopyInTo as well.
		buf := make([]byte, int(ars.NumBytes()))
		bufN, bufErr := src.ReadToBlocks(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf)))
		var done int64
		for done < int64(bufN) {
			ar := ars.Head()
			cplen := int64(ar.Length())
			if cplen > int64(bufN)-done {
				cplen = int64(bufN) - done
			}
			n, err := mm.asCopyOut(ctx, ar.Start, buf[int(done):int(done+cplen)])
			done += int64(n)
			if err != nil {
				return done, err
			}
			ars = ars.Tail()
		}
		// Do not convert errors returned by src to EFAULT.
		return done, bufErr
	}

	// Go through internal mappings.
	return mm.withVecInternalMappings(ctx, ars, usermem.Write, opts.IgnorePermissions, src.ReadToBlocks)
}

// CopyInTo implements usermem.IO.CopyInTo.
func (mm *MemoryManager) CopyInTo(ctx context.Context, ars usermem.AddrRangeSeq, dst safemem.Writer, opts usermem.IOOpts) (int64, error) {
	if !mm.checkIOVec(ars) {
		return 0, syserror.EFAULT
	}

	if ars.NumBytes() == 0 {
		return 0, nil
	}

	// Do AddressSpace IO if applicable.
	if mm.asioEnabled(opts) && ars.NumBytes() < rwMapMinBytes {
		// Buffered copy for the same reason as in CopyOutFrom: dst's
		// WriteFromBlocks may only be called once, which is incompatible with
		// handling faults between calls.
		buf := make([]byte, int(ars.NumBytes()))
		var done int
		var bufErr error
		for !ars.IsEmpty() {
			ar := ars.Head()
			var n int
			n, bufErr = mm.asCopyIn(ctx, ar.Start, buf[done:done+int(ar.Length())])
			done += n
			if bufErr != nil {
				break
			}
			ars = ars.Tail()
		}
		n, err := dst.WriteFromBlocks(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(buf[:done])))
		if err != nil {
			return int64(n), err
		}
		// Do not convert errors returned by dst to EFAULT.
		return int64(n), bufErr
	}

	// Go through internal mappings.
	return mm.withVecInternalMappings(ctx, ars, usermem.Read, opts.IgnorePermissions, dst.WriteFromBlocks)
}

// SwapUint32 implements usermem.IO.SwapUint32.
func (mm *MemoryManager) SwapUint32(ctx context.Context, addr usermem.Addr, new uint32, opts usermem.IOOpts) (uint32, error) {
	ar, ok := mm.checkIORange(addr, 4)
	if !ok {
		return 0, syserror.EFAULT
	}

	// Do AddressSpace IO if applicable.
	if mm.haveASIO && opts.AddressSpaceActive && !opts.IgnorePermissions {
		for {
			old, err := mm.as.SwapUint32(addr, new)
			if err == nil {
				return old, nil
			}
			if f, ok := err.(platform.SegmentationFault); ok {
				if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.ReadWrite); err != nil {
					return 0, err
				}
				continue
			}
			return 0, translateIOError(ctx, err)
		}
	}

	// Go through internal mappings. The 4-byte operand must fall within a
	// single block for the swap to be atomic.
	var old uint32
	_, err := mm.withInternalMappings(ctx, ar, usermem.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
		if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
			// Atomicity is unachievable across mappings.
			return 0, syserror.EFAULT
		}
		im := ims.Head()
		var err error
		old, err = safemem.SwapUint32(im, new)
		if err != nil {
			return 0, translateIOError(ctx, err)
		}
		return 4, nil
	})
	return old, err
}

// CompareAndSwapUint32 implements usermem.IO.CompareAndSwapUint32.
func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr usermem.Addr, old, new uint32, opts usermem.IOOpts) (uint32, error) {
	ar, ok := mm.checkIORange(addr, 4)
	if !ok {
		return 0, syserror.EFAULT
	}

	// Do AddressSpace IO if applicable.
	if mm.haveASIO && opts.AddressSpaceActive && !opts.IgnorePermissions {
		for {
			prev, err := mm.as.CompareAndSwapUint32(addr, old, new)
			if err == nil {
				return prev, nil
			}
			if f, ok := err.(platform.SegmentationFault); ok {
				if err := mm.handleASIOFault(ctx, f.Addr, ar, usermem.ReadWrite); err != nil {
					return 0, err
				}
				continue
			}
			return 0, translateIOError(ctx, err)
		}
	}

	// Go through internal mappings. As in SwapUint32, the 4-byte operand must
	// fall within a single block for the operation to be atomic.
	var prev uint32
	_, err := mm.withInternalMappings(ctx, ar, usermem.ReadWrite, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
		if ims.NumBlocks() != 1 || ims.NumBytes() != 4 {
			// Atomicity is unachievable across mappings.
			return 0, syserror.EFAULT
		}
		im := ims.Head()
		var err error
		prev, err = safemem.CompareAndSwapUint32(im, old, new)
		if err != nil {
			return 0, translateIOError(ctx, err)
		}
		return 4, nil
	})
	return prev, err
}

// handleASIOFault handles a page fault at address addr for an AddressSpaceIO
// operation spanning ioar.
//
// Preconditions: mm.as != nil. ioar.Length() != 0. ioar.Contains(addr).
func (mm *MemoryManager) handleASIOFault(ctx context.Context, addr usermem.Addr, ioar usermem.AddrRange, at usermem.AccessType) error {
	// Try to map all remaining pages in the I/O operation. This RoundUp can't
	// overflow because otherwise it would have been caught by checkIORange.
	end, _ := ioar.End.RoundUp()
	ar := usermem.AddrRange{addr.RoundDown(), end}

	// Don't bother trying existingPMAsLocked; in most cases, if we did have
	// existing pmas, we wouldn't have faulted.

	// Ensure that we have usable vmas. Here and below, only return early if we
	// can't map the first (faulting) page; failure to map later pages are
	// silently ignored. This maximizes partial success.
	mm.mappingMu.RLock()
	vseg, vend, err := mm.getVMAsLocked(ctx, ar, at, false)
	if vendaddr := vend.Start(); vendaddr < ar.End {
		if vendaddr <= ar.Start {
			mm.mappingMu.RUnlock()
			return translateIOError(ctx, err)
		}
		ar.End = vendaddr
	}

	// Ensure that we have usable pmas.
	mm.activeMu.Lock()
	pseg, pend, err := mm.getPMAsLocked(ctx, vseg, ar, pmaOpts{
		breakCOW: at.Write,
	})
	mm.mappingMu.RUnlock()
	if pendaddr := pend.Start(); pendaddr < ar.End {
		if pendaddr <= ar.Start {
			mm.activeMu.Unlock()
			return translateIOError(ctx, err)
		}
		ar.End = pendaddr
	}

	// Downgrade to a read-lock on activeMu since we don't need to mutate pmas
	// anymore.
	mm.activeMu.DowngradeLock()

	err = mm.mapASLocked(pseg, ar, false)
	mm.activeMu.RUnlock()
	return translateIOError(ctx, err)
}

// withInternalMappings ensures that pmas exist for all addresses in ar,
// support access of type (at, ignorePermissions), and have internal mappings
// cached. It then calls f with mm.activeMu locked for reading, passing
// internal mappings for the subrange of ar for which this property holds.
//
// withInternalMappings takes a function returning uint64 since many safemem
// functions have this property, but returns an int64 since this is usually
// more useful for usermem.IO methods.
//
// Preconditions: 0 < ar.Length() <= math.MaxInt64.
func (mm *MemoryManager) withInternalMappings(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
	po := pmaOpts{
		breakCOW: at.Write,
	}

	// If pmas are already available, we can do IO without touching mm.vmas or
	// mm.mappingMu.
	mm.activeMu.RLock()
	if pseg := mm.existingPMAsLocked(ar, at, ignorePermissions, po, true /* needInternalMappings */); pseg.Ok() {
		n, err := f(mm.internalMappingsLocked(pseg, ar))
		mm.activeMu.RUnlock()
		// Do not convert errors returned by f to EFAULT.
		return int64(n), err
	}
	mm.activeMu.RUnlock()

	// Ensure that we have usable vmas.
	mm.mappingMu.RLock()
	vseg, vend, verr := mm.getVMAsLocked(ctx, ar, at, ignorePermissions)
	if vendaddr := vend.Start(); vendaddr < ar.End {
		if vendaddr <= ar.Start {
			mm.mappingMu.RUnlock()
			return 0, translateIOError(ctx, verr)
		}
		ar.End = vendaddr
	}

	// Ensure that we have usable pmas.
	mm.activeMu.Lock()
	pseg, pend, perr := mm.getPMAsLocked(ctx, vseg, ar, po)
	mm.mappingMu.RUnlock()
	if pendaddr := pend.Start(); pendaddr < ar.End {
		if pendaddr <= ar.Start {
			mm.activeMu.Unlock()
			return 0, translateIOError(ctx, perr)
		}
		ar.End = pendaddr
	}
	imend, imerr := mm.getPMAInternalMappingsLocked(pseg, ar)
	mm.activeMu.DowngradeLock()
	if imendaddr := imend.Start(); imendaddr < ar.End {
		if imendaddr <= ar.Start {
			mm.activeMu.RUnlock()
			return 0, translateIOError(ctx, imerr)
		}
		ar.End = imendaddr
	}

	// Do I/O.
	un, err := f(mm.internalMappingsLocked(pseg, ar))
	mm.activeMu.RUnlock()
	n := int64(un)

	// Return the first error in order of progress through ar.
	if err != nil {
		// Do not convert errors returned by f to EFAULT.
		return n, err
	}
	if imerr != nil {
		return n, translateIOError(ctx, imerr)
	}
	if perr != nil {
		return n, translateIOError(ctx, perr)
	}
	return n, translateIOError(ctx, verr)
}

// withVecInternalMappings ensures that pmas exist for all addresses in ars,
// support access of type (at, ignorePermissions), and have internal mappings
// cached. It then calls f with mm.activeMu locked for reading, passing
// internal mappings for the subset of ars for which this property holds.
//
// Preconditions: !ars.IsEmpty().
func (mm *MemoryManager) withVecInternalMappings(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool, f func(safemem.BlockSeq) (uint64, error)) (int64, error) {
	// withInternalMappings is faster than withVecInternalMappings because of
	// iterator plumbing (this isn't generally practical in the vector case due
	// to iterator invalidation between AddrRanges). Use it if possible.
	if ars.NumRanges() == 1 {
		return mm.withInternalMappings(ctx, ars.Head(), at, ignorePermissions, f)
	}

	po := pmaOpts{
		breakCOW: at.Write,
	}

	// If pmas are already available, we can do IO without touching mm.vmas or
	// mm.mappingMu.
	mm.activeMu.RLock()
	if mm.existingVecPMAsLocked(ars, at, ignorePermissions, po, true /* needInternalMappings */) {
		n, err := f(mm.vecInternalMappingsLocked(ars))
		mm.activeMu.RUnlock()
		// Do not convert errors returned by f to EFAULT.
		return int64(n), err
	}
	mm.activeMu.RUnlock()

	// Ensure that we have usable vmas.
	mm.mappingMu.RLock()
	vars, verr := mm.getVecVMAsLocked(ctx, ars, at, ignorePermissions)
	if vars.NumBytes() == 0 {
		mm.mappingMu.RUnlock()
		return 0, translateIOError(ctx, verr)
	}

	// Ensure that we have usable pmas.
	mm.activeMu.Lock()
	pars, perr := mm.getVecPMAsLocked(ctx, vars, po)
	mm.mappingMu.RUnlock()
	if pars.NumBytes() == 0 {
		mm.activeMu.Unlock()
		return 0, translateIOError(ctx, perr)
	}
	imars, imerr := mm.getVecPMAInternalMappingsLocked(pars)
	mm.activeMu.DowngradeLock()
	if imars.NumBytes() == 0 {
		mm.activeMu.RUnlock()
		return 0, translateIOError(ctx, imerr)
	}

	// Do I/O. Note that imars, not ars, is passed: it covers only the subset
	// of ars for which internal mappings were successfully cached.
	un, err := f(mm.vecInternalMappingsLocked(imars))
	mm.activeMu.RUnlock()
	n := int64(un)

	// Return the first error in order of progress through ars.
	if err != nil {
		// Do not convert errors from f to EFAULT.
		return n, err
	}
	if imerr != nil {
		return n, translateIOError(ctx, imerr)
	}
	if perr != nil {
		return n, translateIOError(ctx, perr)
	}
	return n, translateIOError(ctx, verr)
}

// truncatedAddrRangeSeq returns a copy of ars, but with the end truncated to
// at most address end on AddrRange arsit.Head(). It is used in vector I/O paths to
// truncate usermem.AddrRangeSeq when errors occur.
//
// Preconditions: !arsit.IsEmpty(). end <= arsit.Head().End.
func truncatedAddrRangeSeq(ars, arsit usermem.AddrRangeSeq, end usermem.Addr) usermem.AddrRangeSeq {
	ar := arsit.Head()
	if end <= ar.Start {
		return ars.TakeFirst64(ars.NumBytes() - arsit.NumBytes())
	}
	return ars.TakeFirst64(ars.NumBytes() - arsit.NumBytes() + int64(end-ar.Start))
}
diff --git a/pkg/sentry/mm/lifecycle.go b/pkg/sentry/mm/lifecycle.go
new file mode 100644
index 000000000..de7f29b04
--- /dev/null
+++ b/pkg/sentry/mm/lifecycle.go
@@ -0,0 +1,218 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mm

import (
	"fmt"
	"sync/atomic"

	"gvisor.googlesource.com/gvisor/pkg/atomicbitops"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/limits"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)

// NewMemoryManager returns a new MemoryManager with no mappings and 1 user.
func NewMemoryManager(p platform.Platform) *MemoryManager {
	return &MemoryManager{
		p:           p,
		haveASIO:    p.SupportsAddressSpaceIO(),
		privateRefs: &privateRefs{},
		users:       1,
		auxv:        arch.Auxv{},
		aioManager:  aioManager{contexts: make(map[uint64]*AIOContext)},
	}
}

// SetMmapLayout initializes mm's layout from the given arch.Context.
//
// Preconditions: mm contains no mappings and is not used concurrently.
func (mm *MemoryManager) SetMmapLayout(ac arch.Context, r *limits.LimitSet) (arch.MmapLayout, error) {
	layout, err := ac.NewMmapLayout(mm.p.MinUserAddress(), mm.p.MaxUserAddress(), r)
	if err != nil {
		return arch.MmapLayout{}, err
	}
	mm.layout = layout
	return layout, nil
}

// Fork creates a copy of mm with 1 user, as for Linux syscalls fork() or
// clone() (without CLONE_VM).
func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	mm.mappingMu.RLock()
	defer mm.mappingMu.RUnlock()
	mm2 := &MemoryManager{
		p:                    mm.p,
		haveASIO:             mm.haveASIO,
		layout:               mm.layout,
		privateRefs:          mm.privateRefs,
		users:                1,
		usageAS:              mm.usageAS,
		brk:                  mm.brk,
		captureInvalidations: true,
		argv:                 mm.argv,
		envv:                 mm.envv,
		auxv:                 append(arch.Auxv(nil), mm.auxv...),
		// IncRef'd below, once we know that there isn't an error.
		executable: mm.executable,
		aioManager: aioManager{contexts: make(map[uint64]*AIOContext)},
	}

	// Copy vmas.
	dstvgap := mm2.vmas.FirstGap()
	for srcvseg := mm.vmas.FirstSegment(); srcvseg.Ok(); srcvseg = srcvseg.NextSegment() {
		vma := srcvseg.ValuePtr()
		vmaAR := srcvseg.Range()
		// Inform the Mappable, if any, of the new mapping.
		if vma.mappable != nil {
			if err := vma.mappable.AddMapping(ctx, mm2, vmaAR, vma.off); err != nil {
				mm2.removeVMAsLocked(ctx, mm2.applicationAddrRange())
				return nil, err
			}
		}
		if vma.id != nil {
			vma.id.IncRef()
		}
		dstvgap = mm2.vmas.Insert(dstvgap, vmaAR, *vma).NextGap()
		// We don't need to update mm2.usageAS since we copied it from mm
		// above.
	}

	// Copy pmas. We have to lock mm.activeMu for writing to make existing
	// private pmas copy-on-write. We also have to lock mm2.activeMu since
	// after copying vmas above, memmap.Mappables may call mm2.Invalidate. We
	// only copy private pmas, since in the common case where fork(2) is
	// immediately followed by execve(2), copying non-private pmas that can be
	// regenerated by calling memmap.Mappable.Translate is a waste of time.
	// (Linux does the same; compare kernel/fork.c:dup_mmap() =>
	// mm/memory.c:copy_page_range().)
	mm2.activeMu.Lock()
	defer mm2.activeMu.Unlock()
	mm.activeMu.Lock()
	defer mm.activeMu.Unlock()
	dstpgap := mm2.pmas.FirstGap()
	var unmapAR usermem.AddrRange
	for srcpseg := mm.pmas.FirstSegment(); srcpseg.Ok(); srcpseg = srcpseg.NextSegment() {
		pma := srcpseg.ValuePtr()
		if !pma.private {
			continue
		}
		if !pma.needCOW {
			pma.needCOW = true
			if pma.vmaEffectivePerms.Write {
				// We don't want to unmap the whole address space, even though
				// doing so would reduce calls to unmapASLocked(), because mm
				// will most likely continue to be used after the fork, so
				// unmapping pmas unnecessarily will result in extra page
				// faults. But we do want to merge consecutive AddrRanges
				// across pma boundaries.
				if unmapAR.End == srcpseg.Start() {
					unmapAR.End = srcpseg.End()
				} else {
					if unmapAR.Length() != 0 {
						mm.unmapASLocked(unmapAR)
					}
					unmapAR = srcpseg.Range()
				}
			}
		}
		// mm2's copy of the pma shares the underlying file range with mm, so
		// take both a private reference and a file reference for mm2.
		fr := srcpseg.fileRange()
		mm2.incPrivateRef(fr)
		srcpseg.ValuePtr().file.IncRef(fr)
		addrRange := srcpseg.Range()
		mm2.addRSSLocked(addrRange)
		dstpgap = mm2.pmas.Insert(dstpgap, addrRange, *pma).NextGap()
	}
	if unmapAR.Length() != 0 {
		mm.unmapASLocked(unmapAR)
	}

	// Between when we call memmap.Mappable.AddMapping while copying vmas and
	// when we lock mm2.activeMu to copy pmas, calls to mm2.Invalidate() are
	// ineffective because the pmas they invalidate haven't yet been copied,
	// possibly allowing mm2 to get invalidated translations:
	//
	// Invalidating Mappable mm.Fork
	// --------------------- -------
	//
	// mm2.Invalidate()
	// mm.activeMu.Lock()
	// mm.Invalidate() /* blocks */
	// mm2.activeMu.Lock()
	// (mm copies invalidated pma to mm2)
	//
	// This would technically be both safe (since we only copy private pmas,
	// which will still hold a reference on their memory) and consistent with
	// Linux, but we avoid it anyway by setting mm2.captureInvalidations during
	// construction, causing calls to mm2.Invalidate() to be captured in
	// mm2.capturedInvalidations, to be replayed after pmas are copied - i.e.
	// here.
	mm2.captureInvalidations = false
	for _, invArgs := range mm2.capturedInvalidations {
		mm2.invalidateLocked(invArgs.ar, invArgs.opts.InvalidatePrivate, true)
	}
	mm2.capturedInvalidations = nil

	if mm2.executable != nil {
		mm2.executable.IncRef()
	}
	return mm2, nil
}

// IncUsers increments mm's user count and returns true. If the user count is
// already 0, IncUsers does nothing and returns false.
func (mm *MemoryManager) IncUsers() bool {
	return atomicbitops.IncUnlessZeroInt32(&mm.users)
}

// DecUsers decrements mm's user count. If the user count reaches 0, all
// mappings in mm are unmapped.
func (mm *MemoryManager) DecUsers(ctx context.Context) {
	if users := atomic.AddInt32(&mm.users, -1); users > 0 {
		return
	} else if users < 0 {
		// Dropping below zero means DecUsers was called without a matching
		// reference; this is a programmer error.
		panic(fmt.Sprintf("Invalid MemoryManager.users: %d", users))
	}

	mm.aioManager.destroy()

	mm.metadataMu.Lock()
	exe := mm.executable
	mm.executable = nil
	mm.metadataMu.Unlock()
	if exe != nil {
		exe.DecRef()
	}

	mm.activeMu.Lock()
	// Sanity check.
	if atomic.LoadInt32(&mm.active) != 0 {
		panic("active address space lost?")
	}
	// Make sure the AddressSpace is returned.
	if mm.as != nil {
		mm.as.Release()
		mm.as = nil
	}
	mm.activeMu.Unlock()

	mm.mappingMu.Lock()
	defer mm.mappingMu.Unlock()
	mm.unmapLocked(ctx, mm.applicationAddrRange())
}
diff --git a/pkg/sentry/mm/metadata.go b/pkg/sentry/mm/metadata.go
new file mode 100644
index 000000000..32d5e2ff6
--- /dev/null
+++ b/pkg/sentry/mm/metadata.go
@@ -0,0 +1,139 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mm

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)

// ArgvStart returns the start of the application argument vector.
//
// There is no guarantee that this value is sensible w.r.t. ArgvEnd.
func (mm *MemoryManager) ArgvStart() usermem.Addr {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	return mm.argv.Start
}

// SetArgvStart sets the start of the application argument vector.
func (mm *MemoryManager) SetArgvStart(a usermem.Addr) {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	mm.argv.Start = a
}

// ArgvEnd returns the end of the application argument vector.
//
// There is no guarantee that this value is sensible w.r.t. ArgvStart.
func (mm *MemoryManager) ArgvEnd() usermem.Addr {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	return mm.argv.End
}

// SetArgvEnd sets the end of the application argument vector.
func (mm *MemoryManager) SetArgvEnd(a usermem.Addr) {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	mm.argv.End = a
}

// EnvvStart returns the start of the application environment vector.
//
// There is no guarantee that this value is sensible w.r.t. EnvvEnd.
func (mm *MemoryManager) EnvvStart() usermem.Addr {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	return mm.envv.Start
}

// SetEnvvStart sets the start of the application environment vector.
func (mm *MemoryManager) SetEnvvStart(a usermem.Addr) {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	mm.envv.Start = a
}

// EnvvEnd returns the end of the application environment vector.
//
// There is no guarantee that this value is sensible w.r.t. EnvvStart.
func (mm *MemoryManager) EnvvEnd() usermem.Addr {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	return mm.envv.End
}

// SetEnvvEnd sets the end of the application environment vector.
func (mm *MemoryManager) SetEnvvEnd(a usermem.Addr) {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	mm.envv.End = a
}

// Auxv returns a copy of the auxiliary vector, so callers cannot mutate
// mm.auxv.
func (mm *MemoryManager) Auxv() arch.Auxv {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	return append(arch.Auxv(nil), mm.auxv...)
}

// SetAuxv sets the entire auxiliary vector (storing a copy of auxv).
func (mm *MemoryManager) SetAuxv(auxv arch.Auxv) {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()
	mm.auxv = append(arch.Auxv(nil), auxv...)
}

// Executable returns the executable, if available.
//
// An additional reference will be taken in the case of a non-nil executable,
// which must be released by the caller.
func (mm *MemoryManager) Executable() *fs.Dirent {
	mm.metadataMu.Lock()
	defer mm.metadataMu.Unlock()

	if mm.executable == nil {
		return nil
	}

	mm.executable.IncRef()
	return mm.executable
}

// SetExecutable sets the executable.
//
// This takes a reference on d.
func (mm *MemoryManager) SetExecutable(d *fs.Dirent) {
	mm.metadataMu.Lock()

	// Grab a new reference.
	d.IncRef()

	// Set the executable.
	orig := mm.executable
	mm.executable = d

	mm.metadataMu.Unlock()

	// Release the old reference.
	//
	// Do this without holding the lock, since it may wind up doing some
	// I/O to sync the dirent, etc.
	if orig != nil {
		orig.DecRef()
	}
}
diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go
new file mode 100644
index 000000000..ce8097b7f
--- /dev/null
+++ b/pkg/sentry/mm/mm.go
@@ -0,0 +1,417 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package mm provides a memory management subsystem. See README.md for a
// detailed overview.
//
// Lock order:
//
// fs locks, except for memmap.Mappable locks
// mm.MemoryManager.metadataMu
// mm.MemoryManager.mappingMu
// Locks taken by memmap.Mappable methods other than Translate
// mm.MemoryManager.activeMu
// Locks taken by memmap.Mappable.Translate
// mm.privateRefs.mu
// platform.File locks
// mm.aioManager.mu
// mm.AIOContext.mu
//
// Only mm.MemoryManager.Fork is permitted to lock mm.MemoryManager.activeMu in
// multiple mm.MemoryManagers, as it does so in a well-defined order (forked
// child first).
package mm

import (
	"sync"

	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	ssync "gvisor.googlesource.com/gvisor/pkg/sync"
)

// MemoryManager implements a virtual address space.
type MemoryManager struct {
	// p is the platform.
	//
	// p is immutable.
	p platform.Platform

	// haveASIO is the cached result of p.SupportsAddressSpaceIO(). Aside from
	// eliminating an indirect call in the hot I/O path, this makes
	// MemoryManager.asioEnabled() a leaf function, allowing it to be inlined.
	//
	// haveASIO is immutable.
	haveASIO bool `state:"nosave"`

	// layout is the memory layout.
	//
	// layout is set by the binary loader before the MemoryManager can be used.
	layout arch.MmapLayout

	// privateRefs stores reference counts for private memory (memory whose
	// ownership is shared by one or more pmas instead of being owned by a
	// memmap.Mappable).
	//
	// NOTE: This should be replaced using refcounts on
	// platform.File.
	//
	// privateRefs is immutable.
	privateRefs *privateRefs

	// users is the number of dependencies on the mappings in the MemoryManager.
	// When the number of references in users reaches zero, all mappings are
	// unmapped.
	//
	// users is accessed using atomic memory operations.
	users int32

	// mappingMu is analogous to Linux's struct mm_struct::mmap_sem.
	mappingMu ssync.DowngradableRWMutex `state:"nosave"`

	// vmas stores virtual memory areas. Since vmas are stored by value,
	// clients should usually use vmaIterator.ValuePtr() instead of
	// vmaIterator.Value() to get a pointer to the vma rather than a copy.
	//
	// Invariants: vmas are always page-aligned.
	//
	// vmas is protected by mappingMu.
	vmas vmaSet

	// usageAS is vmas.Span(), cached to accelerate RLIMIT_AS checks.
	//
	// usageAS is protected by mappingMu.
	usageAS uint64

	// brk is the mm's brk, which is manipulated using the brk(2) system call.
	// The brk is initially set up by the loader which maps an executable
	// binary into the mm.
	//
	// brk is protected by mappingMu.
	brk usermem.AddrRange

	// activeMu is loosely analogous to Linux's struct
	// mm_struct::page_table_lock.
	activeMu ssync.DowngradableRWMutex `state:"nosave"`

	// pmas stores platform mapping areas used to implement vmas. Since pmas
	// are stored by value, clients should usually use pmaIterator.ValuePtr()
	// instead of pmaIterator.Value() to get a pointer to the pma rather than
	// a copy.
	//
	// Inserting or removing segments from pmas should happen along with a
	// call to mm.insertRSS or mm.removeRSS.
	//
	// Invariants: pmas are always page-aligned. If a pma exists for a given
	// address, a vma must also exist for that address.
	//
	// pmas is protected by activeMu.
	pmas pmaSet

	// curRSS is pmas.Span(), cached to accelerate updates to maxRSS. It is
	// reported as the MemoryManager's RSS.
	//
	// maxRSS should be modified only via insertRSS and removeRSS, not
	// directly.
	//
	// maxRSS is protected by activeMu.
	curRSS uint64

	// maxRSS is the maximum resident set size in bytes of a MemoryManager.
	// It is tracked as the application adds and removes mappings to pmas.
	//
	// maxRSS should be modified only via insertRSS, not directly.
	//
	// maxRSS is protected by activeMu.
	maxRSS uint64

	// as is the platform.AddressSpace that pmas are mapped into. active is the
	// number of contexts that require as to be non-nil; if active == 0, as may
	// be nil.
	//
	// as is protected by activeMu. active is manipulated with atomic memory
	// operations; transitions to and from zero are additionally protected by
	// activeMu. (This is because such transitions may need to be atomic with
	// changes to as.)
	as     platform.AddressSpace `state:"nosave"`
	active int32                 `state:"zerovalue"`

	// unmapAllOnActivate indicates that the next Activate call should activate
	// an empty AddressSpace.
	//
	// This is used to ensure that an AddressSpace cached in
	// NewAddressSpace is not used after some change in the MemoryManager
	// or VMAs has made that AddressSpace stale.
	//
	// unmapAllOnActivate is protected by activeMu. It must only be set when
	// there is no active or cached AddressSpace. If as != nil, then
	// invalidations should be propagated immediately.
	unmapAllOnActivate bool `state:"nosave"`

	// If captureInvalidations is true, calls to MM.Invalidate() are recorded
	// in capturedInvalidations rather than being applied immediately to pmas.
	// This is to avoid a race condition in MM.Fork(); see that function for
	// details.
	//
	// Both captureInvalidations and capturedInvalidations are protected by
	// activeMu. Neither need to be saved since captureInvalidations is only
	// enabled during MM.Fork(), during which saving can't occur.
	captureInvalidations  bool             `state:"zerovalue"`
	capturedInvalidations []invalidateArgs `state:"nosave"`

	metadataMu sync.Mutex `state:"nosave"`

	// argv is the application argv. This is set up by the loader and may be
	// modified by prctl(PR_SET_MM_ARG_START/PR_SET_MM_ARG_END). No
	// requirements apply to argv; we do not require that argv.WellFormed().
	//
	// argv is protected by metadataMu.
	argv usermem.AddrRange

	// envv is the application envv. This is set up by the loader and may be
	// modified by prctl(PR_SET_MM_ENV_START/PR_SET_MM_ENV_END). No
	// requirements apply to envv; we do not require that envv.WellFormed().
	//
	// envv is protected by metadataMu.
	envv usermem.AddrRange

	// auxv is the ELF's auxiliary vector.
	//
	// auxv is protected by metadataMu.
	auxv arch.Auxv

	// executable is the executable for this MemoryManager. If executable
	// is not nil, it holds a reference on the Dirent.
	//
	// executable is protected by metadataMu.
	executable *fs.Dirent

	// aioManager keeps track of AIOContexts used for async IOs. AIOManager
	// must be cloned when CLONE_VM is used.
	aioManager aioManager
}

// vma represents a virtual memory area.
type vma struct {
	// mappable is the virtual memory object mapped by this vma. If mappable is
	// nil, the vma represents a private anonymous mapping.
	mappable memmap.Mappable

	// off is the offset into mappable at which this vma begins. If mappable is
	// nil, off is meaningless.
	off uint64

	// To speedup VMA save/restore, we group and save the following booleans
	// as a single integer.

	// realPerms are the memory permissions on this vma, as defined by the
	// application.
	realPerms usermem.AccessType `state:".(int)"`

	// effectivePerms are the memory permissions on this vma which are
	// actually used to control access.
	//
	// Invariant: effectivePerms == realPerms.Effective().
	effectivePerms usermem.AccessType `state:"manual"`

	// maxPerms limits the set of permissions that may ever apply to this
	// memory, as well as accesses for which usermem.IOOpts.IgnorePermissions
	// is true (e.g. ptrace(PTRACE_POKEDATA)).
	//
	// Invariant: maxPerms == maxPerms.Effective().
	maxPerms usermem.AccessType `state:"manual"`

	// private is true if this is a MAP_PRIVATE mapping, such that writes to
	// the mapping are propagated to a copy.
	private bool `state:"manual"`

	// growsDown is true if the mapping may be automatically extended downward
	// under certain conditions. If growsDown is true, mappable must be nil.
	//
	// There is currently no corresponding growsUp flag; in Linux, the only
	// architectures that can have VM_GROWSUP mappings are ia64, parisc, and
	// metag, none of which we currently support.
	growsDown bool `state:"manual"`

	// If id is not nil, it controls the lifecycle of mappable and provides vma
	// metadata shown in /proc/[pid]/maps, and the vma holds a reference.
	id memmap.MappingIdentity

	// If hint is non-empty, it is a description of the vma printed in
	// /proc/[pid]/maps. hint takes priority over id.MappedName().
	hint string
}

// Bit positions used by saveRealPerms/loadRealPerms to pack the vma's
// permission and flag booleans into a single saved integer.
const (
	vmaRealPermsRead = 1 << iota
	vmaRealPermsWrite
	vmaRealPermsExecute
	vmaEffectivePermsRead
	vmaEffectivePermsWrite
	vmaEffectivePermsExecute
	vmaMaxPermsRead
	vmaMaxPermsWrite
	vmaMaxPermsExecute
	vmaPrivate
	vmaGrowsDown
)

// saveRealPerms packs the vma's permission and flag booleans into a single
// integer for state save. (See the comment above vma.realPerms.)
func (v *vma) saveRealPerms() int {
	var b int
	if v.realPerms.Read {
		b |= vmaRealPermsRead
	}
	if v.realPerms.Write {
		b |= vmaRealPermsWrite
	}
	if v.realPerms.Execute {
		b |= vmaRealPermsExecute
	}
	if v.effectivePerms.Read {
		b |= vmaEffectivePermsRead
	}
	if v.effectivePerms.Write {
		b |= vmaEffectivePermsWrite
	}
	if v.effectivePerms.Execute {
		b |= vmaEffectivePermsExecute
	}
	if v.maxPerms.Read {
		b |= vmaMaxPermsRead
	}
	if v.maxPerms.Write {
		b |= vmaMaxPermsWrite
	}
	if v.maxPerms.Execute {
		b |= vmaMaxPermsExecute
	}
	if v.private {
		b |= vmaPrivate
	}
	if v.growsDown {
		b |= vmaGrowsDown
	}
	return b
}

// loadRealPerms unpacks the integer produced by saveRealPerms back into the
// vma's permission and flag booleans on state restore.
func (v *vma) loadRealPerms(b int) {
	if b&vmaRealPermsRead > 0 {
		v.realPerms.Read = true
	}
	if b&vmaRealPermsWrite > 0 {
		v.realPerms.Write = true
	}
	if b&vmaRealPermsExecute > 0 {
		v.realPerms.Execute = true
	}
	if b&vmaEffectivePermsRead > 0 {
		v.effectivePerms.Read = true
	}
	if b&vmaEffectivePermsWrite > 0 {
		v.effectivePerms.Write = true
	}
	if b&vmaEffectivePermsExecute > 0 {
		v.effectivePerms.Execute = true
	}
	if b&vmaMaxPermsRead > 0 {
		v.maxPerms.Read = true
	}
	if b&vmaMaxPermsWrite > 0 {
		v.maxPerms.Write = true
	}
	if b&vmaMaxPermsExecute > 0 {
		v.maxPerms.Execute = true
	}
	if b&vmaPrivate > 0 {
		v.private = true
	}
	if b&vmaGrowsDown > 0 {
		v.growsDown = true
	}
}

// pma represents a platform mapping area.
type pma struct {
	// file is the file mapped by this pma. Only pmas for which file ==
	// platform.Platform.Memory() may be saved. pmas hold a reference to the
	// corresponding file range while they exist.
	file platform.File `state:"nosave"`

	// off is the offset into file at which this pma begins.
	off uint64

	// vmaEffectivePerms and vmaMaxPerms are duplicated from the
	// corresponding vma so that the IO implementation can avoid iterating
	// mm.vmas when pmas already exist.
	vmaEffectivePerms usermem.AccessType
	vmaMaxPerms       usermem.AccessType

	// needCOW is true if writes to the mapping must be propagated to a copy.
	needCOW bool

	// private is true if this pma represents private memory.
	//
	// If private is true, file must be platform.Platform.Memory(), the pma
	// holds a reference on the mapped memory that is tracked in privateRefs,
	// and calls to Invalidate for which
	// memmap.InvalidateOpts.InvalidatePrivate is false should ignore the pma.
	//
	// If private is false, this pma caches a translation from the
	// corresponding vma's memmap.Mappable.Translate.
	private bool

	// If internalMappings is not empty, it is the cached return value of
	// file.MapInternal for the platform.FileRange mapped by this pma.
	internalMappings safemem.BlockSeq `state:"nosave"`
}

type privateRefs struct {
	mu sync.Mutex `state:"nosave"`

	// refs maps offsets into Platform.Memory() to the number of pmas (or,
	// equivalently, MemoryManagers) that share ownership of the memory at that
	// offset.
	refs fileRefcountSet
}

// invalidateArgs records one deferred call to MM.Invalidate() captured during
// MM.Fork() (see MemoryManager.captureInvalidations).
type invalidateArgs struct {
	ar   usermem.AddrRange
	opts memmap.InvalidateOpts
}

// fileRefcountSetFunctions implements segment.Functions for fileRefcountSet.
type fileRefcountSetFunctions struct{}

func (fileRefcountSetFunctions) MinKey() uint64 {
	return 0
}

func (fileRefcountSetFunctions) MaxKey() uint64 {
	return ^uint64(0)
}

func (fileRefcountSetFunctions) ClearValue(_ *int32) {
}

// Merge allows adjacent ranges to merge only when their refcounts are equal.
func (fileRefcountSetFunctions) Merge(_ platform.FileRange, rc1 int32, _ platform.FileRange, rc2 int32) (int32, bool) {
	return rc1, rc1 == rc2
}

// Split duplicates the refcount into both halves of a split range.
func (fileRefcountSetFunctions) Split(_ platform.FileRange, rc int32, _ uint64) (int32, int32) {
	return rc, rc
}
diff --git a/pkg/sentry/mm/mm_test.go b/pkg/sentry/mm/mm_test.go
new file mode 100644
index 000000000..b47aa7263
--- /dev/null
+++ b/pkg/sentry/mm/mm_test.go
@@ -0,0 +1,174 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package mm + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/context/contexttest" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +func testMemoryManager(ctx context.Context) *MemoryManager { + p := platform.FromContext(ctx) + mm := NewMemoryManager(p) + mm.layout = arch.MmapLayout{ + MinAddr: p.MinUserAddress(), + MaxAddr: p.MaxUserAddress(), + BottomUpBase: p.MinUserAddress(), + TopDownBase: p.MaxUserAddress(), + } + return mm +} + +func (mm *MemoryManager) realUsageAS() uint64 { + return uint64(mm.vmas.Span()) +} + +func TestUsageASUpdates(t *testing.T) { + ctx := contexttest.Context(t) + mm := testMemoryManager(ctx) + defer mm.DecUsers(ctx) + + addr, err := mm.MMap(ctx, memmap.MMapOpts{ + Length: 2 * usermem.PageSize, + }) + if err != nil { + t.Fatalf("MMap got err %v want nil", err) + } + realUsage := mm.realUsageAS() + if mm.usageAS != realUsage { + t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage) + } + + mm.MUnmap(ctx, addr, usermem.PageSize) + realUsage = mm.realUsageAS() + if mm.usageAS != realUsage { + t.Fatalf("usageAS believes %v bytes are mapped; %v bytes are actually mapped", mm.usageAS, realUsage) + } +} + +func TestBrkDataLimitUpdates(t *testing.T) { + limitSet := limits.NewLimitSet() + limitSet.Set(limits.Data, limits.Limit{}) // zero RLIMIT_DATA + + ctx := contexttest.WithLimitSet(contexttest.Context(t), limitSet) + mm := testMemoryManager(ctx) + defer mm.DecUsers(ctx) + + // Try to extend the brk by one page and expect doing so to fail. 
+ oldBrk, _ := mm.Brk(ctx, 0) + if newBrk, _ := mm.Brk(ctx, oldBrk+usermem.PageSize); newBrk != oldBrk { + t.Errorf("brk() increased data segment above RLIMIT_DATA (old brk = %#x, new brk = %#x", oldBrk, newBrk) + } +} + +// TestIOAfterUnmap ensures that IO fails after unmap. +func TestIOAfterUnmap(t *testing.T) { + ctx := contexttest.Context(t) + mm := testMemoryManager(ctx) + defer mm.DecUsers(ctx) + + addr, err := mm.MMap(ctx, memmap.MMapOpts{ + Length: usermem.PageSize, + Private: true, + Perms: usermem.Read, + MaxPerms: usermem.AnyAccess, + }) + if err != nil { + t.Fatalf("MMap got err %v want nil", err) + } + + // IO works before munmap. + b := make([]byte, 1) + n, err := mm.CopyIn(ctx, addr, b, usermem.IOOpts{}) + if err != nil { + t.Errorf("CopyIn got err %v want nil", err) + } + if n != 1 { + t.Errorf("CopyIn got %d want 1", n) + } + + err = mm.MUnmap(ctx, addr, usermem.PageSize) + if err != nil { + t.Fatalf("MUnmap got err %v want nil", err) + } + + n, err = mm.CopyIn(ctx, addr, b, usermem.IOOpts{}) + if err != syserror.EFAULT { + t.Errorf("CopyIn got err %v want EFAULT", err) + } + if n != 0 { + t.Errorf("CopyIn got %d want 0", n) + } +} + +// TestIOAfterMProtect tests IO interaction with mprotect permissions. +func TestIOAfterMProtect(t *testing.T) { + ctx := contexttest.Context(t) + mm := testMemoryManager(ctx) + defer mm.DecUsers(ctx) + + addr, err := mm.MMap(ctx, memmap.MMapOpts{ + Length: usermem.PageSize, + Private: true, + Perms: usermem.ReadWrite, + MaxPerms: usermem.AnyAccess, + }) + if err != nil { + t.Fatalf("MMap got err %v want nil", err) + } + + // Writing works before mprotect. 
+ b := make([]byte, 1) + n, err := mm.CopyOut(ctx, addr, b, usermem.IOOpts{}) + if err != nil { + t.Errorf("CopyOut got err %v want nil", err) + } + if n != 1 { + t.Errorf("CopyOut got %d want 1", n) + } + + err = mm.MProtect(addr, usermem.PageSize, usermem.Read, false) + if err != nil { + t.Errorf("MProtect got err %v want nil", err) + } + + // Without IgnorePermissions, CopyOut should no longer succeed. + n, err = mm.CopyOut(ctx, addr, b, usermem.IOOpts{}) + if err != syserror.EFAULT { + t.Errorf("CopyOut got err %v want EFAULT", err) + } + if n != 0 { + t.Errorf("CopyOut got %d want 0", n) + } + + // With IgnorePermissions, CopyOut should succeed despite mprotect. + n, err = mm.CopyOut(ctx, addr, b, usermem.IOOpts{ + IgnorePermissions: true, + }) + if err != nil { + t.Errorf("CopyOut got err %v want nil", err) + } + if n != 1 { + t.Errorf("CopyOut got %d want 1", n) + } +} diff --git a/pkg/sentry/mm/pma.go b/pkg/sentry/mm/pma.go new file mode 100644 index 000000000..35e873762 --- /dev/null +++ b/pkg/sentry/mm/pma.go @@ -0,0 +1,928 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mm + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/memmap" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/safecopy" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +type pmaOpts struct { + // If breakCOW is true, pmas must not be copy-on-write. + breakCOW bool +} + +// existingPMAsLocked checks that pmas exist for all addresses in ar, and +// support access of type (at, ignorePermissions). If so, it returns an +// iterator to the pma containing ar.Start. Otherwise it returns a terminal +// iterator. +// +// Preconditions: mm.activeMu must be locked. ar.Length() != 0. +func (mm *MemoryManager) existingPMAsLocked(ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool, opts pmaOpts, needInternalMappings bool) pmaIterator { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + } + + first := mm.pmas.FindSegment(ar.Start) + pseg := first + for pseg.Ok() { + pma := pseg.ValuePtr() + perms := pma.vmaEffectivePerms + if ignorePermissions { + perms = pma.vmaMaxPerms + } + if !perms.SupersetOf(at) { + // These are the vma's permissions, so the caller will get an error + // when they try to get new pmas. + return pmaIterator{} + } + if opts.breakCOW && pma.needCOW { + return pmaIterator{} + } + if needInternalMappings && pma.internalMappings.IsEmpty() { + return pmaIterator{} + } + + if ar.End <= pseg.End() { + return first + } + pseg, _ = pseg.NextNonEmpty() + } + + // Ran out of pmas before reaching ar.End. + return pmaIterator{} +} + +// existingVecPMAsLocked returns true if pmas exist for all addresses in ars, +// and support access of type (at, ignorePermissions). 
+// +// Preconditions: mm.activeMu must be locked. +func (mm *MemoryManager) existingVecPMAsLocked(ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool, opts pmaOpts, needInternalMappings bool) bool { + for ; !ars.IsEmpty(); ars = ars.Tail() { + if ar := ars.Head(); ar.Length() != 0 && !mm.existingPMAsLocked(ar, at, ignorePermissions, opts, needInternalMappings).Ok() { + return false + } + } + return true +} + +// getPMAsLocked ensures that pmas exist for all addresses in ar, subject to +// opts. It returns: +// +// - An iterator to the pma containing ar.Start. If no pma contains ar.Start, +// the iterator is unspecified. +// +// - An iterator to the gap after the last pma containing an address in ar. If +// pmas exist for no addresses in ar, the iterator is to a gap that begins +// before ar.Start. +// +// - An error that is non-nil if pmas exist for only a subset of ar. +// +// Preconditions: mm.mappingMu must be locked. mm.activeMu must be locked for +// writing. ar.Length() != 0. vseg.Range().Contains(ar.Start). vmas must exist +// for all addresses in ar. +func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, opts pmaOpts) (pmaIterator, pmaGapIterator, error) { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + if !vseg.Ok() { + panic("terminal vma iterator") + } + if !vseg.Range().Contains(ar.Start) { + panic(fmt.Sprintf("initial vma %v does not cover start of ar %v", vseg.Range(), ar)) + } + } + + // Page-align ar so that all AddrRanges are aligned. + end, ok := ar.End.RoundUp() + var alignerr error + if !ok { + end = ar.End.RoundDown() + alignerr = syserror.EFAULT + } + ar = usermem.AddrRange{ar.Start.RoundDown(), end} + + pstart, pend, perr := mm.ensurePMAsLocked(ctx, vseg, ar) + if pend.Start() <= ar.Start { + return pmaIterator{}, pend, perr + } + // ensurePMAsLocked may not have pstart due to iterator invalidation. 
We + // need it, either to return it immediately or to pass to + // breakCopyOnWriteLocked. + if !pstart.Ok() { + pstart = mm.findOrSeekPrevUpperBoundPMA(ar.Start, pend) + } + + var cowerr error + if opts.breakCOW { + var invalidated bool + pend, invalidated, cowerr = mm.breakCopyOnWriteLocked(pstart, ar) + if pend.Start() <= ar.Start { + return pmaIterator{}, pend, cowerr + } + if invalidated { + pstart = mm.findOrSeekPrevUpperBoundPMA(ar.Start, pend) + } + } + + if cowerr != nil { + return pstart, pend, cowerr + } + if perr != nil { + return pstart, pend, perr + } + return pstart, pend, alignerr +} + +// getVecPMAsLocked ensures that pmas exist for all addresses in ars. It +// returns the subset of ars for which pmas exist. If this is not equal to ars, +// it returns a non-nil error explaining why. +// +// Preconditions: mm.mappingMu must be locked. mm.activeMu must be locked for +// writing. vmas must exist for all addresses in ars. +func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, opts pmaOpts) (usermem.AddrRangeSeq, error) { + for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() { + ar := arsit.Head() + if ar.Length() == 0 { + continue + } + + // Page-align ar so that all AddrRanges are aligned. 
+ end, ok := ar.End.RoundUp() + var alignerr error + if !ok { + end = ar.End.RoundDown() + alignerr = syserror.EFAULT + } + ar = usermem.AddrRange{ar.Start.RoundDown(), end} + + pstart, pend, perr := mm.ensurePMAsLocked(ctx, mm.vmas.FindSegment(ar.Start), ar) + if pend.Start() <= ar.Start { + return truncatedAddrRangeSeq(ars, arsit, pend.Start()), perr + } + + var cowerr error + if opts.breakCOW { + if !pstart.Ok() { + pstart = mm.findOrSeekPrevUpperBoundPMA(ar.Start, pend) + } + pend, _, cowerr = mm.breakCopyOnWriteLocked(pstart, ar) + } + + if cowerr != nil { + return truncatedAddrRangeSeq(ars, arsit, pend.Start()), cowerr + } + if perr != nil { + return truncatedAddrRangeSeq(ars, arsit, pend.Start()), perr + } + if alignerr != nil { + return truncatedAddrRangeSeq(ars, arsit, pend.Start()), alignerr + } + } + + return ars, nil +} + +// ensurePMAsLocked ensures that pmas exist for all addresses in ar. It returns: +// +// - An iterator to the pma containing ar.Start, on a best-effort basis (that +// is, the returned iterator may be terminal, even if such a pma exists). +// Returning this iterator on a best-effort basis allows callers that require +// it to use it when it's cheaply available, while also avoiding the overhead +// of retrieving it when it's not. +// +// - An iterator to the gap after the last pma containing an address in ar. If +// pmas exist for no addresses in ar, the iterator is to a gap that begins +// before ar.Start. +// +// - An error that is non-nil if pmas exist for only a subset of ar. +// +// Preconditions: mm.mappingMu must be locked. mm.activeMu must be locked for +// writing. ar.Length() != 0. ar must be page-aligned. +// vseg.Range().Contains(ar.Start). vmas must exist for all addresses in ar. 
+func (mm *MemoryManager) ensurePMAsLocked(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange) (pmaIterator, pmaGapIterator, error) { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + if !vseg.Range().Contains(ar.Start) { + panic(fmt.Sprintf("initial vma %v does not cover start of ar %v", vseg.Range(), ar)) + } + } + + pstart, pgap := mm.pmas.Find(ar.Start) + if pstart.Ok() { + pgap = pstart.NextGap() + } + for pgap.Start() < ar.End { + if pgap.Range().Length() == 0 { + pgap = pgap.NextGap() + continue + } + // A single pgap might be spanned by multiple vmas. Insert pmas to + // cover the first (vma, pgap) pair. + pgapAR := pgap.Range().Intersect(ar) + vseg = vseg.seekNextLowerBound(pgapAR.Start) + if checkInvariants { + if !vseg.Ok() { + panic(fmt.Sprintf("no vma after %#x", pgapAR.Start)) + } + if pgapAR.Start < vseg.Start() { + panic(fmt.Sprintf("no vma in [%#x, %#x)", pgapAR.Start, vseg.Start())) + } + } + var err error + pgap, err = mm.insertPMAsLocked(ctx, vseg, pgap, ar) + // insertPMAsLocked most likely invalidated iterators, so pstart is now + // unknown. + pstart = pmaIterator{} + if err != nil { + return pstart, pgap, err + } + } + return pstart, pgap, nil +} + +const ( + // When memory is allocated for a private pma, align the allocated address + // range to a privateAllocUnit boundary when possible. Larger values of + // privateAllocUnit may reduce page faults by allowing fewer, larger pmas + // to be mapped, but may result in larger amounts of wasted memory in the + // presence of fragmentation. privateAllocUnit must be a power-of-2 + // multiple of usermem.PageSize. 
+ privateAllocUnit = usermem.HugePageSize + + privateAllocMask = privateAllocUnit - 1 +) + +func privateAligned(ar usermem.AddrRange) usermem.AddrRange { + aligned := usermem.AddrRange{ar.Start &^ privateAllocMask, ar.End} + if end := (ar.End + privateAllocMask) &^ privateAllocMask; end >= ar.End { + aligned.End = end + } + if checkInvariants { + if !aligned.IsSupersetOf(ar) { + panic(fmt.Sprintf("aligned AddrRange %#v is not a superset of ar %#v", aligned, ar)) + } + } + return aligned +} + +// insertPMAsLocked inserts pmas into pgap corresponding to the vma iterated by +// vseg, spanning at least ar. It returns: +// +// - An iterator to the gap after the last pma containing an address in ar. If +// pmas exist for no addresses in ar, the iterator is to a gap that begins +// before ar.Start. +// +// - An error that is non-nil if pmas exist for only a subset of ar. +// +// Preconditions: mm.mappingMu must be locked. mm.activeMu must be locked for +// writing. vseg.Range().Intersect(pgap.Range()).Intersect(ar).Length() != 0. +// ar must be page-aligned. +func (mm *MemoryManager) insertPMAsLocked(ctx context.Context, vseg vmaIterator, pgap pmaGapIterator, ar usermem.AddrRange) (pmaGapIterator, error) { + optAR := vseg.Range().Intersect(pgap.Range()) + if checkInvariants { + if optAR.Length() <= 0 { + panic(fmt.Sprintf("vseg %v and pgap %v do not overlap", vseg, pgap)) + } + if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() { + panic(fmt.Sprintf("invalid ar %v", ar)) + } + } + vma := vseg.ValuePtr() + + // Private anonymous mappings get pmas by allocating. + if vma.mappable == nil { + // Limit the range we allocate to ar, aligned to privateAllocUnit. 
+ maskAR := privateAligned(ar) + allocAR := optAR.Intersect(maskAR) + mem := mm.p.Memory() + fr, err := mem.Allocate(uint64(allocAR.Length()), usage.Anonymous) + if err != nil { + return pgap, err + } + mm.incPrivateRef(fr) + + if checkInvariants { + if !fr.WellFormed() || fr.Length() != uint64(allocAR.Length()) { + panic(fmt.Sprintf("Allocate(%v) returned invalid FileRange %v", allocAR.Length(), fr)) + } + } + + mm.addRSSLocked(allocAR) + mem.IncRef(fr) + + return mm.pmas.Insert(pgap, allocAR, pma{ + file: mem, + off: fr.Start, + vmaEffectivePerms: vma.effectivePerms, + vmaMaxPerms: vma.maxPerms, + private: true, + // Since we just allocated this memory and have the only reference, + // the new pma does not need copy-on-write. + }).NextGap(), nil + } + + // Other mappings get pmas by translating. Limit the required range + // to ar. + optMR := vseg.mappableRangeOf(optAR) + reqAR := optAR.Intersect(ar) + reqMR := vseg.mappableRangeOf(reqAR) + perms := vma.maxPerms + if vma.private { + perms.Write = false + } + ts, err := vma.mappable.Translate(ctx, reqMR, optMR, perms) + if checkInvariants { + if err := memmap.CheckTranslateResult(reqMR, optMR, ts, err); err != nil { + panic(fmt.Sprintf("Mappable(%T).Translate(%v, %v): %v", vma.mappable, reqMR, optMR, err)) + } + } + + // Install a pma for each Translation. + for _, t := range ts { + // This is valid because memmap.Mappable.Translate is required to + // return Translations in increasing Translation.Source order. + addrRange := vseg.addrRangeOf(t.Source) + mm.addRSSLocked(addrRange) + pseg := mm.pmas.Insert(pgap, addrRange, pma{ + file: t.File, + off: t.Offset, + vmaEffectivePerms: vma.effectivePerms, + vmaMaxPerms: vma.maxPerms, + needCOW: vma.private, + }) + // The new pseg may have been merged with existing segments, only take a + // ref on the inserted range. 
+ t.File.IncRef(pseg.fileRangeOf(addrRange)) + pgap = pseg.NextGap() + } + + // Even if Translate returned an error, if we got to ar.End, + // insertPMAsLocked succeeded. + if ar.End <= pgap.Start() { + return pgap, nil + } + return pgap, err +} + +// breakCopyOnWriteLocked ensures that pmas in ar are not copy-on-write. It +// returns: +// +// - An iterator to the gap after the last non-COW pma containing an address in +// ar. If non-COW pmas exist for no addresses in ar, the iterator is to a gap +// that begins before ar.Start. +// +// - A boolean that is true if iterators into mm.pmas may have been +// invalidated. +// +// - An error that is non-nil if non-COW pmas exist for only a subset of ar. +// +// Preconditions: mm.activeMu must be locked for writing. ar.Length() != 0. ar +// must be page-aligned. pseg.Range().Contains(ar.Start). pmas must exist for +// all addresses in ar. +func (mm *MemoryManager) breakCopyOnWriteLocked(pseg pmaIterator, ar usermem.AddrRange) (pmaGapIterator, bool, error) { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + if !pseg.Range().Contains(ar.Start) { + panic(fmt.Sprintf("initial pma %v does not cover start of ar %v", pseg.Range(), ar)) + } + } + + // Limit the range we copy to ar, aligned to privateAllocUnit. + maskAR := privateAligned(ar) + var invalidatedIterators, didUnmapAS bool + mem := mm.p.Memory() + for { + if mm.isPMACopyOnWriteLocked(pseg) { + // Determine the range to copy. + copyAR := pseg.Range().Intersect(maskAR) + + // Get internal mappings from the pma to copy from. + if err := pseg.getInternalMappingsLocked(); err != nil { + return pseg.PrevGap(), invalidatedIterators, err + } + + // Copy contents. 
+ fr, err := platform.AllocateAndFill(mem, uint64(copyAR.Length()), usage.Anonymous, &safemem.BlockSeqReader{mm.internalMappingsLocked(pseg, copyAR)}) + if _, ok := err.(safecopy.BusError); ok { + // If we got SIGBUS during the copy, deliver SIGBUS to + // userspace (instead of SIGSEGV) if we're breaking + // copy-on-write due to application page fault. + err = &memmap.BusError{err} + } + if fr.Length() == 0 { + return pseg.PrevGap(), invalidatedIterators, err + } + mm.incPrivateRef(fr) + mem.IncRef(fr) + + // Unmap all of maskAR, not just copyAR, to minimize host syscalls. + // AddressSpace mappings must be removed before mm.decPrivateRef(). + if !didUnmapAS { + mm.unmapASLocked(maskAR) + didUnmapAS = true + } + + // Replace the pma with a copy in the part of the address range + // where copying was successful. + copyAR.End = copyAR.Start + usermem.Addr(fr.Length()) + if copyAR != pseg.Range() { + pseg = mm.pmas.Isolate(pseg, copyAR) + invalidatedIterators = true + } + pma := pseg.ValuePtr() + if pma.private { + mm.decPrivateRef(pseg.fileRange()) + } + pma.file.DecRef(pseg.fileRange()) + + pma.file = mem + pma.off = fr.Start + pma.private = true + pma.needCOW = false + pma.internalMappings = safemem.BlockSeq{} + + // Try to merge pma with its neighbors. + if prev := pseg.PrevSegment(); prev.Ok() { + if merged := mm.pmas.Merge(prev, pseg); merged.Ok() { + pseg = merged + invalidatedIterators = true + } + } + if next := pseg.NextSegment(); next.Ok() { + if merged := mm.pmas.Merge(pseg, next); merged.Ok() { + pseg = merged + invalidatedIterators = true + } + } + + // If an error occurred after ar.End, breakCopyOnWriteLocked still + // did its job, so discard the error. + if err != nil && pseg.End() < ar.End { + return pseg.NextGap(), invalidatedIterators, err + } + } + // This checks against ar.End, not maskAR.End, so we will never break + // COW on a pma that does not intersect ar. 
+ if ar.End <= pseg.End() { + return pseg.NextGap(), invalidatedIterators, nil + } + pseg = pseg.NextSegment() + } +} + +// Preconditions: mm.activeMu must be locked for writing. +func (mm *MemoryManager) isPMACopyOnWriteLocked(pseg pmaIterator) bool { + pma := pseg.ValuePtr() + if !pma.needCOW { + return false + } + if !pma.private { + return true + } + // If we have the only reference on private memory to be copied, just take + // ownership of it instead of copying. If we do hold the only reference, + // additional references can only be taken by mm.Fork(), which is excluded + // by mm.activeMu, so this isn't racy. + mm.privateRefs.mu.Lock() + defer mm.privateRefs.mu.Unlock() + fr := pseg.fileRange() + // This check relies on mm.privateRefs.refs being kept fully merged. + rseg := mm.privateRefs.refs.FindSegment(fr.Start) + if rseg.Ok() && rseg.Value() == 1 && fr.End <= rseg.End() { + pma.needCOW = false + return false + } + return true +} + +// Invalidate implements memmap.MappingSpace.Invalidate. +func (mm *MemoryManager) Invalidate(ar usermem.AddrRange, opts memmap.InvalidateOpts) { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + } + + mm.activeMu.Lock() + defer mm.activeMu.Unlock() + if mm.captureInvalidations { + mm.capturedInvalidations = append(mm.capturedInvalidations, invalidateArgs{ar, opts}) + return + } + mm.invalidateLocked(ar, opts.InvalidatePrivate, true) +} + +// invalidateLocked removes pmas and AddressSpace mappings of those pmas for +// addresses in ar. +// +// Preconditions: mm.activeMu must be locked for writing. ar.Length() != 0. ar +// must be page-aligned. 
+func (mm *MemoryManager) invalidateLocked(ar usermem.AddrRange, invalidatePrivate, invalidateShared bool) { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + } + + var didUnmapAS bool + pseg := mm.pmas.LowerBoundSegment(ar.Start) + for pseg.Ok() && pseg.Start() < ar.End { + pma := pseg.ValuePtr() + if (invalidatePrivate && pma.private) || (invalidateShared && !pma.private) { + pseg = mm.pmas.Isolate(pseg, ar) + pma = pseg.ValuePtr() + if !didUnmapAS { + // Unmap all of ar, not just pseg.Range(), to minimize host + // syscalls. AddressSpace mappings must be removed before + // mm.decPrivateRef(). + mm.unmapASLocked(ar) + didUnmapAS = true + } + if pma.private { + mm.decPrivateRef(pseg.fileRange()) + } + mm.removeRSSLocked(pseg.Range()) + pma.file.DecRef(pseg.fileRange()) + pseg = mm.pmas.Remove(pseg).NextSegment() + } else { + pseg = pseg.NextSegment() + } + } +} + +// movePMAsLocked moves all pmas in oldAR to newAR. +// +// Preconditions: mm.activeMu must be locked for writing. oldAR.Length() != 0. +// oldAR.Length() == newAR.Length(). !oldAR.Overlaps(newAR). +// mm.pmas.IsEmptyRange(newAR). oldAR and newAR must be page-aligned. +func (mm *MemoryManager) movePMAsLocked(oldAR, newAR usermem.AddrRange) { + if checkInvariants { + if !oldAR.WellFormed() || oldAR.Length() <= 0 || !oldAR.IsPageAligned() { + panic(fmt.Sprintf("invalid oldAR: %v", oldAR)) + } + if !newAR.WellFormed() || newAR.Length() <= 0 || !newAR.IsPageAligned() { + panic(fmt.Sprintf("invalid newAR: %v", newAR)) + } + if oldAR.Length() != newAR.Length() { + panic(fmt.Sprintf("old and new address ranges have different lengths: %v, %v", oldAR, newAR)) + } + if oldAR.Overlaps(newAR) { + panic(fmt.Sprintf("old and new address ranges overlap: %v, %v", oldAR, newAR)) + } + // mm.pmas.IsEmptyRange is checked by mm.pmas.Insert. 
+ } + + type movedPMA struct { + oldAR usermem.AddrRange + pma pma + } + var movedPMAs []movedPMA + pseg := mm.pmas.LowerBoundSegment(oldAR.Start) + for pseg.Ok() && pseg.Start() < oldAR.End { + pseg = mm.pmas.Isolate(pseg, oldAR) + movedPMAs = append(movedPMAs, movedPMA{ + oldAR: pseg.Range(), + pma: pseg.Value(), + }) + mm.removeRSSLocked(pseg.Range()) + pseg = mm.pmas.Remove(pseg).NextSegment() + } + + off := newAR.Start - oldAR.Start + pgap := mm.pmas.FindGap(newAR.Start) + for i := range movedPMAs { + mpma := &movedPMAs[i] + pmaNewAR := usermem.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off} + mm.addRSSLocked(pmaNewAR) + pgap = mm.pmas.Insert(pgap, pmaNewAR, mpma.pma).NextGap() + } + + mm.unmapASLocked(oldAR) +} + +// getPMAInternalMappingsLocked ensures that pmas for all addresses in ar have +// cached internal mappings. It returns: +// +// - An iterator to the gap after the last pma with internal mappings +// containing an address in ar. If internal mappings exist for no addresses in +// ar, the iterator is to a gap that begins before ar.Start. +// +// - An error that is non-nil if internal mappings exist for only a subset of +// ar. +// +// Preconditions: mm.activeMu must be locked for writing. +// pseg.Range().Contains(ar.Start). pmas must exist for all addresses in ar. +// ar.Length() != 0. +// +// Postconditions: getPMAInternalMappingsLocked does not invalidate iterators +// into mm.pmas. 
+func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) (pmaGapIterator, error) { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + if !pseg.Range().Contains(ar.Start) { + panic(fmt.Sprintf("initial pma %v does not cover start of ar %v", pseg.Range(), ar)) + } + } + + for { + if err := pseg.getInternalMappingsLocked(); err != nil { + return pseg.PrevGap(), err + } + if ar.End <= pseg.End() { + return pseg.NextGap(), nil + } + pseg, _ = pseg.NextNonEmpty() + } +} + +// getVecPMAInternalMappingsLocked ensures that pmas for all addresses in ars +// have cached internal mappings. It returns the subset of ars for which +// internal mappings exist. If this is not equal to ars, it returns a non-nil +// error explaining why. +// +// Preconditions: mm.activeMu must be locked for writing. pmas must exist for +// all addresses in ar. +// +// Postconditions: getVecPMAInternalMappingsLocked does not invalidate iterators +// into mm.pmas. +func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars usermem.AddrRangeSeq) (usermem.AddrRangeSeq, error) { + for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() { + ar := arsit.Head() + if ar.Length() == 0 { + continue + } + if pend, err := mm.getPMAInternalMappingsLocked(mm.pmas.FindSegment(ar.Start), ar); err != nil { + return truncatedAddrRangeSeq(ars, arsit, pend.Start()), err + } + } + return ars, nil +} + +// internalMappingsLocked returns internal mappings for addresses in ar. +// +// Preconditions: mm.activeMu must be locked. Internal mappings must have been +// previously established for all addresses in ar. ar.Length() != 0. +// pseg.Range().Contains(ar.Start). 
+func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar usermem.AddrRange) safemem.BlockSeq { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + if !pseg.Range().Contains(ar.Start) { + panic(fmt.Sprintf("initial pma %v does not cover start of ar %v", pseg.Range(), ar)) + } + } + + if ar.End <= pseg.End() { + // Since only one pma is involved, we can use pma.internalMappings + // directly, avoiding a slice allocation. + offset := uint64(ar.Start - pseg.Start()) + return pseg.ValuePtr().internalMappings.DropFirst64(offset).TakeFirst64(uint64(ar.Length())) + } + + var ims []safemem.Block + for { + pr := pseg.Range().Intersect(ar) + for pims := pseg.ValuePtr().internalMappings.DropFirst64(uint64(pr.Start - pseg.Start())).TakeFirst64(uint64(pr.Length())); !pims.IsEmpty(); pims = pims.Tail() { + ims = append(ims, pims.Head()) + } + if ar.End <= pseg.End() { + break + } + pseg = pseg.NextSegment() + } + return safemem.BlockSeqFromSlice(ims) +} + +// vecInternalMappingsLocked returns internal mappings for addresses in ars. +// +// Preconditions: mm.activeMu must be locked. Internal mappings must have been +// previously established for all addresses in ars. +func (mm *MemoryManager) vecInternalMappingsLocked(ars usermem.AddrRangeSeq) safemem.BlockSeq { + var ims []safemem.Block + for ; !ars.IsEmpty(); ars = ars.Tail() { + ar := ars.Head() + if ar.Length() == 0 { + continue + } + for pims := mm.internalMappingsLocked(mm.pmas.FindSegment(ar.Start), ar); !pims.IsEmpty(); pims = pims.Tail() { + ims = append(ims, pims.Head()) + } + } + return safemem.BlockSeqFromSlice(ims) +} + +// incPrivateRef acquires a reference on private pages in fr. 
+func (mm *MemoryManager) incPrivateRef(fr platform.FileRange) {
+	mm.privateRefs.mu.Lock()
+	defer mm.privateRefs.mu.Unlock()
+	refSet := &mm.privateRefs.refs
+	seg, gap := refSet.Find(fr.Start)
+	for {
+		switch {
+		case seg.Ok() && seg.Start() < fr.End:
+			// Existing refcounted range overlapping fr: split it to fr's
+			// bounds and increment its count.
+			seg = refSet.Isolate(seg, fr)
+			seg.SetValue(seg.Value() + 1)
+			seg, gap = seg.NextNonEmpty()
+		case gap.Ok() && gap.Start() < fr.End:
+			// Unrefcounted hole within fr: insert it with a count of 1.
+			seg, gap = refSet.InsertWithoutMerging(gap, gap.Range().Intersect(fr), 1).NextNonEmpty()
+		default:
+			// Past fr.End: re-merge segments split by Isolate /
+			// InsertWithoutMerging above, then done.
+			refSet.MergeAdjacent(fr)
+			return
+		}
+	}
+}
+
+// decPrivateRef releases a reference on private pages in fr.
+func (mm *MemoryManager) decPrivateRef(fr platform.FileRange) {
+	var freed []platform.FileRange
+
+	mm.privateRefs.mu.Lock()
+	refSet := &mm.privateRefs.refs
+	seg := refSet.LowerBoundSegment(fr.Start)
+	for seg.Ok() && seg.Start() < fr.End {
+		seg = refSet.Isolate(seg, fr)
+		if old := seg.Value(); old == 1 {
+			// Last reference: drop the range from the set and remember it
+			// so the backing memory can be released below.
+			freed = append(freed, seg.Range())
+			seg = refSet.Remove(seg).NextSegment()
+		} else {
+			seg.SetValue(old - 1)
+			seg = seg.NextSegment()
+		}
+	}
+	refSet.MergeAdjacent(fr)
+	mm.privateRefs.mu.Unlock()
+
+	// Release backing memory only after dropping privateRefs.mu.
+	mem := mm.p.Memory()
+	for _, fr := range freed {
+		mem.DecRef(fr)
+	}
+}
+
+// addRSSLocked updates the current and maximum resident set size of a
+// MemoryManager to reflect the insertion of a pma at ar.
+//
+// Preconditions: mm.activeMu must be locked for writing.
+func (mm *MemoryManager) addRSSLocked(ar usermem.AddrRange) {
+	mm.curRSS += uint64(ar.Length())
+	if mm.curRSS > mm.maxRSS {
+		mm.maxRSS = mm.curRSS
+	}
+}
+
+// removeRSSLocked updates the current resident set size of a MemoryManager to
+// reflect the removal of a pma at ar.
+//
+// Preconditions: mm.activeMu must be locked for writing.
+func (mm *MemoryManager) removeRSSLocked(ar usermem.AddrRange) {
+	mm.curRSS -= uint64(ar.Length())
+}
+
+// pmaSetFunctions implements segment.Functions for pmaSet.
+type pmaSetFunctions struct{}
+
+func (pmaSetFunctions) MinKey() usermem.Addr {
+	return 0
+}
+
+func (pmaSetFunctions) MaxKey() usermem.Addr {
+	return ^usermem.Addr(0)
+}
+
+func (pmaSetFunctions) ClearValue(pma *pma) {
+	// Drop references held through the pma so cleared entries don't pin
+	// the file or its mappings.
+	pma.file = nil
+	pma.internalMappings = safemem.BlockSeq{}
+}
+
+func (pmaSetFunctions) Merge(ar1 usermem.AddrRange, pma1 pma, ar2 usermem.AddrRange, pma2 pma) (pma, bool) {
+	// Two pmas are mergeable only if they are virtually and file-contiguous
+	// and agree on all other properties.
+	if pma1.file != pma2.file ||
+		pma1.off+uint64(ar1.Length()) != pma2.off ||
+		pma1.vmaEffectivePerms != pma2.vmaEffectivePerms ||
+		pma1.vmaMaxPerms != pma2.vmaMaxPerms ||
+		pma1.needCOW != pma2.needCOW ||
+		pma1.private != pma2.private {
+		return pma{}, false
+	}
+
+	// Discard internal mappings instead of trying to merge them, since merging
+	// them requires an allocation and getting them again from the
+	// platform.File might not.
+	pma1.internalMappings = safemem.BlockSeq{}
+	return pma1, true
+}
+
+func (pmaSetFunctions) Split(ar usermem.AddrRange, p pma, split usermem.Addr) (pma, pma) {
+	newlen1 := uint64(split - ar.Start)
+	p2 := p
+	// The second half continues at the corresponding file offset.
+	p2.off += newlen1
+	if !p.internalMappings.IsEmpty() {
+		// Split cached internal mappings along the same boundary.
+		p.internalMappings = p.internalMappings.TakeFirst64(newlen1)
+		p2.internalMappings = p2.internalMappings.DropFirst64(newlen1)
+	}
+	return p, p2
+}
+
+// findOrSeekPrevUpperBoundPMA returns mm.pmas.UpperBoundSegment(addr), but may do
+// so by scanning linearly backward from pgap.
+//
+// Preconditions: mm.activeMu must be locked. addr <= pgap.Start().
+func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr usermem.Addr, pgap pmaGapIterator) pmaIterator {
+	if checkInvariants {
+		if !pgap.Ok() {
+			panic("terminal pma iterator")
+		}
+		if addr > pgap.Start() {
+			panic(fmt.Sprintf("can't seek backward to %#x from %#x", addr, pgap.Start()))
+		}
+	}
+	// Optimistically check if pgap.PrevSegment() is the PMA we're looking for,
+	// which is the case if findOrSeekPrevUpperBoundPMA is called to find the
+	// start of a range containing only a single PMA.
+	if pseg := pgap.PrevSegment(); pseg.Start() <= addr {
+		return pseg
+	}
+	// Otherwise fall back to a fresh search of the set.
+	return mm.pmas.UpperBoundSegment(addr)
+}
+
+// getInternalMappingsLocked ensures that pseg.ValuePtr().internalMappings is
+// non-empty.
+//
+// Preconditions: mm.activeMu must be locked for writing.
+func (pseg pmaIterator) getInternalMappingsLocked() error {
+	pma := pseg.ValuePtr()
+	if pma.internalMappings.IsEmpty() {
+		// Internal mappings are used for ignorePermissions accesses,
+		// so we need to use vma.maxPerms instead of
+		// vma.effectivePerms. However, we will never execute
+		// application code through an internal mapping, and we don't
+		// actually need a writable mapping if copy-on-write is in
+		// effect. (But get a writable mapping anyway if the pma is
+		// private, so that if breakCopyOnWriteLocked =>
+		// isPMACopyOnWriteLocked takes ownership of the pma instead of
+		// copying, it doesn't need to get a new mapping.)
+		perms := pma.vmaMaxPerms
+		perms.Execute = false
+		if pma.needCOW && !pma.private {
+			perms.Write = false
+		}
+		ims, err := pma.file.MapInternal(pseg.fileRange(), perms)
+		if err != nil {
+			return err
+		}
+		// Cache the mappings on the pma for reuse by later accesses.
+		pma.internalMappings = ims
+	}
+	return nil
+}
+
+func (pseg pmaIterator) fileRange() platform.FileRange {
+	return pseg.fileRangeOf(pseg.Range())
+}
+
+// Preconditions: pseg.Range().IsSupersetOf(ar). ar.Length != 0.
+func (pseg pmaIterator) fileRangeOf(ar usermem.AddrRange) platform.FileRange {
+	if checkInvariants {
+		if !pseg.Ok() {
+			panic("terminal pma iterator")
+		}
+		if !ar.WellFormed() || ar.Length() <= 0 {
+			panic(fmt.Sprintf("invalid ar: %v", ar))
+		}
+		if !pseg.Range().IsSupersetOf(ar) {
+			panic(fmt.Sprintf("ar %v out of bounds %v", ar, pseg.Range()))
+		}
+	}
+
+	// Translate ar's offsets within the pma to offsets within the backing
+	// platform.File.
+	pma := pseg.ValuePtr()
+	pstart := pseg.Start()
+	return platform.FileRange{pma.off + uint64(ar.Start-pstart), pma.off + uint64(ar.End-pstart)}
+}
diff --git a/pkg/sentry/mm/proc_pid_maps.go b/pkg/sentry/mm/proc_pid_maps.go
new file mode 100644
index 000000000..5840b257c
--- /dev/null
+++ b/pkg/sentry/mm/proc_pid_maps.go
@@ -0,0 +1,105 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mm
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/seqfile"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+)
+
+const (
+	// devMinorBits is the number of minor bits in a device number. Linux:
+	// include/linux/kdev_t.h:MINORBITS
+	devMinorBits = 20
+)
+
+// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
+//
+// Always returns true: mappings can change at any time, so the maps data is
+// regenerated on every read regardless of generation.
+func (mm *MemoryManager) NeedsUpdate(generation int64) bool {
+	return true
+}
+
+// ReadSeqFileData is called by fs/proc.mapsData.ReadSeqFileData.
+func (mm *MemoryManager) ReadSeqFileData(ctx context.Context, handle seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
+	mm.mappingMu.RLock()
+	defer mm.mappingMu.RUnlock()
+	var data []seqfile.SeqData
+	var start usermem.Addr
+	if handle != nil {
+		// Resume iteration from the address stored in the handle by a
+		// previous call.
+		start = *handle.(*usermem.Addr)
+	}
+	for vseg := mm.vmas.LowerBoundSegment(start); vseg.Ok(); vseg = vseg.NextSegment() {
+		// FIXME: If we use a usermem.Addr for the handle, we get
+		// "panic: autosave error: type usermem.Addr is not registered".
+		vmaAddr := vseg.End()
+		data = append(data, seqfile.SeqData{
+			Buf:    mm.vmaMapsEntryLocked(ctx, vseg),
+			Handle: &vmaAddr,
+		})
+	}
+	return data, 1
+}
+
+// vmaMapsEntryLocked returns a /proc/[pid]/maps entry for the vma iterated by
+// vseg, including the trailing newline.
+//
+// Preconditions: mm.mappingMu must be locked.
+func (mm *MemoryManager) vmaMapsEntryLocked(ctx context.Context, vseg vmaIterator) []byte {
+	vma := vseg.ValuePtr()
+	// Shared vs. private flag character, per the maps format.
+	private := "p"
+	if !vma.private {
+		private = "s"
+	}
+
+	var dev, ino uint64
+	if vma.id != nil {
+		dev = vma.id.DeviceID()
+		ino = vma.id.InodeID()
+	}
+	devMajor := uint32(dev >> devMinorBits)
+	devMinor := uint32(dev & ((1 << devMinorBits) - 1))
+
+	var b bytes.Buffer
+	// Do not include the guard page: fs/proc/task_mmu.c:show_map_vma() =>
+	// stack_guard_page_start().
+	fmt.Fprintf(&b, "%08x-%08x %s%s %08x %02x:%02x %d ",
+		vseg.Start(), vseg.End(), vma.realPerms, private, vma.off, devMajor, devMinor, ino)
+
+	// Figure out our filename or hint.
+	var s string
+	if vma.hint != "" {
+		s = vma.hint
+	} else if vma.id != nil {
+		// FIXME: We are holding mm.mappingMu here, which is
+		// consistent with Linux's holding mmap_sem in
+		// fs/proc/task_mmu.c:show_map_vma() => fs/seq_file.c:seq_file_path().
+		// However, it's not clear that fs.File.MappedName() is actually
+		// consistent with this lock order.
+		s = vma.id.MappedName(ctx)
+	}
+	if s != "" {
+		// Per linux, we pad until the 74th character.
+		if pad := 73 - b.Len(); pad > 0 {
+			b.WriteString(strings.Repeat(" ", pad))
+		}
+		b.WriteString(s)
+	}
+	b.WriteString("\n")
+	return b.Bytes()
+}
diff --git a/pkg/sentry/mm/save_restore.go b/pkg/sentry/mm/save_restore.go
new file mode 100644
index 000000000..36fed8f1c
--- /dev/null
+++ b/pkg/sentry/mm/save_restore.go
@@ -0,0 +1,57 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mm
+
+import (
+	"fmt"
+
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+)
+
+// InvalidateUnsavable invokes memmap.Mappable.InvalidateUnsavable on all
+// Mappables mapped by mm.
+func (mm *MemoryManager) InvalidateUnsavable(ctx context.Context) error {
+	mm.mappingMu.RLock()
+	defer mm.mappingMu.RUnlock()
+	for vseg := mm.vmas.FirstSegment(); vseg.Ok(); vseg = vseg.NextSegment() {
+		if vma := vseg.ValuePtr(); vma.mappable != nil {
+			if err := vma.mappable.InvalidateUnsavable(ctx); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// beforeSave is invoked by stateify.
+//
+// It verifies the save-time invariant that every remaining pma is backed by
+// the platform's memory file, whose contents survive save/restore.
+func (mm *MemoryManager) beforeSave() {
+	mem := mm.p.Memory()
+	for pseg := mm.pmas.FirstSegment(); pseg.Ok(); pseg = pseg.NextSegment() {
+		if pma := pseg.ValuePtr(); pma.file != mem {
+			// InvalidateUnsavable should have caused all such pmas to be
+			// invalidated.
+			panic(fmt.Sprintf("Can't save pma %#v with non-Memory file of type %T:\n%s", pseg.Range(), pma.file, mm))
+		}
+	}
+}
+
+// afterLoad is invoked by stateify.
+func (mm *MemoryManager) afterLoad() {
+	mm.haveASIO = mm.p.SupportsAddressSpaceIO()
+	// Re-point every pma at the new platform's memory file; file pointers
+	// are not saved (see beforeSave's invariant).
+	mem := mm.p.Memory()
+	for pseg := mm.pmas.FirstSegment(); pseg.Ok(); pseg = pseg.NextSegment() {
+		pseg.ValuePtr().file = mem
+	}
+}
diff --git a/pkg/sentry/mm/special_mappable.go b/pkg/sentry/mm/special_mappable.go
new file mode 100644
index 000000000..9d3614034
--- /dev/null
+++ b/pkg/sentry/mm/special_mappable.go
@@ -0,0 +1,147 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mm
+
+import (
+	"gvisor.googlesource.com/gvisor/pkg/refs"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+// SpecialMappable implements memmap.MappingIdentity and memmap.Mappable with
+// semantics similar to Linux's mm/mmap.c:_install_special_mapping(), except
+// that SpecialMappable takes ownership of the memory that it represents
+// (_install_special_mapping() does not.)
+type SpecialMappable struct {
+	refs.AtomicRefCount
+
+	// p is the platform whose memory file backs the mappable.
+	p platform.Platform
+	// fr is the owned range of offsets in p.Memory().
+	fr platform.FileRange
+	// name is what appears in /proc/[pid]/maps.
+	name string
+}
+
+// NewSpecialMappable returns a SpecialMappable that owns fr, which represents
+// offsets in p.Memory() that contain the SpecialMappable's data. The
+// SpecialMappable will use the given name in /proc/[pid]/maps.
+//
+// Preconditions: fr.Length() != 0.
+func NewSpecialMappable(name string, p platform.Platform, fr platform.FileRange) *SpecialMappable {
+	return &SpecialMappable{p: p, fr: fr, name: name}
+}
+
+// DecRef implements refs.RefCounter.DecRef.
+//
+// Dropping the last reference releases the owned memory back to the platform.
+func (m *SpecialMappable) DecRef() {
+	m.AtomicRefCount.DecRefWithDestructor(func() {
+		m.p.Memory().DecRef(m.fr)
+	})
+}
+
+// MappedName implements memmap.MappingIdentity.MappedName.
+func (m *SpecialMappable) MappedName(ctx context.Context) string {
+	return m.name
+}
+
+// DeviceID implements memmap.MappingIdentity.DeviceID.
+func (m *SpecialMappable) DeviceID() uint64 {
+	return 0
+}
+
+// InodeID implements memmap.MappingIdentity.InodeID.
+func (m *SpecialMappable) InodeID() uint64 {
+	return 0
+}
+
+// Msync implements memmap.MappingIdentity.Msync.
+func (m *SpecialMappable) Msync(ctx context.Context, mr memmap.MappableRange) error {
+	// Linux: vm_file is NULL, causing msync to skip it entirely.
+	return nil
+}
+
+// AddMapping implements memmap.Mappable.AddMapping.
+func (m *SpecialMappable) AddMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) error {
+	return nil
+}
+
+// RemoveMapping implements memmap.Mappable.RemoveMapping.
+func (m *SpecialMappable) RemoveMapping(ctx context.Context, ms memmap.MappingSpace, ar usermem.AddrRange, offset uint64) {
+}
+
+// CopyMapping implements memmap.Mappable.CopyMapping.
+func (m *SpecialMappable) CopyMapping(ctx context.Context, ms memmap.MappingSpace, srcAR, dstAR usermem.AddrRange, offset uint64) error {
+	return nil
+}
+
+// Translate implements memmap.Mappable.Translate.
+func (m *SpecialMappable) Translate(ctx context.Context, required, optional memmap.MappableRange, at usermem.AccessType) ([]memmap.Translation, error) {
+	var err error
+	if required.End > m.fr.Length() {
+		// Accesses beyond the end of the owned range fault.
+		err = &memmap.BusError{syserror.EFAULT}
+	}
+	if source := optional.Intersect(memmap.MappableRange{0, m.fr.Length()}); source.Length() != 0 {
+		// Return the valid portion of optional even if required could not
+		// be fully satisfied; err (if any) accompanies the partial result.
+		return []memmap.Translation{
+			{
+				Source: source,
+				File:   m.p.Memory(),
+				Offset: m.fr.Start + source.Start,
+			},
+		}, err
+	}
+	return nil, err
+}
+
+// InvalidateUnsavable implements memmap.Mappable.InvalidateUnsavable.
+func (m *SpecialMappable) InvalidateUnsavable(ctx context.Context) error {
+	// Since data is stored in platform.Platform.Memory(), the contents of
+	// which are preserved across save/restore, we don't need to do anything.
+	return nil
+}
+
+// Platform returns the Platform whose Memory stores the SpecialMappable's
+// contents.
+func (m *SpecialMappable) Platform() platform.Platform {
+	return m.p
+}
+
+// FileRange returns the offsets into Platform().Memory() that store the
+// SpecialMappable's contents.
+func (m *SpecialMappable) FileRange() platform.FileRange {
+	return m.fr
+}
+
+// Length returns the length of the SpecialMappable.
+func (m *SpecialMappable) Length() uint64 {
+	return m.fr.Length()
+}
+
+// NewSharedAnonMappable returns a SpecialMappable that implements the
+// semantics of mmap(MAP_SHARED|MAP_ANONYMOUS) and mappings of /dev/zero.
+//
+// TODO: The use of SpecialMappable is a lazy code reuse hack. Linux
+// uses an ephemeral file created by mm/shmem.c:shmem_zero_setup(); we should
+// do the same to get non-zero device and inode IDs.
+func NewSharedAnonMappable(length uint64, p platform.Platform) (*SpecialMappable, error) {
+	// length must be non-zero and page-aligned.
+	if length == 0 || length != uint64(usermem.Addr(length).RoundDown()) {
+		return nil, syserror.EINVAL
+	}
+	fr, err := p.Memory().Allocate(length, usage.Anonymous)
+	if err != nil {
+		return nil, err
+	}
+	return NewSpecialMappable("/dev/zero (deleted)", p, fr), nil
+}
diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go
new file mode 100644
index 000000000..0730be65b
--- /dev/null
+++ b/pkg/sentry/mm/syscalls.go
@@ -0,0 +1,794 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mm
+
+import (
+	"fmt"
+	mrand "math/rand"
+
+	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/limits"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/platform"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+// HandleUserFault handles an application page fault. sp is the faulting
+// application thread's stack pointer.
+//
+// Preconditions: mm.as != nil.
+func (mm *MemoryManager) HandleUserFault(ctx context.Context, addr usermem.Addr, at usermem.AccessType, sp usermem.Addr) error {
+	ar, ok := addr.RoundDown().ToRange(usermem.PageSize)
+	if !ok {
+		return syserror.EFAULT
+	}
+
+	// Don't bother trying existingPMAsLocked; in most cases, if we did have
+	// existing pmas, we wouldn't have faulted.
+
+	// Ensure that we have a usable vma. Here and below, since we are only
+	// asking for a single page, there is no possibility of partial success,
+	// and any error is immediately fatal.
+	mm.mappingMu.RLock()
+	vseg, _, err := mm.getVMAsLocked(ctx, ar, at, false)
+	if err != nil {
+		mm.mappingMu.RUnlock()
+		return err
+	}
+
+	// Ensure that we have a usable pma. Note the lock ordering: activeMu is
+	// acquired while mappingMu is still held, then mappingMu is released.
+	mm.activeMu.Lock()
+	pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, pmaOpts{
+		breakCOW: at.Write,
+	})
+	mm.mappingMu.RUnlock()
+	if err != nil {
+		mm.activeMu.Unlock()
+		return err
+	}
+
+	// Downgrade to a read-lock on activeMu since we don't need to mutate pmas
+	// anymore.
+	mm.activeMu.DowngradeLock()
+
+	// Map the faulted page into the active AddressSpace.
+	err = mm.mapASLocked(pseg, ar, false)
+	mm.activeMu.RUnlock()
+	return err
+}
+
+// MMap establishes a memory mapping.
+func (mm *MemoryManager) MMap(ctx context.Context, opts memmap.MMapOpts) (usermem.Addr, error) {
+	if opts.Length == 0 {
+		return 0, syserror.EINVAL
+	}
+	length, ok := usermem.Addr(opts.Length).RoundUp()
+	if !ok {
+		return 0, syserror.ENOMEM
+	}
+	opts.Length = uint64(length)
+
+	if opts.Mappable != nil {
+		// Offset must be aligned.
+		if usermem.Addr(opts.Offset).RoundDown() != usermem.Addr(opts.Offset) {
+			return 0, syserror.EINVAL
+		}
+		// Offset + length must not overflow.
+		if end := opts.Offset + opts.Length; end < opts.Offset {
+			return 0, syserror.ENOMEM
+		}
+	} else {
+		opts.Offset = 0
+		if !opts.Private {
+			if opts.MappingIdentity != nil {
+				return 0, syserror.EINVAL
+			}
+			// Shared anonymous mappings are backed by a SpecialMappable,
+			// mirroring mappings of /dev/zero.
+			m, err := NewSharedAnonMappable(opts.Length, platform.FromContext(ctx))
+			if err != nil {
+				return 0, err
+			}
+			opts.MappingIdentity = m
+			opts.Mappable = m
+		}
+	}
+
+	if opts.Addr.RoundDown() != opts.Addr {
+		// MAP_FIXED requires addr to be page-aligned; non-fixed mappings
+		// don't.
+		if opts.Fixed {
+			return 0, syserror.EINVAL
+		}
+		opts.Addr = opts.Addr.RoundDown()
+	}
+
+	if !opts.MaxPerms.SupersetOf(opts.Perms) {
+		return 0, syserror.EACCES
+	}
+	if opts.Unmap && !opts.Fixed {
+		return 0, syserror.EINVAL
+	}
+	if opts.GrowsDown && opts.Mappable != nil {
+		return 0, syserror.EINVAL
+	}
+
+	// Get the new vma.
+	mm.mappingMu.Lock()
+	vseg, ar, err := mm.createVMALocked(ctx, opts)
+	if err != nil {
+		mm.mappingMu.Unlock()
+		return 0, err
+	}
+
+	switch {
+	case opts.Precommit:
+		// Get pmas and map with precommit as requested.
+		mm.populateAndUnlock(ctx, vseg, ar, true)
+
+	case opts.Mappable == nil && length <= privateAllocUnit:
+		// NOTE: Get pmas and map eagerly in the hope
+		// that doing so will save on future page faults. We only do this for
+		// anonymous mappings, since otherwise the cost of
+		// memmap.Mappable.Translate is unknown; and only for small mappings,
+		// to avoid needing to allocate large amounts of memory that we may
+		// subsequently need to checkpoint.
+		mm.populateAndUnlock(ctx, vseg, ar, false)
+
+	default:
+		mm.mappingMu.Unlock()
+	}
+
+	return ar.Start, nil
+}
+
+// populateAndUnlock eagerly obtains pmas for ar and maps them into the active
+// AddressSpace, if any. All failures are silently ignored; population is
+// strictly best-effort.
+//
+// Preconditions: mm.mappingMu must be locked for writing.
+//
+// Postconditions: mm.mappingMu will be unlocked.
+func (mm *MemoryManager) populateAndUnlock(ctx context.Context, vseg vmaIterator, ar usermem.AddrRange, precommit bool) {
+	if !vseg.ValuePtr().effectivePerms.Any() {
+		// Linux doesn't populate inaccessible pages. See
+		// mm/gup.c:populate_vma_page_range.
+		mm.mappingMu.Unlock()
+		return
+	}
+
+	mm.activeMu.Lock()
+
+	// Even if we get a new pma, we can't actually map it if we don't have an
+	// AddressSpace.
+	if mm.as == nil {
+		mm.activeMu.Unlock()
+		mm.mappingMu.Unlock()
+		return
+	}
+
+	// Ensure that we have usable pmas.
+	mm.mappingMu.DowngradeLock()
+	pseg, _, err := mm.getPMAsLocked(ctx, vseg, ar, pmaOpts{})
+	mm.mappingMu.RUnlock()
+	if err != nil {
+		// mm/util.c:vm_mmap_pgoff() ignores the error, if any, from
+		// mm/gup.c:mm_populate(). If it matters, we'll get it again when
+		// userspace actually tries to use the failing page.
+		mm.activeMu.Unlock()
+		return
+	}
+
+	// Downgrade to a read-lock on activeMu since we don't need to mutate pmas
+	// anymore.
+	mm.activeMu.DowngradeLock()
+
+	// As above, errors are silently ignored.
+	mm.mapASLocked(pseg, ar, precommit)
+	mm.activeMu.RUnlock()
+}
+
+// MapStack allocates the initial process stack.
+func (mm *MemoryManager) MapStack(ctx context.Context) (usermem.AddrRange, error) {
+	// maxStackSize is the maximum supported process stack size in bytes.
+	//
+	// This limit exists because stack growing isn't implemented, so the entire
+	// process stack must be mapped up-front.
+	const maxStackSize = 128 << 20
+
+	stackSize := limits.FromContext(ctx).Get(limits.Stack)
+	r, ok := usermem.Addr(stackSize.Cur).RoundUp()
+	sz := uint64(r)
+	if !ok {
+		// RLIM_INFINITY rounds up to 0.
+		sz = linux.DefaultStackSoftLimit
+	} else if sz > maxStackSize {
+		ctx.Warningf("Capping stack size from RLIMIT_STACK of %v down to %v.", sz, maxStackSize)
+		sz = maxStackSize
+	} else if sz == 0 {
+		return usermem.AddrRange{}, syserror.ENOMEM
+	}
+	szaddr := usermem.Addr(sz)
+	ctx.Debugf("Allocating stack with size of %v bytes", sz)
+
+	// Determine the stack's desired location. Unlike Linux, address
+	// randomization can't be disabled.
+	stackEnd := mm.layout.MaxAddr - usermem.Addr(mrand.Int63n(int64(mm.layout.MaxStackRand))).RoundDown()
+	if stackEnd < szaddr {
+		return usermem.AddrRange{}, syserror.ENOMEM
+	}
+	stackStart := stackEnd - szaddr
+	mm.mappingMu.Lock()
+	defer mm.mappingMu.Unlock()
+	_, ar, err := mm.createVMALocked(ctx, memmap.MMapOpts{
+		Length:    sz,
+		Addr:      stackStart,
+		Perms:     usermem.ReadWrite,
+		MaxPerms:  usermem.AnyAccess,
+		Private:   true,
+		GrowsDown: true,
+		Hint:      "[stack]",
+	})
+	return ar, err
+}
+
+// MUnmap implements the semantics of Linux's munmap(2).
+func (mm *MemoryManager) MUnmap(ctx context.Context, addr usermem.Addr, length uint64) error {
+	if addr != addr.RoundDown() {
+		return syserror.EINVAL
+	}
+	if length == 0 {
+		return syserror.EINVAL
+	}
+	la, ok := usermem.Addr(length).RoundUp()
+	if !ok {
+		return syserror.EINVAL
+	}
+	ar, ok := addr.ToRange(uint64(la))
+	if !ok {
+		return syserror.EINVAL
+	}
+
+	mm.mappingMu.Lock()
+	defer mm.mappingMu.Unlock()
+	mm.unmapLocked(ctx, ar)
+	return nil
+}
+
+// MRemapOpts specifies options to MRemap.
+type MRemapOpts struct {
+	// Move controls whether MRemap moves the remapped mapping to a new address.
+	Move MRemapMoveMode
+
+	// NewAddr is the new address for the remapping. NewAddr is ignored unless
+	// Move is MMRemapMustMove.
+	NewAddr usermem.Addr
+}
+
+// MRemapMoveMode controls MRemap's moving behavior.
+type MRemapMoveMode int
+
+const (
+	// MRemapNoMove prevents MRemap from moving the remapped mapping.
+	MRemapNoMove MRemapMoveMode = iota
+
+	// MRemapMayMove allows MRemap to move the remapped mapping.
+	MRemapMayMove
+
+	// MRemapMustMove requires MRemap to move the remapped mapping to
+	// MRemapOpts.NewAddr, replacing any existing mappings in the remapped
+	// range.
+	MRemapMustMove
+)
+
+// MRemap implements the semantics of Linux's mremap(2).
+func (mm *MemoryManager) MRemap(ctx context.Context, oldAddr usermem.Addr, oldSize uint64, newSize uint64, opts MRemapOpts) (usermem.Addr, error) {
+	// "Note that old_address has to be page aligned." - mremap(2)
+	if oldAddr.RoundDown() != oldAddr {
+		return 0, syserror.EINVAL
+	}
+
+	// Linux treats an old_size that rounds up to 0 as 0, which is otherwise a
+	// valid size. However, new_size can't be 0 after rounding.
+	oldSizeAddr, _ := usermem.Addr(oldSize).RoundUp()
+	oldSize = uint64(oldSizeAddr)
+	newSizeAddr, ok := usermem.Addr(newSize).RoundUp()
+	if !ok || newSizeAddr == 0 {
+		return 0, syserror.EINVAL
+	}
+	newSize = uint64(newSizeAddr)
+
+	oldEnd, ok := oldAddr.AddLength(oldSize)
+	if !ok {
+		return 0, syserror.EINVAL
+	}
+
+	mm.mappingMu.Lock()
+	defer mm.mappingMu.Unlock()
+
+	// All cases require that a vma exists at oldAddr.
+	vseg := mm.vmas.FindSegment(oldAddr)
+	if !vseg.Ok() {
+		return 0, syserror.EFAULT
+	}
+
+	if opts.Move != MRemapMustMove {
+		// Handle noops and in-place shrinking. These cases don't care if
+		// [oldAddr, oldEnd) maps to a single vma, or is even mapped at all
+		// (aside from oldAddr).
+		if newSize <= oldSize {
+			if newSize < oldSize {
+				// If oldAddr+oldSize didn't overflow, oldAddr+newSize can't
+				// either.
+				newEnd := oldAddr + usermem.Addr(newSize)
+				mm.unmapLocked(ctx, usermem.AddrRange{newEnd, oldEnd})
+			}
+			return oldAddr, nil
+		}
+
+		// Handle in-place growing.
+
+		// Check that oldEnd maps to the same vma as oldAddr.
+		if vseg.End() < oldEnd {
+			return 0, syserror.EFAULT
+		}
+		// "Grow" the existing vma by creating a new mergeable one.
+		vma := vseg.ValuePtr()
+		var newOffset uint64
+		if vma.mappable != nil {
+			// Continue the mapping at the offset where the old vma ends.
+			newOffset = vseg.mappableRange().End
+		}
+		_, _, err := mm.createVMALocked(ctx, memmap.MMapOpts{
+			Length:          newSize - oldSize,
+			MappingIdentity: vma.id,
+			Mappable:        vma.mappable,
+			Offset:          newOffset,
+			Addr:            oldEnd,
+			Fixed:           true,
+			Perms:           vma.realPerms,
+			MaxPerms:        vma.maxPerms,
+			Private:         vma.private,
+			GrowsDown:       vma.growsDown,
+			Hint:            vma.hint,
+		})
+		if err == nil {
+			return oldAddr, nil
+		}
+		// In-place growth failed. In the MRemapMayMove case, fall through to
+		// moving below.
+		if opts.Move == MRemapNoMove {
+			return 0, err
+		}
+	}
+
+	// Handle moving, which is the only remaining case.
+
+	// Find a destination for the move.
+	var newAR usermem.AddrRange
+	switch opts.Move {
+	case MRemapMayMove:
+		// Kernel chooses the destination.
+		newAddr, err := mm.findAvailableLocked(newSize, findAvailableOpts{})
+		if err != nil {
+			return 0, err
+		}
+		newAR, _ = newAddr.ToRange(newSize)
+
+	case MRemapMustMove:
+		// Caller-specified destination (MREMAP_FIXED semantics).
+		newAddr := opts.NewAddr
+		if newAddr.RoundDown() != newAddr {
+			return 0, syserror.EINVAL
+		}
+		var ok bool
+		newAR, ok = newAddr.ToRange(newSize)
+		if !ok {
+			return 0, syserror.EINVAL
+		}
+		if (usermem.AddrRange{oldAddr, oldEnd}).Overlaps(newAR) {
+			return 0, syserror.EINVAL
+		}
+
+		// Unmap any mappings at the destination.
+		mm.unmapLocked(ctx, newAR)
+
+		// If the sizes specify shrinking, unmap everything between the new and
+		// old sizes at the source.
+		if newSize < oldSize {
+			oldNewEnd := oldAddr + usermem.Addr(newSize)
+			mm.unmapLocked(ctx, usermem.AddrRange{oldNewEnd, oldEnd})
+			oldEnd = oldNewEnd
+		}
+
+		// unmapLocked may have invalidated vseg; look it up again.
+		vseg = mm.vmas.FindSegment(oldAddr)
+	}
+
+	oldAR := usermem.AddrRange{oldAddr, oldEnd}
+
+	// In the MRemapMustMove case, these checks happen after unmapping:
+	// mm/mremap.c:mremap_to() => do_munmap(), vma_to_resize().
+
+	// Check that oldEnd maps to the same vma as oldAddr.
+	if vseg.End() < oldEnd {
+		return 0, syserror.EFAULT
+	}
+
+	// Check against RLIMIT_AS.
+	newUsageAS := mm.usageAS - uint64(oldAR.Length()) + uint64(newAR.Length())
+	if limitAS := limits.FromContext(ctx).Get(limits.AS).Cur; newUsageAS > limitAS {
+		return 0, syserror.ENOMEM
+	}
+
+	if vma := vseg.ValuePtr(); vma.mappable != nil {
+		// Check that offset+length does not overflow.
+		if vma.off+uint64(newAR.Length()) < vma.off {
+			return 0, syserror.EINVAL
+		}
+		// Inform the Mappable, if any, of the copied mapping.
+		if err := vma.mappable.CopyMapping(ctx, mm, oldAR, newAR, vseg.mappableOffsetAt(oldAR.Start)); err != nil {
+			return 0, err
+		}
+	}
+
+	// Remove the existing vma before inserting the new one to minimize
+	// iterator invalidation. We do this directly (instead of calling
+	// removeVMAsLocked) because:
+	//
+	// 1. We can't drop the reference on vma.id, which will be transferred to
+	// the new vma.
+	//
+	// 2. We can't call vma.mappable.RemoveMapping, because pmas are still at
+	// oldAR, so calling RemoveMapping could cause us to miss an invalidation
+	// overlapping oldAR.
+	//
+	// Call vseg.Value() (rather than vseg.ValuePtr()) first to make a copy of
+	// the vma.
+	vseg = mm.vmas.Isolate(vseg, oldAR)
+	vma := vseg.Value()
+	mm.vmas.Remove(vseg)
+
+	// Insert the new vma, transferring the reference on vma.id.
+	mm.vmas.Add(newAR, vma)
+
+	// Move pmas. This is technically optional for non-private pmas, which
+	// could just go through memmap.Mappable.Translate again, but it's required
+	// for private pmas.
+	mm.activeMu.Lock()
+	mm.movePMAsLocked(oldAR, newAR)
+	mm.activeMu.Unlock()
+
+	// Now that pmas have been moved to newAR, we can notify vma.mappable that
+	// oldAR is no longer mapped.
+	if vma.mappable != nil {
+		vma.mappable.RemoveMapping(ctx, mm, oldAR, vma.off)
+	}
+
+	return newAR.Start, nil
+}
+
+// MProtect implements the semantics of Linux's mprotect(2).
// MProtect implements the semantics of Linux's mprotect(2).
//
// addr must be page-aligned; length is rounded up to a page boundary.
// realPerms are the permissions requested by the application; the effective
// permissions actually enforced may be weaker (realPerms.Effective()).
func (mm *MemoryManager) MProtect(addr usermem.Addr, length uint64, realPerms usermem.AccessType, growsDown bool) error {
	if addr.RoundDown() != addr {
		return syserror.EINVAL
	}
	if length == 0 {
		return nil
	}
	rlength, ok := usermem.Addr(length).RoundUp()
	if !ok {
		return syserror.ENOMEM
	}
	ar, ok := addr.ToRange(uint64(rlength))
	if !ok {
		return syserror.ENOMEM
	}
	effectivePerms := realPerms.Effective()

	mm.mappingMu.Lock()
	defer mm.mappingMu.Unlock()
	// Non-growsDown mprotect requires that all of ar is mapped, and stops at
	// the first non-empty gap. growsDown mprotect requires that the first vma
	// be growsDown, but does not require it to extend all the way to ar.Start;
	// vmas after the first must be contiguous but need not be growsDown, like
	// the non-growsDown case.
	vseg := mm.vmas.LowerBoundSegment(ar.Start)
	if !vseg.Ok() {
		return syserror.ENOMEM
	}
	if growsDown {
		if !vseg.ValuePtr().growsDown {
			return syserror.EINVAL
		}
		if ar.End <= vseg.Start() {
			return syserror.ENOMEM
		}
		ar.Start = vseg.Start()
	} else {
		if ar.Start < vseg.Start() {
			return syserror.ENOMEM
		}
	}

	mm.activeMu.Lock()
	defer mm.activeMu.Unlock()
	// Re-merge segments that the Isolate calls below may have split, once the
	// walk is complete (including on early-error returns).
	defer func() {
		mm.vmas.MergeRange(ar)
		mm.vmas.MergeAdjacent(ar)
		mm.pmas.MergeRange(ar)
		mm.pmas.MergeAdjacent(ar)
	}()
	pseg := mm.pmas.LowerBoundSegment(ar.Start)
	var didUnmapAS bool
	for {
		// Check for permission validity before splitting vmas, for consistency
		// with Linux.
		if !vseg.ValuePtr().maxPerms.SupersetOf(effectivePerms) {
			return syserror.EACCES
		}
		vseg = mm.vmas.Isolate(vseg, ar)

		// Update vma permissions.
		vma := vseg.ValuePtr()
		vma.realPerms = realPerms
		vma.effectivePerms = effectivePerms

		// Propagate vma permission changes to pmas.
		for pseg.Ok() && pseg.Start() < vseg.End() {
			if pseg.Range().Overlaps(vseg.Range()) {
				pseg = mm.pmas.Isolate(pseg, vseg.Range())
				if !effectivePerms.SupersetOf(pseg.ValuePtr().vmaEffectivePerms) && !didUnmapAS {
					// Unmap all of ar, not just vseg.Range(), to minimize host
					// syscalls.
					mm.unmapASLocked(ar)
					didUnmapAS = true
				}
				pseg.ValuePtr().vmaEffectivePerms = effectivePerms
			}
			pseg = pseg.NextSegment()
		}

		// Continue to the next vma.
		if ar.End <= vseg.End() {
			return nil
		}
		vseg, _ = vseg.NextNonEmpty()
		if !vseg.Ok() {
			return syserror.ENOMEM
		}
	}
}

// BrkSetup sets mm's brk address to addr and its brk size to 0.
func (mm *MemoryManager) BrkSetup(ctx context.Context, addr usermem.Addr) {
	mm.mappingMu.Lock()
	defer mm.mappingMu.Unlock()
	// Unmap the existing brk.
	if mm.brk.Length() != 0 {
		mm.unmapLocked(ctx, mm.brk)
	}
	mm.brk = usermem.AddrRange{addr, addr}
}

// Brk implements the semantics of Linux's brk(2), except that it returns an
// error on failure.
//
// On both success and failure it returns the resulting program break, so the
// syscall layer can mirror Linux's convention of returning the current break.
func (mm *MemoryManager) Brk(ctx context.Context, addr usermem.Addr) (usermem.Addr, error) {
	mm.mappingMu.Lock()
	defer mm.mappingMu.Unlock()

	if addr < mm.brk.Start {
		return mm.brk.End, syserror.EINVAL
	}

	// TODO: This enforces RLIMIT_DATA, but is slightly more
	// permissive than the usual data limit. In particular, this only
	// limits the size of the heap; a true RLIMIT_DATA limits the size of
	// heap + data + bss. The segment sizes need to be plumbed from the
	// loader package to fully enforce RLIMIT_DATA.
	if uint64(addr-mm.brk.Start) > limits.FromContext(ctx).Get(limits.Data).Cur {
		return mm.brk.End, syserror.ENOMEM
	}

	oldbrkpg, _ := mm.brk.End.RoundUp()
	newbrkpg, ok := addr.RoundUp()
	if !ok {
		return mm.brk.End, syserror.EFAULT
	}

	switch {
	case newbrkpg < oldbrkpg:
		// Shrinking the heap: release the now-unused page range.
		mm.unmapLocked(ctx, usermem.AddrRange{newbrkpg, oldbrkpg})

	case oldbrkpg < newbrkpg:
		// Growing the heap: map the new page range as a private anonymous
		// read/write vma.
		_, _, err := mm.createVMALocked(ctx, memmap.MMapOpts{
			Length: uint64(newbrkpg - oldbrkpg),
			Addr:   oldbrkpg,
			Fixed:  true,
			// Compare Linux's
			// arch/x86/include/asm/page_types.h:VM_DATA_DEFAULT_FLAGS.
			Perms:    usermem.ReadWrite,
			MaxPerms: usermem.AnyAccess,
			Private:  true,
			Hint:     "[heap]",
		})
		if err != nil {
			return mm.brk.End, err
		}
	}

	mm.brk.End = addr
	return addr, nil
}

// Decommit implements the semantics of Linux's madvise(MADV_DONTNEED).
func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error {
	ar, ok := addr.ToRange(length)
	if !ok {
		return syserror.EINVAL
	}

	mm.mappingMu.RLock()
	defer mm.mappingMu.RUnlock()
	mm.activeMu.Lock()
	defer mm.activeMu.Unlock()

	// Linux's mm/madvise.c:madvise_dontneed() => mm/memory.c:zap_page_range()
	// is analogous to our mm.invalidateLocked(ar, true, true). We inline this
	// here, with the special case that we synchronously decommit
	// uniquely-owned (non-copy-on-write) pages for private anonymous vma,
	// which is the common case for MADV_DONTNEED. Invalidating these pmas, and
	// allowing them to be reallocated when touched again, increases pma
	// fragmentation, which may significantly reduce performance for
	// non-vectored I/O implementations. Also, decommitting synchronously
	// ensures that Decommit immediately reduces host memory usage.
	var didUnmapAS bool
	pseg := mm.pmas.LowerBoundSegment(ar.Start)
	vseg := mm.vmas.LowerBoundSegment(ar.Start)
	mem := mm.p.Memory()
	for pseg.Ok() && pseg.Start() < ar.End {
		pma := pseg.ValuePtr()
		if pma.private && !mm.isPMACopyOnWriteLocked(pseg) {
			psegAR := pseg.Range().Intersect(ar)
			vseg = vseg.seekNextLowerBound(psegAR.Start)
			if checkInvariants {
				if !vseg.Ok() {
					panic(fmt.Sprintf("no vma after %#x", psegAR.Start))
				}
				if psegAR.Start < vseg.Start() {
					panic(fmt.Sprintf("no vma in [%#x, %#x)", psegAR.Start, vseg.Start()))
				}
			}
			if vseg.Range().IsSupersetOf(psegAR) && vseg.ValuePtr().mappable == nil {
				// Fast path: decommit the backing pages in place and keep the
				// pma, avoiding invalidation/fragmentation.
				if err := mem.Decommit(pseg.fileRangeOf(psegAR)); err == nil {
					pseg = pseg.NextSegment()
					continue
				}
				// If an error occurs, fall through to the general
				// invalidation case below.
			}
		}
		pseg = mm.pmas.Isolate(pseg, ar)
		pma = pseg.ValuePtr()
		if !didUnmapAS {
			// Unmap all of ar, not just pseg.Range(), to minimize host
			// syscalls. AddressSpace mappings must be removed before
			// mm.decPrivateRef().
			mm.unmapASLocked(ar)
			didUnmapAS = true
		}
		if pma.private {
			mm.decPrivateRef(pseg.fileRange())
		}
		pma.file.DecRef(pseg.fileRange())
		mm.removeRSSLocked(pseg.Range())

		pseg = mm.pmas.Remove(pseg).NextSegment()
	}

	// "If there are some parts of the specified address space that are not
	// mapped, the Linux version of madvise() ignores them and applies the call
	// to the rest (but returns ENOMEM from the system call, as it should)." -
	// madvise(2)
	if mm.vmas.SpanRange(ar) != ar.Length() {
		return syserror.ENOMEM
	}
	return nil
}

// Sync implements the semantics of Linux's msync(MS_SYNC).
func (mm *MemoryManager) Sync(ctx context.Context, addr usermem.Addr, length uint64) error {
	ar, ok := addr.ToRange(length)
	if !ok {
		return syserror.ENOMEM
	}

	mm.mappingMu.RLock()
	// Can't defer mm.mappingMu.RUnlock(); see below.
	vseg := mm.vmas.LowerBoundSegment(ar.Start)
	if !vseg.Ok() {
		mm.mappingMu.RUnlock()
		return syserror.ENOMEM
	}
	var unmapped bool
	lastEnd := ar.Start
	for {
		if !vseg.Ok() {
			mm.mappingMu.RUnlock()
			unmapped = true
			break
		}
		if lastEnd < vseg.Start() {
			unmapped = true
		}
		lastEnd = vseg.End()
		vma := vseg.ValuePtr()
		// It's only possible to have dirtied the Mappable through a shared
		// mapping. Don't check if the mapping is writable, because mprotect
		// may have changed this, and also because Linux doesn't.
		if id := vma.id; id != nil && vma.mappable != nil && !vma.private {
			// We can't call memmap.MappingIdentity.Msync while holding
			// mm.mappingMu since it may take fs locks that precede it in the
			// lock order.
			id.IncRef()
			mr := vseg.mappableRangeOf(vseg.Range().Intersect(ar))
			mm.mappingMu.RUnlock()
			err := id.Msync(ctx, mr)
			id.DecRef()
			if err != nil {
				return err
			}
			if lastEnd >= ar.End {
				break
			}
			// Re-acquire and re-seek: the vma set may have changed while
			// mappingMu was dropped, so the old iterator is invalid.
			mm.mappingMu.RLock()
			vseg = mm.vmas.LowerBoundSegment(lastEnd)
		} else {
			if lastEnd >= ar.End {
				mm.mappingMu.RUnlock()
				break
			}
			vseg = vseg.NextSegment()
		}
	}

	if unmapped {
		return syserror.ENOMEM
	}
	return nil
}

// VirtualMemorySize returns the combined length in bytes of all mappings in
// mm.
func (mm *MemoryManager) VirtualMemorySize() uint64 {
	mm.mappingMu.RLock()
	defer mm.mappingMu.RUnlock()
	return uint64(mm.usageAS)
}

// VirtualMemorySizeRange returns the combined length in bytes of all mappings
// in ar in mm.
func (mm *MemoryManager) VirtualMemorySizeRange(ar usermem.AddrRange) uint64 {
	mm.mappingMu.RLock()
	defer mm.mappingMu.RUnlock()
	return uint64(mm.vmas.SpanRange(ar))
}

// ResidentSetSize returns the value advertised as mm's RSS in bytes.
func (mm *MemoryManager) ResidentSetSize() uint64 {
	mm.activeMu.RLock()
	defer mm.activeMu.RUnlock()
	return uint64(mm.curRSS)
}

// MaxResidentSetSize returns the value advertised as mm's max RSS in bytes.
func (mm *MemoryManager) MaxResidentSetSize() uint64 {
	mm.activeMu.RLock()
	defer mm.activeMu.RUnlock()
	return uint64(mm.maxRSS)
}
diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go
new file mode 100644
index 000000000..b6af48cb7
--- /dev/null
+++ b/pkg/sentry/mm/vma.go
@@ -0,0 +1,476 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mm

import (
	"fmt"

	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/context"
	"gvisor.googlesource.com/gvisor/pkg/sentry/limits"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// createVMALocked allocates an address range per opts, inserts a new vma
// covering it, and returns an iterator to the inserted vma along with the
// mapped range.
//
// Preconditions: mm.mappingMu must be locked for writing. opts must be valid
// as defined by the checks in MMap.
func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOpts) (vmaIterator, usermem.AddrRange, error) {
	if opts.MaxPerms != opts.MaxPerms.Effective() {
		panic(fmt.Sprintf("Non-effective MaxPerms %s cannot be enforced", opts.MaxPerms))
	}

	// Find a usable range.
	addr, err := mm.findAvailableLocked(opts.Length, findAvailableOpts{
		Addr:  opts.Addr,
		Fixed: opts.Fixed,
		Unmap: opts.Unmap,
	})
	if err != nil {
		return vmaIterator{}, usermem.AddrRange{}, err
	}
	ar, _ := addr.ToRange(opts.Length)

	// Check against RLIMIT_AS.
	newUsageAS := mm.usageAS + opts.Length
	if opts.Unmap {
		// Ranges being overwritten do not add to usage.
		newUsageAS -= uint64(mm.vmas.SpanRange(ar))
	}
	if limitAS := limits.FromContext(ctx).Get(limits.AS).Cur; newUsageAS > limitAS {
		return vmaIterator{}, usermem.AddrRange{}, syserror.ENOMEM
	}

	// Remove overwritten mappings. This ordering is consistent with Linux:
	// compare Linux's mm/mmap.c:mmap_region() => do_munmap(),
	// file->f_op->mmap().
	var vgap vmaGapIterator
	if opts.Unmap {
		vgap = mm.unmapLocked(ctx, ar)
	} else {
		vgap = mm.vmas.FindGap(ar.Start)
	}

	// Inform the Mappable, if any, of the new mapping.
	if opts.Mappable != nil {
		if err := opts.Mappable.AddMapping(ctx, mm, ar, opts.Offset); err != nil {
			return vmaIterator{}, usermem.AddrRange{}, err
		}
	}

	// Take a reference on opts.MappingIdentity before inserting the vma since
	// vma merging can drop the reference.
	if opts.MappingIdentity != nil {
		opts.MappingIdentity.IncRef()
	}

	// Finally insert the vma.
	vseg := mm.vmas.Insert(vgap, ar, vma{
		mappable:       opts.Mappable,
		off:            opts.Offset,
		realPerms:      opts.Perms,
		effectivePerms: opts.Perms.Effective(),
		maxPerms:       opts.MaxPerms,
		private:        opts.Private,
		growsDown:      opts.GrowsDown,
		id:             opts.MappingIdentity,
		hint:           opts.Hint,
	})
	mm.usageAS += opts.Length

	return vseg, ar, nil
}

// findAvailableOpts specifies options to findAvailableLocked.
type findAvailableOpts struct {
	// Addr is a suggested address. Addr must be page-aligned.
	Addr usermem.Addr

	// Fixed is true if only the suggested address is acceptable.
	Fixed bool

	// Unmap is true if existing vmas and guard pages may exist in the returned
	// range.
	Unmap bool
}

// findAvailableLocked finds an allocatable range.
//
// Preconditions: mm.mappingMu must be locked.
func (mm *MemoryManager) findAvailableLocked(length uint64, opts findAvailableOpts) (usermem.Addr, error) {
	// Does the provided suggestion work?
	if ar, ok := opts.Addr.ToRange(length); ok {
		if mm.applicationAddrRange().IsSupersetOf(ar) {
			if opts.Unmap {
				return ar.Start, nil
			}
			// Check for the presence of an existing vma or guard page.
			if vgap := mm.vmas.FindGap(ar.Start); vgap.Ok() && vgap.availableRange().IsSupersetOf(ar) {
				return ar.Start, nil
			}
		}
	}

	// Fixed mappings accept only the requested address.
	if opts.Fixed {
		return 0, syserror.ENOMEM
	}

	// Prefer hugepage alignment if a hugepage or more is requested.
	alignment := uint64(usermem.PageSize)
	if length >= usermem.HugePageSize {
		alignment = usermem.HugePageSize
	}

	if mm.layout.DefaultDirection == arch.MmapBottomUp {
		return mm.findLowestAvailableLocked(length, alignment, usermem.AddrRange{mm.layout.BottomUpBase, mm.layout.MaxAddr})
	}
	return mm.findHighestAvailableLocked(length, alignment, usermem.AddrRange{mm.layout.MinAddr, mm.layout.TopDownBase})
}

// applicationAddrRange returns the range of addresses available to the
// application per mm's layout.
func (mm *MemoryManager) applicationAddrRange() usermem.AddrRange {
	return usermem.AddrRange{mm.layout.MinAddr, mm.layout.MaxAddr}
}

// findLowestAvailableLocked returns the lowest aligned gap of at least length
// bytes within bounds, scanning upward.
//
// Preconditions: mm.mappingMu must be locked.
func (mm *MemoryManager) findLowestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) {
	for gap := mm.vmas.LowerBoundGap(bounds.Start); gap.Ok() && gap.Start() < bounds.End; gap = gap.NextGap() {
		if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
			// Can we shift up to match the alignment?
			if offset := uint64(gr.Start) % alignment; offset != 0 {
				if uint64(gr.Length()) >= length+alignment-offset {
					// Yes, we're aligned.
					return gr.Start + usermem.Addr(alignment-offset), nil
				}
			}

			// Either aligned perfectly, or can't align it.
			return gr.Start, nil
		}
	}
	return 0, syserror.ENOMEM
}

// Preconditions: mm.mappingMu must be locked.
// findHighestAvailableLocked returns the highest aligned gap of at least
// length bytes within bounds, scanning downward.
func (mm *MemoryManager) findHighestAvailableLocked(length, alignment uint64, bounds usermem.AddrRange) (usermem.Addr, error) {
	for gap := mm.vmas.UpperBoundGap(bounds.End); gap.Ok() && gap.End() > bounds.Start; gap = gap.PrevGap() {
		if gr := gap.availableRange().Intersect(bounds); uint64(gr.Length()) >= length {
			// Can we shift down to match the alignment?
			start := gr.End - usermem.Addr(length)
			if offset := uint64(start) % alignment; offset != 0 {
				if gr.Start <= start-usermem.Addr(offset) {
					// Yes, we're aligned.
					return start - usermem.Addr(offset), nil
				}
			}

			// Either aligned perfectly, or can't align it.
			return start, nil
		}
	}
	return 0, syserror.ENOMEM
}

// getVMAsLocked ensures that vmas exist for all addresses in ar, and support
// access of type (at, ignorePermissions). It returns:
//
// - An iterator to the vma containing ar.Start. If no vma contains ar.Start,
// the iterator is unspecified.
//
// - An iterator to the gap after the last vma containing an address in ar. If
// vmas exist for no addresses in ar, the iterator is to a gap that begins
// before ar.Start.
//
// - An error that is non-nil if vmas exist for only a subset of ar.
//
// Preconditions: mm.mappingMu must be locked for reading; it may be
// temporarily unlocked. ar.Length() != 0.
func (mm *MemoryManager) getVMAsLocked(ctx context.Context, ar usermem.AddrRange, at usermem.AccessType, ignorePermissions bool) (vmaIterator, vmaGapIterator, error) {
	if checkInvariants {
		if !ar.WellFormed() || ar.Length() <= 0 {
			panic(fmt.Sprintf("invalid ar: %v", ar))
		}
	}

	// Inline mm.vmas.LowerBoundSegment so that we have the preceding gap if
	// !vbegin.Ok().
	vbegin, vgap := mm.vmas.Find(ar.Start)
	if !vbegin.Ok() {
		vbegin = vgap.NextSegment()
		// vseg.Ok() is checked before entering the following loop.
	} else {
		vgap = vbegin.PrevGap()
	}

	addr := ar.Start
	vseg := vbegin
	for vseg.Ok() {
		// Loop invariants: vgap = vseg.PrevGap(); addr < vseg.End().
		vma := vseg.ValuePtr()
		if addr < vseg.Start() {
			// A hole precedes this vma: ar is not fully mapped.
			// TODO: Implement vma.growsDown here.
			return vbegin, vgap, syserror.EFAULT
		}

		perms := vma.effectivePerms
		if ignorePermissions {
			perms = vma.maxPerms
		}
		if !perms.SupersetOf(at) {
			return vbegin, vgap, syserror.EPERM
		}

		addr = vseg.End()
		vgap = vseg.NextGap()
		if addr >= ar.End {
			return vbegin, vgap, nil
		}
		vseg = vgap.NextSegment()
	}

	// Ran out of vmas before ar.End.
	return vbegin, vgap, syserror.EFAULT
}

// getVecVMAsLocked ensures that vmas exist for all addresses in ars, and
// support access of type (at, ignorePermissions). It returns the subset of
// ars for which vmas exist. If this is not equal to ars, it returns a non-nil
// error explaining why.
//
// Preconditions: mm.mappingMu must be locked for reading; it may be
// temporarily unlocked.
//
// Postconditions: ars is not mutated.
func (mm *MemoryManager) getVecVMAsLocked(ctx context.Context, ars usermem.AddrRangeSeq, at usermem.AccessType, ignorePermissions bool) (usermem.AddrRangeSeq, error) {
	for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
		ar := arsit.Head()
		if ar.Length() == 0 {
			continue
		}
		if _, vend, err := mm.getVMAsLocked(ctx, ar, at, ignorePermissions); err != nil {
			return truncatedAddrRangeSeq(ars, arsit, vend.Start()), err
		}
	}
	return ars, nil
}

// vma extension will not shrink the number of unmapped bytes between the start
// of a growsDown vma and the end of its predecessor non-growsDown vma below
// guardBytes.
//
// guardBytes is equivalent to Linux's stack_guard_gap after upstream
// 1be7107fbe18 "mm: larger stack guard gap, between vmas".
const guardBytes = 256 * usermem.PageSize

// unmapLocked unmaps all addresses in ar and returns the resulting gap in
// mm.vmas.
+// +// Preconditions: mm.mappingMu must be locked for writing. ar.Length() != 0. +// ar must be page-aligned. +func (mm *MemoryManager) unmapLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + } + + // AddressSpace mappings and pmas must be invalidated before + // mm.removeVMAsLocked() => memmap.Mappable.RemoveMapping(). + mm.Invalidate(ar, memmap.InvalidateOpts{InvalidatePrivate: true}) + return mm.removeVMAsLocked(ctx, ar) +} + +// removeVMAsLocked removes vmas for addresses in ar and returns the resulting +// gap in mm.vmas. It does not remove pmas or AddressSpace mappings; clients +// must do so before calling removeVMAsLocked. +// +// Preconditions: mm.mappingMu must be locked for writing. ar.Length() != 0. ar +// must be page-aligned. +func (mm *MemoryManager) removeVMAsLocked(ctx context.Context, ar usermem.AddrRange) vmaGapIterator { + if checkInvariants { + if !ar.WellFormed() || ar.Length() <= 0 || !ar.IsPageAligned() { + panic(fmt.Sprintf("invalid ar: %v", ar)) + } + } + + vseg, vgap := mm.vmas.Find(ar.Start) + if vgap.Ok() { + vseg = vgap.NextSegment() + } + for vseg.Ok() && vseg.Start() < ar.End { + vseg = mm.vmas.Isolate(vseg, ar) + vmaAR := vseg.Range() + vma := vseg.ValuePtr() + if vma.mappable != nil { + vma.mappable.RemoveMapping(ctx, mm, vmaAR, vma.off) + } + if vma.id != nil { + vma.id.DecRef() + } + mm.usageAS -= uint64(vmaAR.Length()) + vgap = mm.vmas.Remove(vseg) + vseg = vgap.NextSegment() + } + return vgap +} + +// vmaSetFunctions implements segment.Functions for vmaSet. 
type vmaSetFunctions struct{}

func (vmaSetFunctions) MinKey() usermem.Addr {
	return 0
}

func (vmaSetFunctions) MaxKey() usermem.Addr {
	return ^usermem.Addr(0)
}

func (vmaSetFunctions) ClearValue(vma *vma) {
	vma.mappable = nil
	vma.id = nil
	vma.hint = ""
}

// Merge merges two adjacent vmas into one when all of their attributes match
// (and, for file-backed vmas, their offsets are contiguous).
func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRange, vma2 vma) (vma, bool) {
	if vma1.mappable != vma2.mappable ||
		(vma1.mappable != nil && vma1.off+uint64(ar1.Length()) != vma2.off) ||
		vma1.realPerms != vma2.realPerms ||
		vma1.maxPerms != vma2.maxPerms ||
		vma1.private != vma2.private ||
		vma1.growsDown != vma2.growsDown ||
		vma1.id != vma2.id ||
		vma1.hint != vma2.hint {
		return vma{}, false
	}

	if vma2.id != nil {
		// The merged vma retains vma1's reference on the (shared) id; drop
		// vma2's now-redundant reference.
		vma2.id.DecRef()
	}
	return vma1, true
}

// Split splits a vma at the given address, adjusting the second half's file
// offset and taking an additional reference on its MappingIdentity.
func (vmaSetFunctions) Split(ar usermem.AddrRange, v vma, split usermem.Addr) (vma, vma) {
	v2 := v
	if v2.mappable != nil {
		v2.off += uint64(split - ar.Start)
	}
	if v2.id != nil {
		v2.id.IncRef()
	}
	return v, v2
}

// mappableOffsetAt returns the offset into vseg's Mappable that corresponds to
// addr.
//
// Preconditions: vseg.ValuePtr().mappable != nil. vseg.Range().Contains(addr).
func (vseg vmaIterator) mappableOffsetAt(addr usermem.Addr) uint64 {
	if checkInvariants {
		if !vseg.Ok() {
			panic("terminal vma iterator")
		}
		if vseg.ValuePtr().mappable == nil {
			panic("Mappable offset is meaningless for anonymous vma")
		}
		if !vseg.Range().Contains(addr) {
			panic(fmt.Sprintf("addr %v out of bounds %v", addr, vseg.Range()))
		}
	}

	vma := vseg.ValuePtr()
	vstart := vseg.Start()
	return vma.off + uint64(addr-vstart)
}

// mappableRange returns the MappableRange covered by the entire vma.
//
// Preconditions: vseg.ValuePtr().mappable != nil.
func (vseg vmaIterator) mappableRange() memmap.MappableRange {
	return vseg.mappableRangeOf(vseg.Range())
}

// mappableRangeOf returns the MappableRange that corresponds to the address
// range ar within vseg.
//
// Preconditions: vseg.ValuePtr().mappable != nil.
// vseg.Range().IsSupersetOf(ar). ar.Length() != 0.
func (vseg vmaIterator) mappableRangeOf(ar usermem.AddrRange) memmap.MappableRange {
	if checkInvariants {
		if !vseg.Ok() {
			panic("terminal vma iterator")
		}
		if vseg.ValuePtr().mappable == nil {
			panic("MappableRange is meaningless for anonymous vma")
		}
		if !ar.WellFormed() || ar.Length() <= 0 {
			panic(fmt.Sprintf("invalid ar: %v", ar))
		}
		if !vseg.Range().IsSupersetOf(ar) {
			panic(fmt.Sprintf("ar %v out of bounds %v", ar, vseg.Range()))
		}
	}

	vma := vseg.ValuePtr()
	vstart := vseg.Start()
	return memmap.MappableRange{vma.off + uint64(ar.Start-vstart), vma.off + uint64(ar.End-vstart)}
}

// addrRangeOf is the inverse of mappableRangeOf: it returns the address range
// within vseg that corresponds to the MappableRange mr.
//
// Preconditions: vseg.ValuePtr().mappable != nil.
// vseg.mappableRange().IsSupersetOf(mr). mr.Length() != 0.
func (vseg vmaIterator) addrRangeOf(mr memmap.MappableRange) usermem.AddrRange {
	if checkInvariants {
		if !vseg.Ok() {
			panic("terminal vma iterator")
		}
		if vseg.ValuePtr().mappable == nil {
			panic("MappableRange is meaningless for anonymous vma")
		}
		if !mr.WellFormed() || mr.Length() <= 0 {
			panic(fmt.Sprintf("invalid mr: %v", mr))
		}
		if !vseg.mappableRange().IsSupersetOf(mr) {
			panic(fmt.Sprintf("mr %v out of bounds %v", mr, vseg.mappableRange()))
		}
	}

	vma := vseg.ValuePtr()
	vstart := vseg.Start()
	return usermem.AddrRange{vstart + usermem.Addr(mr.Start-vma.off), vstart + usermem.Addr(mr.End-vma.off)}
}

// seekNextLowerBound returns mm.vmas.LowerBoundSegment(addr), but does so by
// scanning linearly forward from vseg.
//
// Preconditions: mm.mappingMu must be locked. addr >= vseg.Start().
+func (vseg vmaIterator) seekNextLowerBound(addr usermem.Addr) vmaIterator { + if checkInvariants { + if !vseg.Ok() { + panic("terminal vma iterator") + } + if addr < vseg.Start() { + panic(fmt.Sprintf("can't seek forward to %#x from %#x", addr, vseg.Start())) + } + } + for vseg.Ok() && addr >= vseg.End() { + vseg = vseg.NextSegment() + } + return vseg +} + +// availableRange returns the subset of vgap.Range() in which new vmas may be +// created without MMapOpts.Unmap == true. +func (vgap vmaGapIterator) availableRange() usermem.AddrRange { + ar := vgap.Range() + next := vgap.NextSegment() + if !next.Ok() || !next.ValuePtr().growsDown { + return ar + } + // Exclude guard pages. + if ar.Length() < guardBytes { + return usermem.AddrRange{ar.Start, ar.Start} + } + ar.End -= guardBytes + return ar +} diff --git a/pkg/sentry/platform/BUILD b/pkg/sentry/platform/BUILD new file mode 100644 index 000000000..d5be81f8d --- /dev/null +++ b/pkg/sentry/platform/BUILD @@ -0,0 +1,51 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "platform_state", + srcs = [ + "file_range.go", + ], + out = "platform_state.go", + package = "platform", +) + +go_template_instance( + name = "file_range", + out = "file_range.go", + package = "platform", + prefix = "File", + template = "//pkg/segment:generic_range", + types = { + "T": "uint64", + }, +) + +go_library( + name = "platform", + srcs = [ + "context.go", + "file_range.go", + "mmap_min_addr.go", + "platform.go", + "platform_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/platform", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/atomicbitops", + "//pkg/log", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/platform/safecopy", + "//pkg/sentry/safemem", + "//pkg/sentry/usage", 
+ "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + ], +) diff --git a/pkg/sentry/platform/context.go b/pkg/sentry/platform/context.go new file mode 100644 index 000000000..0d200a5e2 --- /dev/null +++ b/pkg/sentry/platform/context.go @@ -0,0 +1,36 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package platform + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" +) + +// contextID is the auth package's type for context.Context.Value keys. +type contextID int + +const ( + // CtxPlatform is a Context.Value key for a Platform. + CtxPlatform contextID = iota +) + +// FromContext returns the Platform that is used to execute ctx's application +// code, or nil if no such Platform exists. 
+func FromContext(ctx context.Context) Platform { + if v := ctx.Value(CtxPlatform); v != nil { + return v.(Platform) + } + return nil +} diff --git a/pkg/sentry/platform/filemem/BUILD b/pkg/sentry/platform/filemem/BUILD new file mode 100644 index 000000000..3c4d5b0b6 --- /dev/null +++ b/pkg/sentry/platform/filemem/BUILD @@ -0,0 +1,69 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "filemem_autogen_state", + srcs = [ + "filemem.go", + "filemem_state.go", + "usage_set.go", + ], + out = "filemem_autogen_state.go", + package = "filemem", +) + +go_template_instance( + name = "usage_set", + out = "usage_set.go", + consts = { + "minDegree": "10", + }, + imports = { + "platform": "gvisor.googlesource.com/gvisor/pkg/sentry/platform", + }, + package = "filemem", + prefix = "usage", + template = "//pkg/segment:generic_set", + types = { + "Key": "uint64", + "Range": "platform.FileRange", + "Value": "usageInfo", + "Functions": "usageSetFunctions", + }, +) + +go_library( + name = "filemem", + srcs = [ + "filemem.go", + "filemem_autogen_state.go", + "filemem_state.go", + "filemem_unsafe.go", + "usage_set.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/platform/filemem", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/log", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/memutil", + "//pkg/sentry/platform", + "//pkg/sentry/safemem", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + ], +) + +go_test( + name = "filemem_test", + size = "small", + srcs = ["filemem_test.go"], + embed = [":filemem"], + deps = ["//pkg/sentry/usermem"], +) diff --git a/pkg/sentry/platform/filemem/filemem.go b/pkg/sentry/platform/filemem/filemem.go new file mode 100644 index 000000000..d79c3c7f1 --- /dev/null +++ 
b/pkg/sentry/platform/filemem/filemem.go @@ -0,0 +1,838 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package filemem provides a reusable implementation of platform.Memory. +// +// It enables memory to be sourced from a memfd file. +// +// Lock order: +// +// filemem.FileMem.mu +// filemem.FileMem.mappingsMu +package filemem + +import ( + "fmt" + "math" + "os" + "sync" + "sync/atomic" + "syscall" + "time" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/memutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// FileMem is a platform.Memory that allocates from a host file that it owns. +type FileMem struct { + // Filemem models the backing file as follows: + // + // Each page in the file can be committed or uncommitted. A page is + // committed if the host kernel is spending resources to store its contents + // and uncommitted otherwise. This definition includes pages that the host + // kernel has swapped; this is intentional, to ensure that accounting does + // not change even if host kernel swapping behavior changes, and that + // memory used by pseudo-swap mechanisms like zswap is still accounted. 
+ // + // The initial contents of uncommitted pages are implicitly zero bytes. A + // read or write to the contents of an uncommitted page causes it to be + // committed. This is the only event that can cause a uncommitted page to + // be committed. + // + // fallocate(FALLOC_FL_PUNCH_HOLE) (FileMem.Decommit) causes committed + // pages to be uncommitted. This is the only event that can cause a + // committed page to be uncommitted. + // + // Filemem's accounting is based on identifying the set of committed pages. + // Since filemem does not have direct access to the MMU, tracking reads and + // writes to uncommitted pages to detect commitment would introduce + // additional page faults, which would be prohibitively expensive. Instead, + // filemem queries the host kernel to determine which pages are committed. + + // file is the backing memory file. The file pointer is immutable. + file *os.File + + mu sync.Mutex + + // usage maps each page in the file to metadata for that page. Pages for + // which no segment exists in usage are both unallocated (not in use) and + // uncommitted. + // + // Since usage stores usageInfo objects by value, clients should usually + // use usageIterator.ValuePtr() instead of usageIterator.Value() to get a + // pointer to the usageInfo rather than a copy. + // + // usage must be kept maximally merged (that is, there should never be two + // adjacent segments with the same values). At least markReclaimed depends + // on this property. + // + // usage is protected by mu. + usage usageSet + + // The UpdateUsage function scans all segments with knownCommitted set + // to false, sees which pages are committed and creates corresponding + // segments with knownCommitted set to true. + // + // In order to avoid unnecessary scans, usageExpected tracks the total + // file blocks expected. This is used to elide the scan when this + // matches the underlying file blocks. 
+ // + // To track swapped pages, usageSwapped tracks the discrepency between + // what is observed in core and what is reported by the file. When + // usageSwapped is non-zero, a sweep will be performed at least every + // second. The start of the last sweep is recorded in usageLast. + // + // All usage attributes are all protected by mu. + usageExpected uint64 + usageSwapped uint64 + usageLast time.Time + + // fileSize is the size of the backing memory file in bytes. fileSize is + // always a power-of-two multiple of chunkSize. + // + // fileSize is protected by mu. + fileSize int64 + + // destroyed is set by Destroy to instruct the reclaimer goroutine to + // release resources and exit. destroyed is protected by mu. + destroyed bool + + // reclaimable is true if usage may contain reclaimable pages. reclaimable + // is protected by mu. + reclaimable bool + + // reclaimCond is signaled (with mu locked) when reclaimable or destroyed + // transitions from false to true. + reclaimCond sync.Cond + + // Filemem pages are mapped into the local address space on the granularity + // of large pieces called chunks. mappings is a []uintptr that stores, for + // each chunk, the start address of a mapping of that chunk in the current + // process' address space, or 0 if no such mapping exists. Once a chunk is + // mapped, it is never remapped or unmapped until the filemem is destroyed. + // + // Mutating the mappings slice or its contents requires both holding + // mappingsMu and using atomic memory operations. (The slice is mutated + // whenever the file is expanded. Per the above, the only permitted + // mutation of the slice's contents is the assignment of a mapping to a + // chunk that was previously unmapped.) Reading the slice or its contents + // only requires *either* holding mappingsMu or using atomic memory + // operations. This allows FileMem.AccessPhysical to avoid locking in the + // common case where chunk mappings already exist. 
// usageInfo tracks usage information for a range of pages in the file.
type usageInfo struct {
	// kind is the usage kind.
	kind usage.MemoryKind

	// knownCommitted indicates whether this region is known to be
	// committed. If this is false, then the region may or may not have
	// been touched. If it is true however, then mincore (below) has
	// indicated that the page is present at least once.
	knownCommitted bool

	// refs is the reference count on the pages in this range.
	refs uint64
}

// incRef increments the range's reference count.
//
// Callers must hold the mutex protecting the containing usage set.
func (u *usageInfo) incRef() {
	u.refs++
}

// decRef decrements the range's reference count, panicking if the count is
// already zero (a reference-counting bug in the caller).
func (u *usageInfo) decRef() {
	if u.refs == 0 {
		panic("DecRef at 0 refs!")
	}
	u.refs--
}
+ log.Warningf("Failed to pre-map FileMem PROT_EXEC: %v", errno) + } else { + syscall.Syscall( + syscall.SYS_MUNMAP, + m, + usermem.PageSize, + 0) + } + + return f, nil +} + +// New creates a FileMem backed by a memfd file. +func New(name string) (*FileMem, error) { + fd, err := memutil.CreateMemFD(name, 0) + if err != nil { + return nil, err + } + return newFromFile(os.NewFile(uintptr(fd), name)) +} + +// Destroy implements platform.Memory.Destroy. +func (f *FileMem) Destroy() { + f.mu.Lock() + defer f.mu.Unlock() + f.destroyed = true + f.reclaimCond.Signal() +} + +// Allocate implements platform.Memory.Allocate. +func (f *FileMem) Allocate(length uint64, kind usage.MemoryKind) (platform.FileRange, error) { + if length == 0 || length%usermem.PageSize != 0 { + panic(fmt.Sprintf("invalid allocation length: %#x", length)) + } + + f.mu.Lock() + defer f.mu.Unlock() + + // Align hugepage-and-larger allocations on hugepage boundaries to try + // to take advantage of hugetmpfs. + alignment := uint64(usermem.PageSize) + if length >= usermem.HugePageSize { + alignment = usermem.HugePageSize + } + + start := findUnallocatedRange(&f.usage, length, alignment) + end := start + length + // File offsets are int64s. Since length must be strictly positive, end + // cannot legitimately be 0. + if end < start || int64(end) <= 0 { + return platform.FileRange{}, syserror.ENOMEM + } + + // Expand the file if needed. Double the file size on each expansion; + // uncommitted pages have effectively no cost. + fileSize := f.fileSize + for int64(end) > fileSize { + if fileSize >= 2*fileSize { + // fileSize overflow. 
// findUnallocatedRange returns the start of the first gap in usage that is
// at least length bytes long and whose start is aligned to alignment.
// alignment is used as a bitmask, so it must be a power of two (callers pass
// usermem.PageSize or usermem.HugePageSize).
//
// The returned offset may lie beyond the current file size; the caller
// (Allocate) is responsible for growing the file to fit.
func findUnallocatedRange(usage *usageSet, length, alignment uint64) uint64 {
	alignMask := alignment - 1
	var start uint64
	for seg := usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		r := seg.Range()
		if start >= r.End {
			// start was rounded up to an alignment boundary from the end
			// of a previous segment.
			continue
		}
		// This segment represents allocated or reclaimable pages; only the
		// range from start to the segment's beginning is allocatable, and the
		// next allocatable range begins after the segment.
		if r.Start > start && r.Start-start >= length {
			break
		}
		// The gap before this segment is too small (or start lies inside
		// the segment); advance start past the segment, rounded up to the
		// next alignment boundary.
		start = (r.End + alignMask) &^ alignMask
	}
	return start
}
- fallocate(2) + err := syscall.Fallocate( + int(f.file.Fd()), + _FALLOC_FL_PUNCH_HOLE|_FALLOC_FL_KEEP_SIZE, + int64(fr.Start), + int64(fr.Length())) + if err != nil { + return err + } + f.markDecommitted(fr) + return nil +} + +func (f *FileMem) markDecommitted(fr platform.FileRange) { + f.mu.Lock() + defer f.mu.Unlock() + // Since we're changing the knownCommitted attribute, we need to merge + // across the entire range to ensure that the usage tree is minimal. + gap := f.usage.ApplyContiguous(fr, func(seg usageIterator) { + val := seg.ValuePtr() + if val.knownCommitted { + // Drop the usageExpected appropriately. + amount := seg.Range().Length() + usage.MemoryAccounting.Dec(amount, val.kind) + f.usageExpected -= amount + val.knownCommitted = false + } + }) + if gap.Ok() { + panic(fmt.Sprintf("Decommit(%v): attempted to decommit unallocated pages %v:\n%v", fr, gap.Range(), &f.usage)) + } + f.usage.MergeRange(fr) +} + +// runReclaim implements the reclaimer goroutine, which continuously decommits +// reclaimable frames in order to reduce memory usage. +func (f *FileMem) runReclaim() { + for { + fr, ok := f.findReclaimable() + if !ok { + break + } + + if err := f.Decommit(fr); err != nil { + log.Warningf("Reclaim failed to decommit %v: %v", fr, err) + // Zero the frames manually. This won't reduce memory usage, but at + // least ensures that the frames will be zero when reallocated. + f.forEachMappingSlice(fr, func(bs []byte) { + for i := range bs { + bs[i] = 0 + } + }) + // Pretend the frames were decommitted even though they weren't, + // since the memory accounting implementation has no idea how to + // deal with this. + f.markDecommitted(fr) + } + f.markReclaimed(fr) + } + // We only get here if findReclaimable finds f.destroyed set and returns + // false. 
// findReclaimable blocks until either a reclaimable range of frames exists
// (returning the first such range and true) or the FileMem is being
// destroyed (returning a zero range and false).
//
// findReclaimable is called by the reclaimer goroutine (see runReclaim).
func (f *FileMem) findReclaimable() (platform.FileRange, bool) {
	f.mu.Lock()
	defer f.mu.Unlock()
	for {
		for {
			if f.destroyed {
				return platform.FileRange{}, false
			}
			if f.reclaimable {
				break
			}
			// Wait atomically releases f.mu while blocked and reacquires it
			// before returning, so destroyed/reclaimable must be rechecked.
			f.reclaimCond.Wait()
		}
		// Allocate returns the first usable range in offset order and is
		// currently a linear scan, so reclaiming from the beginning of the
		// file minimizes the expected latency of Allocate.
		for seg := f.usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
			if seg.ValuePtr().refs == 0 {
				return seg.Range(), true
			}
		}
		// No segment with refs == 0 exists; the reclaimable hint was stale.
		// Clear it and go back to waiting for the next signal.
		f.reclaimable = false
	}
}
+ if !seg.Ok() { + panic(fmt.Sprintf("Reclaimed pages %v include unreferenced pages:\n%v", fr, &f.usage)) + } + if !seg.Range().IsSupersetOf(fr) { + panic(fmt.Sprintf("Reclaimed pages %v are not entirely contained in segment %v with state %v:\n%v", fr, seg.Range(), seg.Value(), &f.usage)) + } + if got, want := seg.Value(), (usageInfo{ + kind: usage.System, + knownCommitted: false, + refs: 0, + }); got != want { + panic(fmt.Sprintf("Reclaimed pages %v in segment %v has incorrect state %v, wanted %v:\n%v", fr, seg.Range(), got, want, &f.usage)) + } + // Deallocate reclaimed pages. Even though all of seg is reclaimable, the + // caller of markReclaimed may not have decommitted it, so we can only mark + // fr as reclaimed. + f.usage.Remove(f.usage.Isolate(seg, fr)) +} + +// MapInto implements platform.File.MapInto. +func (f *FileMem) MapInto(as platform.AddressSpace, addr usermem.Addr, fr platform.FileRange, at usermem.AccessType, precommit bool) error { + if !fr.WellFormed() || fr.Length() == 0 || fr.Start%usermem.PageSize != 0 || fr.End%usermem.PageSize != 0 { + panic(fmt.Sprintf("invalid range: %v", fr)) + } + return as.MapFile(addr, int(f.file.Fd()), fr, at, precommit) +} + +// MapInternal implements platform.File.MapInternal. +func (f *FileMem) MapInternal(fr platform.FileRange, at usermem.AccessType) (safemem.BlockSeq, error) { + if !fr.WellFormed() || fr.Length() == 0 { + panic(fmt.Sprintf("invalid range: %v", fr)) + } + if at.Execute { + return safemem.BlockSeq{}, syserror.EACCES + } + + chunks := ((fr.End + chunkMask) >> chunkShift) - (fr.Start >> chunkShift) + if chunks == 1 { + // Avoid an unnecessary slice allocation. 
// DecRef implements platform.File.DecRef.
//
// When a range's reference count drops to zero it is not deallocated
// immediately: it is reclassified as usage.System and left for the reclaimer
// goroutine, which is woken via reclaimCond.
func (f *FileMem) DecRef(fr platform.FileRange) {
	// fr must be a well-formed, non-empty, page-aligned range.
	if !fr.WellFormed() || fr.Length() == 0 || fr.Start%usermem.PageSize != 0 || fr.End%usermem.PageSize != 0 {
		panic(fmt.Sprintf("invalid range: %v", fr))
	}

	// freed records whether any sub-range reached zero references.
	var freed bool

	f.mu.Lock()
	defer f.mu.Unlock()

	for seg := f.usage.FindSegment(fr.Start); seg.Ok() && seg.Start() < fr.End; seg = seg.NextSegment() {
		// Isolate splits the segment at fr's boundaries so that only the
		// portion inside fr is modified.
		seg = f.usage.Isolate(seg, fr)
		val := seg.ValuePtr()
		val.decRef()
		if val.refs == 0 {
			freed = true
			// Reclassify memory as System, until it's freed by the reclaim
			// goroutine.
			if val.knownCommitted {
				usage.MemoryAccounting.Move(seg.Range().Length(), usage.System, val.kind)
			}
			val.kind = usage.System
		}
	}
	// Isolate may have split segments; re-merge neighbors that are now
	// identical.
	f.usage.MergeAdjacent(fr)

	if freed {
		f.reclaimable = true
		f.reclaimCond.Signal()
	}
}
+func (f *FileMem) forEachMappingSlice(fr platform.FileRange, fn func([]byte)) error { + mappings := f.mappings.Load().([]uintptr) + for chunkStart := fr.Start &^ chunkMask; chunkStart < fr.End; chunkStart += chunkSize { + chunk := int(chunkStart >> chunkShift) + m := atomic.LoadUintptr(&mappings[chunk]) + if m == 0 { + var err error + mappings, m, err = f.getChunkMapping(chunk) + if err != nil { + return err + } + } + startOff := uint64(0) + if chunkStart < fr.Start { + startOff = fr.Start - chunkStart + } + endOff := uint64(chunkSize) + if chunkStart+chunkSize > fr.End { + endOff = fr.End - chunkStart + } + fn(unsafeSlice(m, chunkSize)[startOff:endOff]) + } + return nil +} + +func (f *FileMem) getChunkMapping(chunk int) ([]uintptr, uintptr, error) { + f.mappingsMu.Lock() + defer f.mappingsMu.Unlock() + // Another thread may have replaced f.mappings altogether due to file + // expansion. + mappings := f.mappings.Load().([]uintptr) + // Another thread may have already mapped the chunk. + if m := mappings[chunk]; m != 0 { + return mappings, m, nil + } + m, _, errno := syscall.Syscall6( + syscall.SYS_MMAP, + 0, + chunkSize, + syscall.PROT_READ|syscall.PROT_WRITE, + syscall.MAP_SHARED, + f.file.Fd(), + uintptr(chunk<<chunkShift)) + if errno != 0 { + return nil, 0, errno + } + atomic.StoreUintptr(&mappings[chunk], m) + return mappings, m, nil +} + +// UpdateUsage implements platform.Memory.UpdateUsage. +func (f *FileMem) UpdateUsage() error { + f.mu.Lock() + defer f.mu.Unlock() + + // If the underlying usage matches where the usage tree already + // represents, then we can just avoid the entire scan (we know it's + // accurate). 
+ currentUsage, err := f.TotalUsage() + if err != nil { + return err + } + if currentUsage == f.usageExpected && f.usageSwapped == 0 { + log.Debugf("UpdateUsage: skipped with usageSwapped=0.") + return nil + } + // If the current usage matches the expected but there's swap + // accounting, then ensure a scan takes place at least every second + // (when requested). + if currentUsage == f.usageExpected+f.usageSwapped && time.Now().Before(f.usageLast.Add(time.Second)) { + log.Debugf("UpdateUsage: skipped with usageSwapped!=0.") + return nil + } + + f.usageLast = time.Now() + err = f.updateUsageLocked(currentUsage, mincore) + log.Debugf("UpdateUsage: currentUsage=%d, usageExpected=%d, usageSwapped=%d.", + currentUsage, f.usageExpected, f.usageSwapped) + log.Debugf("UpdateUsage: took %v.", time.Since(f.usageLast)) + return err +} + +// updateUsageLocked attempts to detect commitment of previous-uncommitted +// pages by invoking checkCommitted, which is a function that, for each page i +// in bs, sets committed[i] to 1 if the page is committed and 0 otherwise. +// +// Precondition: f.mu must be held. +func (f *FileMem) updateUsageLocked(currentUsage uint64, checkCommitted func(bs []byte, committed []byte) error) error { + // Track if anything changed to elide the merge. In the common case, we + // expect all segments to be committed and no merge to occur. + changedAny := false + defer func() { + if changedAny { + f.usage.MergeAll() + } + + // Adjust the swap usage to reflect reality. + if f.usageExpected < currentUsage { + // Since no pages may be decommitted while we hold usageMu, we + // know that usage may have only increased since we got the + // last current usage. Therefore, if usageExpected is still + // short of currentUsage, we must assume that the difference is + // in pages that have been swapped. 
+ newUsageSwapped := currentUsage - f.usageExpected + if f.usageSwapped < newUsageSwapped { + usage.MemoryAccounting.Inc(newUsageSwapped-f.usageSwapped, usage.System) + } else { + usage.MemoryAccounting.Dec(f.usageSwapped-newUsageSwapped, usage.System) + } + f.usageSwapped = newUsageSwapped + } else if f.usageSwapped != 0 { + // We have more usage accounted for than the file itself. + // That's fine, we probably caught a race where pages were + // being committed while the above loop was running. Just + // report the higher number that we found and ignore swap. + usage.MemoryAccounting.Dec(f.usageSwapped, usage.System) + f.usageSwapped = 0 + } + }() + + // Reused mincore buffer, will generally be <= 4096 bytes. + var buf []byte + + // Iterate over all usage data. There will only be usage segments + // present when there is an associated reference. + for seg := f.usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + val := seg.Value() + + // Already known to be committed; ignore. + if val.knownCommitted { + continue + } + + // Assume that reclaimable pages (that aren't already known to be + // committed) are not committed. This isn't necessarily true, even + // after the reclaimer does Decommit(), because the kernel may + // subsequently back the hugepage-sized region containing the + // decommitted page with a hugepage. However, it's consistent with our + // treatment of unallocated pages, which have the same property. + if val.refs == 0 { + continue + } + + // Get the range for this segment. As we touch slices, the + // Start value will be walked along. + r := seg.Range() + + var checkErr error + err := f.forEachMappingSlice(r, func(s []byte) { + if checkErr != nil { + return + } + + // Ensure that we have sufficient buffer for the call + // (one byte per page). The length of each slice must + // be page-aligned. + bufLen := len(s) / usermem.PageSize + if len(buf) < bufLen { + buf = make([]byte, bufLen) + } + + // Query for new pages in core. 
+ if err := checkCommitted(s, buf); err != nil { + checkErr = err + return + } + + // Scan each page and switch out segments. + populatedRun := false + populatedRunStart := 0 + for i := 0; i <= bufLen; i++ { + // We run past the end of the slice here to + // simplify the logic and only set populated if + // we're still looking at elements. + populated := false + if i < bufLen { + populated = buf[i]&0x1 != 0 + } + + switch { + case populated == populatedRun: + // Keep the run going. + continue + case populated && !populatedRun: + // Begin the run. + populatedRun = true + populatedRunStart = i + // Keep going. + continue + case !populated && populatedRun: + // Finish the run by changing this segment. + runRange := platform.FileRange{ + Start: r.Start + uint64(populatedRunStart*usermem.PageSize), + End: r.Start + uint64(i*usermem.PageSize), + } + seg = f.usage.Isolate(seg, runRange) + seg.ValuePtr().knownCommitted = true + // Advance the segment only if we still + // have work to do in the context of + // the original segment from the for + // loop. Otherwise, the for loop itself + // will advance the segment + // appropriately. + if runRange.End != r.End { + seg = seg.NextSegment() + } + amount := runRange.Length() + usage.MemoryAccounting.Inc(amount, val.kind) + f.usageExpected += amount + changedAny = true + populatedRun = false + } + } + + // Advance r.Start. + r.Start += uint64(len(s)) + }) + if checkErr != nil { + return checkErr + } + if err != nil { + return err + } + } + + return nil +} + +// TotalUsage implements platform.Memory.TotalUsage. +func (f *FileMem) TotalUsage() (uint64, error) { + // Stat the underlying file to discover the underlying usage. stat(2) + // always reports the allocated block count in units of 512 bytes. This + // includes pages in the page cache and swapped pages. 
+ var stat syscall.Stat_t + if err := syscall.Fstat(int(f.file.Fd()), &stat); err != nil { + return 0, err + } + return uint64(stat.Blocks * 512), nil +} + +// TotalSize implements platform.Memory.TotalSize. +func (f *FileMem) TotalSize() uint64 { + f.mu.Lock() + defer f.mu.Unlock() + return uint64(f.fileSize) +} + +// File returns the memory file used by f. +func (f *FileMem) File() *os.File { + return f.file +} + +// String implements fmt.Stringer.String. +// +// Note that because f.String locks f.mu, calling f.String internally +// (including indirectly through the fmt package) risks recursive locking. +// Within the filemem package, use f.usage directly instead. +func (f *FileMem) String() string { + f.mu.Lock() + defer f.mu.Unlock() + return f.usage.String() +} + +type usageSetFunctions struct{} + +func (usageSetFunctions) MinKey() uint64 { + return 0 +} + +func (usageSetFunctions) MaxKey() uint64 { + return math.MaxUint64 +} + +func (usageSetFunctions) ClearValue(val *usageInfo) { +} + +func (usageSetFunctions) Merge(_ platform.FileRange, val1 usageInfo, _ platform.FileRange, val2 usageInfo) (usageInfo, bool) { + return val1, val1 == val2 +} + +func (usageSetFunctions) Split(_ platform.FileRange, val usageInfo, _ uint64) (usageInfo, usageInfo) { + return val, val +} diff --git a/pkg/sentry/platform/filemem/filemem_state.go b/pkg/sentry/platform/filemem/filemem_state.go new file mode 100644 index 000000000..5dace8fec --- /dev/null +++ b/pkg/sentry/platform/filemem/filemem_state.go @@ -0,0 +1,170 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package filemem + +import ( + "bytes" + "fmt" + "io" + "runtime" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/state" +) + +// SaveTo implements platform.Memory.SaveTo. +func (f *FileMem) SaveTo(w io.Writer) error { + // Wait for reclaim. + f.mu.Lock() + defer f.mu.Unlock() + for f.reclaimable { + f.reclaimCond.Signal() + f.mu.Unlock() + runtime.Gosched() + f.mu.Lock() + } + + // Ensure that all pages that contain data have knownCommitted set, since + // we only store knownCommitted pages below. + zeroPage := make([]byte, usermem.PageSize) + err := f.updateUsageLocked(0, func(bs []byte, committed []byte) error { + for pgoff := 0; pgoff < len(bs); pgoff += usermem.PageSize { + i := pgoff / usermem.PageSize + pg := bs[pgoff : pgoff+usermem.PageSize] + if !bytes.Equal(pg, zeroPage) { + committed[i] = 1 + continue + } + committed[i] = 0 + // Reading the page caused it to be committed; decommit it to + // reduce memory usage. + // + // "MADV_REMOVE [...] Free up a given range of pages and its + // associated backing store. This is equivalent to punching a hole + // in the corresponding byte range of the backing store (see + // fallocate(2))." - madvise(2) + if err := syscall.Madvise(pg, syscall.MADV_REMOVE); err != nil { + // This doesn't impact the correctness of saved memory, it + // just means that we're incrementally more likely to OOM. + // Complain, but don't abort saving. 
+ log.Warningf("Decommitting page %p while saving failed: %v", pg, err) + } + } + return nil + }) + if err != nil { + return err + } + + // Save metadata. + if err := state.Save(w, &f.fileSize, nil); err != nil { + return err + } + if err := state.Save(w, &f.usage, nil); err != nil { + return err + } + + // Dump out committed pages. + for seg := f.usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + if !seg.Value().knownCommitted { + continue + } + // Write a header to distinguish from objects. + if err := state.WriteHeader(w, uint64(seg.Range().Length()), false); err != nil { + return err + } + // Write out data. + var ioErr error + err := f.forEachMappingSlice(seg.Range(), func(s []byte) { + if ioErr != nil { + return + } + _, ioErr = w.Write(s) + }) + if ioErr != nil { + return ioErr + } + if err != nil { + return err + } + + // Update accounting for restored pages. We need to do this here since + // these segments are marked as "known committed", and will be skipped + // over on accounting scans. + usage.MemoryAccounting.Inc(seg.Range().Length(), seg.Value().kind) + } + + return nil +} + +// LoadFrom implements platform.Memory.LoadFrom. +func (f *FileMem) LoadFrom(r io.Reader) error { + // Load metadata. + if err := state.Load(r, &f.fileSize, nil); err != nil { + return err + } + if err := f.file.Truncate(f.fileSize); err != nil { + return err + } + newMappings := make([]uintptr, f.fileSize>>chunkShift) + f.mappings.Store(newMappings) + if err := state.Load(r, &f.usage, nil); err != nil { + return err + } + + // Load committed pages. + for seg := f.usage.FirstSegment(); seg.Ok(); seg = seg.NextSegment() { + if !seg.Value().knownCommitted { + continue + } + // Verify header. + length, object, err := state.ReadHeader(r) + if err != nil { + return err + } + if object { + // Not expected. + return fmt.Errorf("unexpected object") + } + if expected := uint64(seg.Range().Length()); length != expected { + // Size mismatch. 
+ return fmt.Errorf("mismatched segment: expected %d, got %d", expected, length) + } + // Read data. + var ioErr error + err = f.forEachMappingSlice(seg.Range(), func(s []byte) { + if ioErr != nil { + return + } + _, ioErr = io.ReadFull(r, s) + }) + if ioErr != nil { + return ioErr + } + if err != nil { + return err + } + + // Update accounting for restored pages. We need to do this here since + // these segments are marked as "known committed", and will be skipped + // over on accounting scans. + usage.MemoryAccounting.Inc(seg.End()-seg.Start(), seg.Value().kind) + } + + return nil +} diff --git a/pkg/sentry/platform/filemem/filemem_test.go b/pkg/sentry/platform/filemem/filemem_test.go new file mode 100644 index 000000000..46ffcf116 --- /dev/null +++ b/pkg/sentry/platform/filemem/filemem_test.go @@ -0,0 +1,122 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filemem + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +const ( + page = usermem.PageSize + hugepage = usermem.HugePageSize +) + +func TestFindUnallocatedRange(t *testing.T) { + for _, test := range []struct { + desc string + usage *usageSegmentDataSlices + length uint64 + alignment uint64 + start uint64 + }{ + { + desc: "Initial allocation succeeds", + usage: &usageSegmentDataSlices{}, + length: page, + alignment: page, + start: 0, + }, + { + desc: "Allocation begins at start of file", + usage: &usageSegmentDataSlices{ + Start: []uint64{page}, + End: []uint64{2 * page}, + Values: []usageInfo{{refs: 1}}, + }, + length: page, + alignment: page, + start: 0, + }, + { + desc: "In-use frames are not allocatable", + usage: &usageSegmentDataSlices{ + Start: []uint64{0, page}, + End: []uint64{page, 2 * page}, + Values: []usageInfo{{refs: 1}, {refs: 2}}, + }, + length: page, + alignment: page, + start: 2 * page, + }, + { + desc: "Reclaimable frames are not allocatable", + usage: &usageSegmentDataSlices{ + Start: []uint64{0, page, 2 * page}, + End: []uint64{page, 2 * page, 3 * page}, + Values: []usageInfo{{refs: 1}, {refs: 0}, {refs: 1}}, + }, + length: page, + alignment: page, + start: 3 * page, + }, + { + desc: "Gaps between in-use frames are allocatable", + usage: &usageSegmentDataSlices{ + Start: []uint64{0, 2 * page}, + End: []uint64{page, 3 * page}, + Values: []usageInfo{{refs: 1}, {refs: 1}}, + }, + length: page, + alignment: page, + start: page, + }, + { + desc: "Inadequately-sized gaps are rejected", + usage: &usageSegmentDataSlices{ + Start: []uint64{0, 2 * page}, + End: []uint64{page, 3 * page}, + Values: []usageInfo{{refs: 1}, {refs: 1}}, + }, + length: 2 * page, + alignment: page, + start: 3 * page, + }, + { + desc: "Hugepage alignment is honored", + usage: &usageSegmentDataSlices{ + Start: []uint64{0, hugepage + page}, + // Hugepage-sized gap here that shouldn't be allocated from + // since it's incorrectly 
aligned. + End: []uint64{page, hugepage + 2*page}, + Values: []usageInfo{{refs: 1}, {refs: 1}}, + }, + length: hugepage, + alignment: hugepage, + start: 2 * hugepage, + }, + } { + t.Run(test.desc, func(t *testing.T) { + var usage usageSet + if err := usage.ImportSortedSlices(test.usage); err != nil { + t.Fatalf("Failed to initialize usage from %v: %v", test.usage, err) + } + if got, want := findUnallocatedRange(&usage, test.length, test.alignment), test.start; got != want { + t.Errorf("findUnallocatedRange(%v, %d, %d): got %d, wanted %d", test.usage, test.length, test.alignment, got, want) + } + }) + } +} diff --git a/pkg/sentry/platform/filemem/filemem_unsafe.go b/pkg/sentry/platform/filemem/filemem_unsafe.go new file mode 100644 index 000000000..a23b9825a --- /dev/null +++ b/pkg/sentry/platform/filemem/filemem_unsafe.go @@ -0,0 +1,40 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filemem + +import ( + "reflect" + "syscall" + "unsafe" +) + +func unsafeSlice(addr uintptr, length int) (slice []byte) { + sh := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) + sh.Data = addr + sh.Len = length + sh.Cap = length + return +} + +func mincore(s []byte, buf []byte) error { + if _, _, errno := syscall.RawSyscall( + syscall.SYS_MINCORE, + uintptr(unsafe.Pointer(&s[0])), + uintptr(len(s)), + uintptr(unsafe.Pointer(&buf[0]))); errno != 0 { + return errno + } + return nil +} diff --git a/pkg/sentry/platform/interrupt/BUILD b/pkg/sentry/platform/interrupt/BUILD new file mode 100644 index 000000000..33dde2a31 --- /dev/null +++ b/pkg/sentry/platform/interrupt/BUILD @@ -0,0 +1,19 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "interrupt", + srcs = [ + "interrupt.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/platform/interrupt", + visibility = ["//pkg/sentry:internal"], +) + +go_test( + name = "interrupt_test", + size = "small", + srcs = ["interrupt_test.go"], + embed = [":interrupt"], +) diff --git a/pkg/sentry/platform/interrupt/interrupt.go b/pkg/sentry/platform/interrupt/interrupt.go new file mode 100644 index 000000000..ca4f42087 --- /dev/null +++ b/pkg/sentry/platform/interrupt/interrupt.go @@ -0,0 +1,96 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package interrupt provides an interrupt helper. +package interrupt + +import ( + "fmt" + "sync" +) + +// Receiver receives interrupt notifications from a Forwarder. +type Receiver interface { + // NotifyInterrupt is called when the Receiver receives an interrupt. + NotifyInterrupt() +} + +// Forwarder is a helper for delivering delayed signal interruptions. +// +// This helps platform implementations with Interrupt semantics. +type Forwarder struct { + // mu protects the below. + mu sync.Mutex + + // dst is the function to be called when NotifyInterrupt() is called. If + // dst is nil, pending will be set instead, causing the next call to + // Enable() to return false. + dst Receiver + pending bool +} + +// Enable attempts to enable interrupt forwarding to r. If f has already +// received an interrupt, Enable does nothing and returns false. Otherwise, +// future calls to f.NotifyInterrupt() cause r.NotifyInterrupt() to be called, +// and Enable returns true. +// +// Usage: +// +// if !f.Enable(r) { +// // There was an interrupt. +// return +// } +// defer f.Disable() +// +// Preconditions: r must not be nil. f must not already be forwarding +// interrupts to a Receiver. +func (f *Forwarder) Enable(r Receiver) bool { + if r == nil { + panic("nil Receiver") + } + f.mu.Lock() + if f.dst != nil { + f.mu.Unlock() + panic(fmt.Sprintf("already forwarding interrupts to %+v", f.dst)) + } + if f.pending { + f.pending = false + f.mu.Unlock() + return false + } + f.dst = r + f.mu.Unlock() + return true +} + +// Disable stops interrupt forwarding. If interrupt forwarding is already +// disabled, Disable is a no-op. +func (f *Forwarder) Disable() { + f.mu.Lock() + f.dst = nil + f.mu.Unlock() +} + +// NotifyInterrupt implements Receiver.NotifyInterrupt. If interrupt forwarding +// is enabled, the configured Receiver will be notified. Otherwise the +// interrupt will be delivered to the next call to Enable. 
+func (f *Forwarder) NotifyInterrupt() { + f.mu.Lock() + if f.dst != nil { + f.dst.NotifyInterrupt() + } else { + f.pending = true + } + f.mu.Unlock() +} diff --git a/pkg/sentry/platform/interrupt/interrupt_test.go b/pkg/sentry/platform/interrupt/interrupt_test.go new file mode 100644 index 000000000..7c49eeea6 --- /dev/null +++ b/pkg/sentry/platform/interrupt/interrupt_test.go @@ -0,0 +1,99 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interrupt + +import ( + "testing" +) + +type countingReceiver struct { + interrupts int +} + +// NotifyInterrupt implements Receiver.NotifyInterrupt. +func (r *countingReceiver) NotifyInterrupt() { + r.interrupts++ +} + +func TestSingleInterruptBeforeEnable(t *testing.T) { + var ( + f Forwarder + r countingReceiver + ) + f.NotifyInterrupt() + // The interrupt should cause the first Enable to fail. + if f.Enable(&r) { + f.Disable() + t.Fatalf("Enable: got true, wanted false") + } + // The failing Enable "acknowledges" the interrupt, allowing future Enables + // to succeed. + if !f.Enable(&r) { + t.Fatalf("Enable: got false, wanted true") + } + f.Disable() +} + +func TestMultipleInterruptsBeforeEnable(t *testing.T) { + var ( + f Forwarder + r countingReceiver + ) + f.NotifyInterrupt() + f.NotifyInterrupt() + // The interrupts should cause the first Enable to fail. 
+ if f.Enable(&r) { + f.Disable() + t.Fatalf("Enable: got true, wanted false") + } + // Interrupts are deduplicated while the Forwarder is disabled, so the + // failing Enable "acknowledges" all interrupts, allowing future Enables to + // succeed. + if !f.Enable(&r) { + t.Fatalf("Enable: got false, wanted true") + } + f.Disable() +} + +func TestSingleInterruptAfterEnable(t *testing.T) { + var ( + f Forwarder + r countingReceiver + ) + if !f.Enable(&r) { + t.Fatalf("Enable: got false, wanted true") + } + defer f.Disable() + f.NotifyInterrupt() + if r.interrupts != 1 { + t.Errorf("interrupts: got %d, wanted 1", r.interrupts) + } +} + +func TestMultipleInterruptsAfterEnable(t *testing.T) { + var ( + f Forwarder + r countingReceiver + ) + if !f.Enable(&r) { + t.Fatalf("Enable: got false, wanted true") + } + defer f.Disable() + f.NotifyInterrupt() + f.NotifyInterrupt() + if r.interrupts != 2 { + t.Errorf("interrupts: got %d, wanted 2", r.interrupts) + } +} diff --git a/pkg/sentry/platform/kvm/BUILD b/pkg/sentry/platform/kvm/BUILD new file mode 100644 index 000000000..d902e344a --- /dev/null +++ b/pkg/sentry/platform/kvm/BUILD @@ -0,0 +1,90 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") + +go_template_instance( + name = "host_map_set", + out = "host_map_set.go", + consts = { + "minDegree": "15", + }, + imports = { + "usermem": "gvisor.googlesource.com/gvisor/pkg/sentry/usermem", + }, + package = "kvm", + prefix = "hostMap", + template = "//pkg/segment:generic_set", + types = { + "Key": "usermem.Addr", + "Range": "usermem.AddrRange", + "Value": "uintptr", + "Functions": "hostMapSetFunctions", + }, +) + +go_library( + name = "kvm", + srcs = [ + "address_space.go", + "bluepill.go", + "bluepill_amd64.go", + "bluepill_amd64.s", + "bluepill_amd64_unsafe.go", + "bluepill_fault.go", + "bluepill_unsafe.go", + "context.go", + "host_map.go", + 
"host_map_set.go", + "kvm.go", + "kvm_amd64.go", + "kvm_amd64_unsafe.go", + "kvm_const.go", + "machine.go", + "machine_amd64.go", + "machine_amd64_unsafe.go", + "machine_unsafe.go", + "physical_map.go", + "virtual_map.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/platform/kvm", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/cpuid", + "//pkg/log", + "//pkg/sentry/arch", + "//pkg/sentry/platform", + "//pkg/sentry/platform/filemem", + "//pkg/sentry/platform/interrupt", + "//pkg/sentry/platform/procid", + "//pkg/sentry/platform/ring0", + "//pkg/sentry/platform/ring0/pagetables", + "//pkg/sentry/platform/safecopy", + "//pkg/sentry/time", + "//pkg/sentry/usermem", + "//pkg/tmutex", + ], +) + +go_test( + name = "kvm_test", + size = "small", + srcs = [ + "kvm_test.go", + "virtual_map_test.go", + ], + embed = [":kvm"], + tags = [ + "nogotsan", + "requires-kvm", + ], + deps = [ + "//pkg/sentry/arch", + "//pkg/sentry/platform", + "//pkg/sentry/platform/kvm/testutil", + "//pkg/sentry/platform/ring0", + "//pkg/sentry/platform/ring0/pagetables", + "//pkg/sentry/usermem", + ], +) diff --git a/pkg/sentry/platform/kvm/address_space.go b/pkg/sentry/platform/kvm/address_space.go new file mode 100644 index 000000000..791f038b0 --- /dev/null +++ b/pkg/sentry/platform/kvm/address_space.go @@ -0,0 +1,207 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kvm + +import ( + "reflect" + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/filemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// addressSpace is a wrapper for PageTables. +type addressSpace struct { + platform.NoAddressSpaceIO + + // filemem is the memory instance. + filemem *filemem.FileMem + + // machine is the underlying machine. + machine *machine + + // pageTables are for this particular address space. + pageTables *pagetables.PageTables + + // dirtySet is the set of dirty vCPUs. + // + // The key is the vCPU, the value is a shared uint32 pointer that + // indicates whether or not the context is clean. A zero here indicates + // that the context should be cleaned prior to re-entry. + dirtySet sync.Map + + // files contains files mapped in the host address space. + files hostMap +} + +// Invalidate interrupts all dirty contexts. +func (as *addressSpace) Invalidate() { + as.dirtySet.Range(func(key, value interface{}) bool { + c := key.(*vCPU) + v := value.(*uint32) + atomic.StoreUint32(v, 0) // Invalidation required. + c.Bounce() // Force a kernel transition. + return true // Keep iterating. + }) +} + +// Touch adds the given vCPU to the dirty list. +func (as *addressSpace) Touch(c *vCPU) *uint32 { + value, ok := as.dirtySet.Load(c) + if !ok { + value, _ = as.dirtySet.LoadOrStore(c, new(uint32)) + } + return value.(*uint32) +} + +func (as *addressSpace) mapHost(addr usermem.Addr, m hostMapEntry, at usermem.AccessType) (inv bool) { + for m.length > 0 { + physical, length, ok := TranslateToPhysical(m.addr) + if !ok { + panic("unable to translate segment") + } + if length > m.length { + length = m.length + } + + // Ensure that this map has physical mappings. 
If the page does + // not have physical mappings, the KVM module may inject + // spurious exceptions when emulation fails (i.e. it tries to + // emulate because the RIP is pointed at those pages). + as.machine.mapPhysical(physical, length) + + // Install the page table mappings. Note that the ordering is + // important; if the pagetable mappings were installed before + // ensuring the physical pages were available, then some other + // thread could theoretically access them. + prev := as.pageTables.Map(addr, length, true /* user */, at, physical) + inv = inv || prev + m.addr += length + m.length -= length + addr += usermem.Addr(length) + } + + return inv +} + +func (as *addressSpace) mapHostFile(addr usermem.Addr, fd int, fr platform.FileRange, at usermem.AccessType) error { + // Create custom host mappings. + ms, err := as.files.CreateMappings(usermem.AddrRange{ + Start: addr, + End: addr + usermem.Addr(fr.End-fr.Start), + }, at, fd, fr.Start) + if err != nil { + return err + } + + inv := false + for _, m := range ms { + // The host mapped slices are guaranteed to be aligned. + inv = inv || as.mapHost(addr, m, at) + addr += usermem.Addr(m.length) + } + if inv { + as.Invalidate() + } + + return nil +} + +func (as *addressSpace) mapFilemem(addr usermem.Addr, fr platform.FileRange, at usermem.AccessType, precommit bool) error { + // TODO: Lock order at the platform level is not sufficiently + // well-defined to guarantee that the caller (FileMem.MapInto) is not + // holding any locks that FileMem.MapInternal may take. + + // Retrieve mappings for the underlying filemem. Note that the + // permissions here are largely irrelevant, since it corresponds to + // physical memory for the guest. We enforce the given access type + // below, in the guest page tables. + bs, err := as.filemem.MapInternal(fr, usermem.AccessType{ + Read: true, + Write: true, + }) + if err != nil { + return err + } + + // Save the original range for invalidation. 
+ orig := usermem.AddrRange{ + Start: addr, + End: addr + usermem.Addr(fr.End-fr.Start), + } + + inv := false + for !bs.IsEmpty() { + b := bs.Head() + bs = bs.Tail() + // Since fr was page-aligned, b should also be page-aligned. We do the + // lookup in our host page tables for this translation. + s := b.ToSlice() + if precommit { + for i := 0; i < len(s); i += usermem.PageSize { + _ = s[i] // Touch to commit. + } + } + inv = inv || as.mapHost(addr, hostMapEntry{ + addr: reflect.ValueOf(&s[0]).Pointer(), + length: uintptr(len(s)), + }, at) + addr += usermem.Addr(len(s)) + } + if inv { + as.Invalidate() + as.files.DeleteMapping(orig) + } + + return nil +} + +// MapFile implements platform.AddressSpace.MapFile. +func (as *addressSpace) MapFile(addr usermem.Addr, fd int, fr platform.FileRange, at usermem.AccessType, precommit bool) error { + // Create an appropriate mapping. If this is filemem, we don't create + // custom mappings for each in-application mapping. For files however, + // we create distinct mappings for each address space. Unfortunately, + // there's not a better way to manage this here. The file underlying + // this fd can change at any time, so we can't actually index the file + // and share between address spaces. Oh well. It's all referring to the + // same physical pages, hopefully we don't run out of address space. + if fd != int(as.filemem.File().Fd()) { + // N.B. precommit is ignored for host files. + return as.mapHostFile(addr, fd, fr, at) + } + + return as.mapFilemem(addr, fr, at, precommit) +} + +// Unmap unmaps the given range by calling pagetables.PageTables.Unmap. +func (as *addressSpace) Unmap(addr usermem.Addr, length uint64) { + if prev := as.pageTables.Unmap(addr, uintptr(length)); prev { + as.Invalidate() + as.files.DeleteMapping(usermem.AddrRange{ + Start: addr, + End: addr + usermem.Addr(length), + }) + } +} + +// Release releases the page tables. 
+func (as *addressSpace) Release() error { + as.Unmap(0, ^uint64(0)) + as.pageTables.Release() + return nil +} diff --git a/pkg/sentry/platform/kvm/bluepill.go b/pkg/sentry/platform/kvm/bluepill.go new file mode 100644 index 000000000..ecc33d7dd --- /dev/null +++ b/pkg/sentry/platform/kvm/bluepill.go @@ -0,0 +1,41 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvm + +import ( + "fmt" + "reflect" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/safecopy" +) + +// bluepill enters guest mode. +func bluepill(*vCPU) + +// sighandler is the signal entry point. +func sighandler() + +// savedHandler is a pointer to the previous handler. +// +// This is called by bluepillHandler. +var savedHandler uintptr + +func init() { + // Install the handler. + if err := safecopy.ReplaceSignalHandler(syscall.SIGSEGV, reflect.ValueOf(sighandler).Pointer(), &savedHandler); err != nil { + panic(fmt.Sprintf("Unable to set handler for signal %d: %v", syscall.SIGSEGV, err)) + } +} diff --git a/pkg/sentry/platform/kvm/bluepill_amd64.go b/pkg/sentry/platform/kvm/bluepill_amd64.go new file mode 100644 index 000000000..a2baefb7d --- /dev/null +++ b/pkg/sentry/platform/kvm/bluepill_amd64.go @@ -0,0 +1,143 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package kvm + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0" +) + +var ( + // bounceSignal is the signal used for bouncing KVM. + // + // We use SIGCHLD because it is not masked by the runtime, and + // it will be ignored properly by other parts of the kernel. + bounceSignal = syscall.SIGCHLD + + // bounceSignalMask has only bounceSignal set. + bounceSignalMask = uint64(1 << (uint64(bounceSignal) - 1)) + + // bounce is the interrupt vector used to return to the kernel. + bounce = uint32(ring0.VirtualizationException) +) + +// redpill on amd64 invokes a syscall with -1. +// +//go:nosplit +func redpill() { + syscall.RawSyscall(^uintptr(0), 0, 0, 0) +} + +// bluepillArchEnter is called during bluepillEnter. 
+// +//go:nosplit +func bluepillArchEnter(context *arch.SignalContext64) (c *vCPU) { + c = vCPUPtr(uintptr(context.Rax)) + regs := c.CPU.Registers() + regs.R8 = context.R8 + regs.R9 = context.R9 + regs.R10 = context.R10 + regs.R11 = context.R11 + regs.R12 = context.R12 + regs.R13 = context.R13 + regs.R14 = context.R14 + regs.R15 = context.R15 + regs.Rdi = context.Rdi + regs.Rsi = context.Rsi + regs.Rbp = context.Rbp + regs.Rbx = context.Rbx + regs.Rdx = context.Rdx + regs.Rax = context.Rax + regs.Rcx = context.Rcx + regs.Rsp = context.Rsp + regs.Rip = context.Rip + regs.Eflags = context.Eflags + regs.Eflags &^= uint64(ring0.KernelFlagsClear) + regs.Eflags |= ring0.KernelFlagsSet + regs.Cs = uint64(ring0.Kcode) + regs.Ds = uint64(ring0.Udata) + regs.Es = uint64(ring0.Udata) + regs.Fs = uint64(ring0.Udata) + regs.Ss = uint64(ring0.Kdata) + + // ring0 uses GS exclusively, so we use GS_base to store the location + // of the floating point address. + // + // The address will be restored directly after running the VCPU, and + // will be saved again prior to halting. We rely on the fact that the + // SaveFloatingPointer/LoadFloatingPoint functions use the most + // efficient mechanism available (including compression) so the state + // size is guaranteed to be less than what's pointed to here. + regs.Gs_base = uint64(context.Fpstate) + return +} + +// bluepillSyscall handles kernel syscalls. +// +//go:nosplit +func bluepillSyscall() { + regs := ring0.Current().Registers() + if regs.Rax != ^uint64(0) { + regs.Rip -= 2 // Rewind. + } + ring0.SaveFloatingPoint(bytePtr(uintptr(regs.Gs_base))) + ring0.Halt() + ring0.LoadFloatingPoint(bytePtr(uintptr(regs.Gs_base))) +} + +// bluepillException handles kernel exceptions. 
+// +//go:nosplit +func bluepillException(vector ring0.Vector) { + regs := ring0.Current().Registers() + if vector == ring0.Vector(bounce) { + // These should not interrupt kernel execution; point the Rip + // to zero to ensure that we get a reasonable panic when we + // attempt to return. + regs.Rip = 0 + } + ring0.SaveFloatingPoint(bytePtr(uintptr(regs.Gs_base))) + ring0.Halt() + ring0.LoadFloatingPoint(bytePtr(uintptr(regs.Gs_base))) +} + +// bluepillArchExit is called during bluepillEnter. +// +//go:nosplit +func bluepillArchExit(c *vCPU, context *arch.SignalContext64) { + regs := c.CPU.Registers() + context.R8 = regs.R8 + context.R9 = regs.R9 + context.R10 = regs.R10 + context.R11 = regs.R11 + context.R12 = regs.R12 + context.R13 = regs.R13 + context.R14 = regs.R14 + context.R15 = regs.R15 + context.Rdi = regs.Rdi + context.Rsi = regs.Rsi + context.Rbp = regs.Rbp + context.Rbx = regs.Rbx + context.Rdx = regs.Rdx + context.Rax = regs.Rax + context.Rcx = regs.Rcx + context.Rsp = regs.Rsp + context.Rip = regs.Rip + context.Eflags = regs.Eflags +} diff --git a/pkg/sentry/platform/kvm/bluepill_amd64.s b/pkg/sentry/platform/kvm/bluepill_amd64.s new file mode 100644 index 000000000..0881bd5f5 --- /dev/null +++ b/pkg/sentry/platform/kvm/bluepill_amd64.s @@ -0,0 +1,87 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "textflag.h" + +// VCPU_CPU is the location of the CPU in the vCPU struct. 
+// +// This is guaranteed to be zero. +#define VCPU_CPU 0x0 + +// CPU_SELF is the self reference in ring0's percpu. +// +// This is guaranteed to be zero. +#define CPU_SELF 0x0 + +// Context offsets. +// +// Only limited use of the context is done in the assembly stub below, most is +// done in the Go handlers. However, the RIP must be examined. +#define CONTEXT_RAX 0x90 +#define CONTEXT_RIP 0xa8 +#define CONTEXT_FP 0xe0 + +// CLI is the literal byte for the disable interrupts instruction. +// +// This is checked as the source of the fault. +#define CLI $0xfa + +// See bluepill.go. +TEXT ·bluepill(SB),NOSPLIT,$0 +begin: + MOVQ vcpu+0(FP), AX + LEAQ VCPU_CPU(AX), BX + BYTE CLI; +check_vcpu: + MOVQ CPU_SELF(GS), CX + CMPQ BX, CX + JE right_vCPU +wrong_vcpu: + CALL ·redpill(SB) + JMP begin +right_vCPU: + RET + +// sighandler: see bluepill.go for documentation. +// +// The arguments are the following: +// +// DI - The signal number. +// SI - Pointer to siginfo_t structure. +// DX - Pointer to ucontext structure. +// +TEXT ·sighandler(SB),NOSPLIT,$0 + // Check if the signal is from the kernel. + MOVQ $0x80, CX + CMPL CX, 0x8(SI) + JNE fallback + + // Check if RIP is disable interrupts. + MOVQ CONTEXT_RIP(DX), CX + CMPQ CX, $0x0 + JE fallback + CMPB 0(CX), CLI + JNE fallback + + // Call the bluepillHandler. + PUSHQ DX // First argument (context). + CALL ·bluepillHandler(SB) // Call the handler. + POPQ DX // Discard the argument. + RET + +fallback: + // Jump to the previous signal handler. + XORQ CX, CX + MOVQ ·savedHandler(SB), AX + JMP AX diff --git a/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go b/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go new file mode 100644 index 000000000..61ca61dcb --- /dev/null +++ b/pkg/sentry/platform/kvm/bluepill_amd64_unsafe.go @@ -0,0 +1,28 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package kvm + +import ( + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" +) + +// bluepillArchContext returns the arch-specific context. +func bluepillArchContext(context unsafe.Pointer) *arch.SignalContext64 { + return &((*arch.UContext64)(context).MContext) +} diff --git a/pkg/sentry/platform/kvm/bluepill_fault.go b/pkg/sentry/platform/kvm/bluepill_fault.go new file mode 100644 index 000000000..7c8c7bc37 --- /dev/null +++ b/pkg/sentry/platform/kvm/bluepill_fault.go @@ -0,0 +1,127 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvm + +import ( + "sync/atomic" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +const ( + // faultBlockSize is the size used for servicing memory faults. + // + // This should be large enough to avoid frequent faults and avoid using + // all available KVM slots (~512), but small enough that KVM does not + // complain about slot sizes (~4GB). See handleBluepillFault for how + // this block is used. 
+ faultBlockSize = 2 << 30 + + // faultBlockMask is the mask for the fault blocks. + // + // This must be typed to avoid overflow complaints (ugh). + faultBlockMask = ^uintptr(faultBlockSize - 1) +) + +// yield yields the CPU. +// +//go:nosplit +func yield() { + syscall.RawSyscall(syscall.SYS_SCHED_YIELD, 0, 0, 0) +} + +// calculateBluepillFault calculates the fault address range. +// +//go:nosplit +func calculateBluepillFault(m *machine, physical uintptr) (virtualStart, physicalStart, length uintptr, ok bool) { + alignedPhysical := physical &^ uintptr(usermem.PageSize-1) + for _, pr := range physicalRegions { + end := pr.physical + pr.length + if physical < pr.physical || physical >= end { + continue + } + + // Adjust the block to match our size. + physicalStart = alignedPhysical & faultBlockMask + if physicalStart < pr.physical { + // Bound the starting point to the start of the region. + physicalStart = pr.physical + } + virtualStart = pr.virtual + (physicalStart - pr.physical) + physicalEnd := physicalStart + faultBlockSize + if physicalEnd > end { + physicalEnd = end + } + length = physicalEnd - physicalStart + return virtualStart, physicalStart, length, true + } + + return 0, 0, 0, false +} + +// handleBluepillFault handles a physical fault. +// +// The corresponding virtual address is returned. This may throw on error. +// +//go:nosplit +func handleBluepillFault(m *machine, physical uintptr) (uintptr, bool) { + // Paging fault: we need to map the underlying physical pages for this + // fault. This all has to be done in this function because we're in a + // signal handler context. (We can't call any functions that might + // split the stack.) + virtualStart, physicalStart, length, ok := calculateBluepillFault(m, physical) + if !ok { + return 0, false + } + + // Set the KVM slot. + // + // First, we need to acquire the exclusive right to set a slot. See + // machine.nextSlot for information about the protocol. 
+ slot := atomic.SwapUint32(&m.nextSlot, ^uint32(0)) + for slot == ^uint32(0) { + yield() // Race with another call. + slot = atomic.SwapUint32(&m.nextSlot, ^uint32(0)) + } + errno := m.setMemoryRegion(int(slot), physicalStart, length, virtualStart) + if errno == 0 { + // Successfully added region; we can increment nextSlot and + // allow another set to proceed here. + atomic.StoreUint32(&m.nextSlot, slot+1) + return virtualStart + (physical - physicalStart), true + } + + // Release our slot (still available). + atomic.StoreUint32(&m.nextSlot, slot) + + switch errno { + case syscall.EEXIST: + // The region already exists. It's possible that we raced with + // another vCPU here. We just revert nextSlot and return true, + // because this must have been satisfied by some other vCPU. + return virtualStart + (physical - physicalStart), true + case syscall.EINVAL: + throw("set memory region failed; out of slots") + case syscall.ENOMEM: + throw("set memory region failed: out of memory") + case syscall.EFAULT: + throw("set memory region failed: invalid physical range") + default: + throw("set memory region failed: unknown reason") + } + + panic("unreachable") +} diff --git a/pkg/sentry/platform/kvm/bluepill_unsafe.go b/pkg/sentry/platform/kvm/bluepill_unsafe.go new file mode 100644 index 000000000..85703ff18 --- /dev/null +++ b/pkg/sentry/platform/kvm/bluepill_unsafe.go @@ -0,0 +1,175 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kvm + +import ( + "sync/atomic" + "syscall" + "unsafe" +) + +//go:linkname throw runtime.throw +func throw(string) + +// vCPUPtr returns a CPU for the given address. +// +//go:nosplit +func vCPUPtr(addr uintptr) *vCPU { + return (*vCPU)(unsafe.Pointer(addr)) +} + +// bytePtr returns a bytePtr for the given address. +// +//go:nosplit +func bytePtr(addr uintptr) *byte { + return (*byte)(unsafe.Pointer(addr)) +} + +// bluepillHandler is called from the signal stub. +// +// The world may be stopped while this is executing, and it executes on the +// signal stack. It should only execute raw system calls and functions that are +// explicitly marked go:nosplit. +// +//go:nosplit +func bluepillHandler(context unsafe.Pointer) { + // Sanitize the registers; interrupts must always be disabled. + c := bluepillArchEnter(bluepillArchContext(context)) + + // Increment the number of switches. + atomic.AddUint32(&c.switches, 1) + + // Store vCPUGuest. + // + // This is fine even if we're not in guest mode yet. In this signal + // handler, we'll already have all the relevant signals blocked, so an + // interrupt is only deliverable when we actually execute the KVM_RUN. + // + // The state will be returned to vCPUReady by Phase2. + if state := atomic.SwapUintptr(&c.state, vCPUGuest); state != vCPUReady { + throw("vCPU not in ready state") + } + + for { + _, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(c.fd), _KVM_RUN, 0) + if errno == syscall.EINTR { + // First, we process whatever pending signal + // interrupted KVM. Since we're in a signal handler + // currently, all signals are masked and the signal + // must have been delivered directly to this thread. + sig, _, errno := syscall.RawSyscall6( + syscall.SYS_RT_SIGTIMEDWAIT, + uintptr(unsafe.Pointer(&bounceSignalMask)), + 0, // siginfo. + 0, // timeout. + 8, // sigset size. 
+ 0, 0) + if errno != 0 { + throw("error waiting for pending signal") + } + if sig != uintptr(bounceSignal) { + throw("unexpected signal") + } + + // Check whether the current state of the vCPU is ready + // for interrupt injection. Because we don't have a + // PIC, we can't inject an interrupt while they are + // masked. We need to request a window if it's not + // ready. + if c.runData.readyForInterruptInjection == 0 { + c.runData.requestInterruptWindow = 1 + continue // Rerun vCPU. + } else { + // Force injection below; the vCPU is ready. + c.runData.exitReason = _KVM_EXIT_IRQ_WINDOW_OPEN + } + } else if errno != 0 { + throw("run failed") + } + + switch c.runData.exitReason { + case _KVM_EXIT_EXCEPTION: + throw("exception") + case _KVM_EXIT_IO: + throw("I/O") + case _KVM_EXIT_INTERNAL_ERROR: + throw("internal error") + case _KVM_EXIT_HYPERCALL: + throw("hypercall") + case _KVM_EXIT_DEBUG: + throw("debug") + case _KVM_EXIT_HLT: + // Copy out registers. + bluepillArchExit(c, bluepillArchContext(context)) + + // Notify any waiters. + switch state := atomic.SwapUintptr(&c.state, vCPUReady); state { + case vCPUGuest: + case vCPUWaiter: + c.notify() // Safe from handler. + default: + throw("invalid state") + } + return + case _KVM_EXIT_MMIO: + // Increment the fault count. + atomic.AddUint32(&c.faults, 1) + + // For MMIO, the physical address is the first data item. + virtual, ok := handleBluepillFault(c.machine, uintptr(c.runData.data[0])) + if !ok { + throw("physical address not valid") + } + + // We now need to fill in the data appropriately. KVM + // expects us to provide the result of the given MMIO + // operation in the runData struct. This is safe + // because, if a fault occurs here, the same fault + // would have occurred in guest mode. The kernel should + // not create invalid page table mappings. 
+ data := (*[8]byte)(unsafe.Pointer(&c.runData.data[1])) + length := (uintptr)((uint32)(c.runData.data[2])) + write := (uint8)((c.runData.data[2] >> 32 & 0xff)) != 0 + for i := uintptr(0); i < length; i++ { + b := bytePtr(uintptr(virtual) + i) + if write { + // Write to the given address. + *b = data[i] + } else { + // Read from the given address. + data[i] = *b + } + } + case _KVM_EXIT_IRQ_WINDOW_OPEN: + // Interrupt: we must have requested an interrupt + // window; set the interrupt line. + if _, _, errno := syscall.RawSyscall( + syscall.SYS_IOCTL, + uintptr(c.fd), + _KVM_INTERRUPT, + uintptr(unsafe.Pointer(&bounce))); errno != 0 { + throw("interrupt injection failed") + } + // Clear previous injection request. + c.runData.requestInterruptWindow = 0 + case _KVM_EXIT_SHUTDOWN: + throw("shutdown") + case _KVM_EXIT_FAIL_ENTRY: + throw("entry failed") + default: + throw("unknown failure") + } + } +} diff --git a/pkg/sentry/platform/kvm/context.go b/pkg/sentry/platform/kvm/context.go new file mode 100644 index 000000000..fd04a2c47 --- /dev/null +++ b/pkg/sentry/platform/kvm/context.go @@ -0,0 +1,81 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kvm + +import ( + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/interrupt" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// context is an implementation of the platform context. +// +// This is a thin wrapper around the machine. +type context struct { + // machine is the parent machine, and is immutable. + machine *machine + + // interrupt is the interrupt context. + interrupt interrupt.Forwarder +} + +// Switch runs the provided context in the given address space. +func (c *context) Switch(as platform.AddressSpace, ac arch.Context, _ int32) (*arch.SignalInfo, usermem.AccessType, error) { + // Extract data. + localAS := as.(*addressSpace) + regs := &ac.StateData().Regs + fp := (*byte)(ac.FloatingPointData()) + + // Grab a vCPU. + cpu, err := c.machine.Get() + if err != nil { + return nil, usermem.NoAccess, err + } + + // Enable interrupts (i.e. calls to vCPU.Notify). + if !c.interrupt.Enable(cpu) { + c.machine.Put(cpu) // Already preempted. + return nil, usermem.NoAccess, platform.ErrContextInterrupt + } + + // Mark the address space as dirty. + flags := ring0.Flags(0) + dirty := localAS.Touch(cpu) + if v := atomic.SwapUint32(dirty, 1); v == 0 { + flags |= ring0.FlagFlush + } + if ac.FullRestore() { + flags |= ring0.FlagFull + } + + // Take the blue pill. + si, at, err := cpu.SwitchToUser(regs, fp, localAS.pageTables, flags) + + // Release resources. + c.machine.Put(cpu) + + // All done. + c.interrupt.Disable() + return si, at, err +} + +// Interrupt interrupts the running context. 
+func (c *context) Interrupt() { + c.interrupt.NotifyInterrupt() +} diff --git a/pkg/sentry/platform/kvm/host_map.go b/pkg/sentry/platform/kvm/host_map.go new file mode 100644 index 000000000..357f8c92e --- /dev/null +++ b/pkg/sentry/platform/kvm/host_map.go @@ -0,0 +1,168 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvm + +import ( + "fmt" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type hostMap struct { + // mu protects below. + mu sync.RWMutex + + // set contains host mappings. + set hostMapSet +} + +type hostMapEntry struct { + addr uintptr + length uintptr +} + +func (hm *hostMap) forEachEntry(r usermem.AddrRange, fn func(offset uint64, m hostMapEntry)) { + for seg := hm.set.FindSegment(r.Start); seg.Ok() && seg.Start() < r.End; seg = seg.NextSegment() { + length := uintptr(seg.Range().Length()) + segOffset := uint64(0) // Adjusted below. + if seg.End() > r.End { + length -= uintptr(seg.End() - r.End) + } + if seg.Start() < r.Start { + length -= uintptr(r.Start - seg.Start()) + } else { + segOffset = uint64(seg.Start() - r.Start) + } + fn(segOffset, hostMapEntry{ + addr: seg.Value(), + length: length, + }) + } +} + +func (hm *hostMap) createMappings(r usermem.AddrRange, at usermem.AccessType, fd int, offset uint64) (ms []hostMapEntry, err error) { + // Replace any existing mappings. 
+ hm.forEachEntry(r, func(segOffset uint64, m hostMapEntry) { + _, _, errno := syscall.RawSyscall6( + syscall.SYS_MMAP, + m.addr, + m.length, + uintptr(at.Prot()), + syscall.MAP_FIXED|syscall.MAP_SHARED, + uintptr(fd), + uintptr(offset+segOffset)) + if errno != 0 && err == nil { + err = errno + } + }) + if err != nil { + return nil, err + } + + // Add in necessary new mappings. + for gap := hm.set.FindGap(r.Start); gap.Ok() && gap.Start() < r.End; { + length := uintptr(gap.Range().Length()) + gapOffset := uint64(0) // Adjusted below. + if gap.End() > r.End { + length -= uintptr(gap.End() - r.End) + } + if gap.Start() < r.Start { + length -= uintptr(r.Start - gap.Start()) + } else { + gapOffset = uint64(gap.Start() - r.Start) + } + + // Map the host file memory. + hostAddr, _, errno := syscall.RawSyscall6( + syscall.SYS_MMAP, + 0, + length, + uintptr(at.Prot()), + syscall.MAP_SHARED, + uintptr(fd), + uintptr(offset+gapOffset)) + if errno != 0 { + return nil, errno + } + + // Insert into the host set and move to the next gap. + gap = hm.set.Insert(gap, gap.Range().Intersect(r), hostAddr).NextGap() + } + + // Collect all slices. + hm.forEachEntry(r, func(_ uint64, m hostMapEntry) { + ms = append(ms, m) + }) + + return ms, nil +} + +// CreateMappings creates a new set of host mapping entries. +func (hm *hostMap) CreateMappings(r usermem.AddrRange, at usermem.AccessType, fd int, offset uint64) (ms []hostMapEntry, err error) { + hm.mu.Lock() + ms, err = hm.createMappings(r, at, fd, offset) + hm.mu.Unlock() + return +} + +func (hm *hostMap) deleteMapping(r usermem.AddrRange) { + // Remove all the existing mappings. + hm.forEachEntry(r, func(_ uint64, m hostMapEntry) { + _, _, errno := syscall.RawSyscall( + syscall.SYS_MUNMAP, + m.addr, + m.length, + 0) + if errno != 0 { + // Should never happen. + panic(fmt.Sprintf("unmap error: %v", errno)) + } + }) + + // Knock the range out. + hm.set.RemoveRange(r) +} + +// DeleteMapping deletes the given range. 
+func (hm *hostMap) DeleteMapping(r usermem.AddrRange) { + hm.mu.Lock() + hm.deleteMapping(r) + hm.mu.Unlock() +} + +// hostMapSetFunctions is used in the implementation of mapSet. +type hostMapSetFunctions struct{} + +func (hostMapSetFunctions) MinKey() usermem.Addr { return 0 } +func (hostMapSetFunctions) MaxKey() usermem.Addr { return ^usermem.Addr(0) } +func (hostMapSetFunctions) ClearValue(val *uintptr) { *val = 0 } + +func (hostMapSetFunctions) Merge(r1 usermem.AddrRange, addr1 uintptr, r2 usermem.AddrRange, addr2 uintptr) (uintptr, bool) { + if addr1+uintptr(r1.Length()) != addr2 { + return 0, false + } + + // Since the two regions are contiguous in both the key space and the + // value space, we can just store a single segment with the first host + // virtual address; the logic above operates based on the size of the + // segments. + return addr1, true +} + +func (hostMapSetFunctions) Split(r usermem.AddrRange, hostAddr uintptr, split usermem.Addr) (uintptr, uintptr) { + return hostAddr, hostAddr + uintptr(split-r.Start) +} diff --git a/pkg/sentry/platform/kvm/kvm.go b/pkg/sentry/platform/kvm/kvm.go new file mode 100644 index 000000000..31928c9f0 --- /dev/null +++ b/pkg/sentry/platform/kvm/kvm.go @@ -0,0 +1,149 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package kvm provides a kvm-based implementation of the platform interface. 
+package kvm + +import ( + "fmt" + "runtime" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/cpuid" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/filemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// KVM represents a lightweight VM context. +type KVM struct { + platform.NoCPUPreemptionDetection + + // filemem is our memory source. + *filemem.FileMem + + // machine is the backing VM. + machine *machine +} + +var ( + globalOnce sync.Once + globalErr error +) + +// New returns a new KVM-based implementation of the platform interface. +func New() (*KVM, error) { + // Allocate physical memory for the vCPUs. + fm, err := filemem.New("kvm-memory") + if err != nil { + return nil, err + } + + // Try opening KVM. + fd, err := syscall.Open("/dev/kvm", syscall.O_RDWR, 0) + if err != nil { + return nil, fmt.Errorf("opening /dev/kvm: %v", err) + } + defer syscall.Close(fd) + + // Ensure global initialization is done. + globalOnce.Do(func() { + physicalInit() + globalErr = updateSystemValues(fd) + ring0.Init(cpuid.HostFeatureSet()) + }) + if globalErr != nil { + return nil, err + } + + // Create a new VM fd. + vm, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(fd), _KVM_CREATE_VM, 0) + if errno != 0 { + return nil, fmt.Errorf("creating VM: %v", errno) + } + + // Create a VM context. + machine, err := newMachine(int(vm), runtime.NumCPU()) + if err != nil { + return nil, err + } + + // All set. + return &KVM{ + FileMem: fm, + machine: machine, + }, nil +} + +// SupportsAddressSpaceIO implements platform.Platform.SupportsAddressSpaceIO. +func (*KVM) SupportsAddressSpaceIO() bool { + return false +} + +// CooperativelySchedulesAddressSpace implements platform.Platform.CooperativelySchedulesAddressSpace. 
+func (*KVM) CooperativelySchedulesAddressSpace() bool { + return false +} + +// MapUnit implements platform.Platform.MapUnit. +func (*KVM) MapUnit() uint64 { + // We greedily creates PTEs in MapFile, so extremely large mappings can + // be expensive. Not _that_ expensive since we allow super pages, but + // even though can get out of hand if you're creating multi-terabyte + // mappings. For this reason, we limit mappings to an arbitrary 16MB. + return 16 << 20 +} + +// MinUserAddress returns the lowest available address. +func (*KVM) MinUserAddress() usermem.Addr { + return usermem.PageSize +} + +// MaxUserAddress returns the first address that may not be used. +func (*KVM) MaxUserAddress() usermem.Addr { + return usermem.Addr(ring0.MaximumUserAddress) +} + +// NewAddressSpace returns a new pagetable root. +func (k *KVM) NewAddressSpace(_ interface{}) (platform.AddressSpace, <-chan struct{}, error) { + // Allocate page tables and install system mappings. + pageTables := k.machine.kernel.PageTables.New() + applyPhysicalRegions(func(pr physicalRegion) bool { + // Map the kernel in the upper half. + kernelVirtual := usermem.Addr(ring0.KernelStartAddress | pr.virtual) + pageTables.Map(kernelVirtual, pr.length, false /* kernel */, usermem.AnyAccess, pr.physical) + return true // Keep iterating. + }) + + // Return the new address space. + return &addressSpace{ + filemem: k.FileMem, + machine: k.machine, + pageTables: pageTables, + }, nil, nil +} + +// NewContext returns an interruptible context. +func (k *KVM) NewContext() platform.Context { + return &context{ + machine: k.machine, + } +} + +// Memory returns the platform memory used to do allocations. +func (k *KVM) Memory() platform.Memory { + return k.FileMem +} diff --git a/pkg/sentry/platform/kvm/kvm_amd64.go b/pkg/sentry/platform/kvm/kvm_amd64.go new file mode 100644 index 000000000..3d56ed895 --- /dev/null +++ b/pkg/sentry/platform/kvm/kvm_amd64.go @@ -0,0 +1,213 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package kvm + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0" +) + +// userMemoryRegion is a region of physical memory. +// +// This mirrors kvm_memory_region. +type userMemoryRegion struct { + slot uint32 + flags uint32 + guestPhysAddr uint64 + memorySize uint64 + userspaceAddr uint64 +} + +// userRegs represents KVM user registers. +// +// This mirrors kvm_regs. +type userRegs struct { + RAX uint64 + RBX uint64 + RCX uint64 + RDX uint64 + RSI uint64 + RDI uint64 + RSP uint64 + RBP uint64 + R8 uint64 + R9 uint64 + R10 uint64 + R11 uint64 + R12 uint64 + R13 uint64 + R14 uint64 + R15 uint64 + RIP uint64 + RFLAGS uint64 +} + +// systemRegs represents KVM system registers. +// +// This mirrors kvm_sregs. +type systemRegs struct { + CS segment + DS segment + ES segment + FS segment + GS segment + SS segment + TR segment + LDT segment + GDT descriptor + IDT descriptor + CR0 uint64 + CR2 uint64 + CR3 uint64 + CR4 uint64 + CR8 uint64 + EFER uint64 + apicBase uint64 + interruptBitmap [(_KVM_NR_INTERRUPTS + 63) / 64]uint64 +} + +// segment is the expanded form of a segment register. +// +// This mirrors kvm_segment. +type segment struct { + base uint64 + limit uint32 + selector uint16 + typ uint8 + present uint8 + DPL uint8 + DB uint8 + S uint8 + L uint8 + G uint8 + AVL uint8 + unusable uint8 + _ uint8 +} + +// Clear clears the segment and marks it unusable. 
+func (s *segment) Clear() { + *s = segment{unusable: 1} +} + +// selector is a segment selector. +type selector uint16 + +// tobool is a simple helper. +func tobool(x ring0.SegmentDescriptorFlags) uint8 { + if x != 0 { + return 1 + } + return 0 +} + +// Load loads the segment described by d into the segment s. +// +// The argument sel is recorded as the segment selector index. +func (s *segment) Load(d *ring0.SegmentDescriptor, sel ring0.Selector) { + flag := d.Flags() + if flag&ring0.SegmentDescriptorPresent == 0 { + s.Clear() + return + } + s.base = uint64(d.Base()) + s.limit = d.Limit() + s.typ = uint8((flag>>8)&0xF) | 1 + s.S = tobool(flag & ring0.SegmentDescriptorSystem) + s.DPL = uint8(d.DPL()) + s.present = tobool(flag & ring0.SegmentDescriptorPresent) + s.AVL = tobool(flag & ring0.SegmentDescriptorAVL) + s.L = tobool(flag & ring0.SegmentDescriptorLong) + s.DB = tobool(flag & ring0.SegmentDescriptorDB) + s.G = tobool(flag & ring0.SegmentDescriptorG) + if s.L != 0 { + s.limit = 0xffffffff + } + s.unusable = 0 + s.selector = uint16(sel) +} + +// descriptor describes a region of physical memory. +// +// It corresponds to the pseudo-descriptor used in the x86 LGDT and LIDT +// instructions, and mirrors kvm_dtable. +type descriptor struct { + base uint64 + limit uint16 + _ [3]uint16 +} + +// modelControlRegister is an MSR entry. +// +// This mirrors kvm_msr_entry. +type modelControlRegister struct { + index uint32 + _ uint32 + data uint64 +} + +// modelControlRegisers is a collection of MSRs. +// +// This mirrors kvm_msrs. +type modelControlRegisters struct { + nmsrs uint32 + _ uint32 + entries [16]modelControlRegister +} + +// runData is the run structure. This may be mapped for synchronous register +// access (although that doesn't appear to be supported by my kernel at least). +// +// This mirrors kvm_run. 
+type runData struct { + requestInterruptWindow uint8 + _ [7]uint8 + + exitReason uint32 + readyForInterruptInjection uint8 + ifFlag uint8 + _ [2]uint8 + + cr8 uint64 + apicBase uint64 + + // This is the union data for exits. Interpretation depends entirely on + // the exitReason above (see vCPU code for more information). + data [32]uint64 +} + +// cpuidEntry is a single CPUID entry. +// +// This mirrors kvm_cpuid_entry2. +type cpuidEntry struct { + function uint32 + index uint32 + flags uint32 + eax uint32 + ebx uint32 + ecx uint32 + edx uint32 + _ [3]uint32 +} + +// cpuidEntries is a collection of CPUID entries. +// +// This mirrors kvm_cpuid2. +type cpuidEntries struct { + nr uint32 + _ uint32 + entries [_KVM_NR_CPUID_ENTRIES]cpuidEntry +} diff --git a/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go b/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go new file mode 100644 index 000000000..389412d87 --- /dev/null +++ b/pkg/sentry/platform/kvm/kvm_amd64_unsafe.go @@ -0,0 +1,93 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package kvm + +import ( + "fmt" + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables" +) + +var ( + runDataSize int + hasGuestPCID bool + hasGuestINVPCID bool + pagetablesOpts pagetables.Opts + cpuidSupported = cpuidEntries{nr: _KVM_NR_CPUID_ENTRIES} +) + +func updateSystemValues(fd int) error { + // Extract the mmap size. 
+ sz, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(fd), _KVM_GET_VCPU_MMAP_SIZE, 0) + if errno != 0 { + return fmt.Errorf("getting VCPU mmap size: %v", errno) + } + + // Save the data. + runDataSize = int(sz) + + // Must do the dance to figure out the number of entries. + _, _, errno = syscall.RawSyscall( + syscall.SYS_IOCTL, + uintptr(fd), + _KVM_GET_SUPPORTED_CPUID, + uintptr(unsafe.Pointer(&cpuidSupported))) + if errno != 0 && errno != syscall.ENOMEM { + // Some other error occurred. + return fmt.Errorf("getting supported CPUID: %v", errno) + } + + // The number should now be correct. + _, _, errno = syscall.RawSyscall( + syscall.SYS_IOCTL, + uintptr(fd), + _KVM_GET_SUPPORTED_CPUID, + uintptr(unsafe.Pointer(&cpuidSupported))) + if errno != 0 { + // Didn't work with the right number. + return fmt.Errorf("getting supported CPUID (2nd attempt): %v", errno) + } + + // Calculate whether guestPCID is supported. + // + // FIXME: These should go through the much more pleasant + // cpuid package interfaces, once a way to accept raw kvm CPUID entries + // is plumbed (or some rough equivalent). + for i := 0; i < int(cpuidSupported.nr); i++ { + entry := cpuidSupported.entries[i] + if entry.function == 1 && entry.index == 0 && entry.ecx&(1<<17) != 0 { + hasGuestPCID = true // Found matching PCID in guest feature set. + } + if entry.function == 7 && entry.index == 0 && entry.ebx&(1<<10) != 0 { + hasGuestINVPCID = true // Found matching INVPCID in guest feature set. + } + } + + // A basic sanity check: ensure that we don't attempt to + // invpcid if guest PCIDs are not supported; it's not clear + // what the semantics of this would be (or why some CPU or + // hypervisor would export this particular combination). + hasGuestINVPCID = hasGuestPCID && hasGuestINVPCID + + // Set the pagetables to use PCID if it's available. + pagetablesOpts.EnablePCID = hasGuestPCID + + // Success. 
+ return nil +} diff --git a/pkg/sentry/platform/kvm/kvm_const.go b/pkg/sentry/platform/kvm/kvm_const.go new file mode 100644 index 000000000..0ec6a4a00 --- /dev/null +++ b/pkg/sentry/platform/kvm/kvm_const.go @@ -0,0 +1,56 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvm + +// KVM ioctls. +// +// Only the ioctls we need in Go appear here; some additional ioctls are used +// within the assembly stubs (KVM_INTERRUPT, etc.). +const ( + _KVM_CREATE_VM = 0xae01 + _KVM_GET_VCPU_MMAP_SIZE = 0xae04 + _KVM_CREATE_VCPU = 0xae41 + _KVM_SET_TSS_ADDR = 0xae47 + _KVM_RUN = 0xae80 + _KVM_INTERRUPT = 0x4004ae86 + _KVM_SET_MSRS = 0x4008ae89 + _KVM_SET_USER_MEMORY_REGION = 0x4020ae46 + _KVM_SET_REGS = 0x4090ae82 + _KVM_SET_SREGS = 0x4138ae84 + _KVM_GET_SUPPORTED_CPUID = 0xc008ae05 + _KVM_SET_CPUID2 = 0x4008ae90 + _KVM_SET_SIGNAL_MASK = 0x4004ae8b +) + +// KVM exit reasons. +const ( + _KVM_EXIT_EXCEPTION = 0x1 + _KVM_EXIT_IO = 0x2 + _KVM_EXIT_HYPERCALL = 0x3 + _KVM_EXIT_DEBUG = 0x4 + _KVM_EXIT_HLT = 0x5 + _KVM_EXIT_MMIO = 0x6 + _KVM_EXIT_IRQ_WINDOW_OPEN = 0x7 + _KVM_EXIT_SHUTDOWN = 0x8 + _KVM_EXIT_FAIL_ENTRY = 0x9 + _KVM_EXIT_INTERNAL_ERROR = 0x11 +) + +// KVM limits. 
+const ( + _KVM_NR_VCPUS = 0x100 + _KVM_NR_INTERRUPTS = 0x100 + _KVM_NR_CPUID_ENTRIES = 0x100 +) diff --git a/pkg/sentry/platform/kvm/kvm_test.go b/pkg/sentry/platform/kvm/kvm_test.go new file mode 100644 index 000000000..61cfdd8fd --- /dev/null +++ b/pkg/sentry/platform/kvm/kvm_test.go @@ -0,0 +1,415 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvm + +import ( + "math/rand" + "reflect" + "syscall" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/kvm/testutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +var dummyFPState = (*byte)(arch.NewFloatingPointData()) + +type testHarness interface { + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) +} + +func kvmTest(t testHarness, setup func(*KVM), fn func(*vCPU) bool) { + // Create the machine. + k, err := New() + if err != nil { + t.Fatalf("error creating KVM instance: %v", err) + } + defer k.machine.Destroy() + defer k.FileMem.Destroy() + + // Call additional setup. + if setup != nil { + setup(k) + } + + var c *vCPU // For recovery. 
+ defer func() { + redpill() + if c != nil { + k.machine.Put(c) + } + }() + for { + c, err = k.machine.Get() + if err != nil { + t.Fatalf("error getting vCPU: %v", err) + } + if !fn(c) { + break + } + + // We put the vCPU here and clear the value so that the + // deferred recovery will not re-put it above. + k.machine.Put(c) + c = nil + } +} + +func bluepillTest(t testHarness, fn func(*vCPU)) { + kvmTest(t, nil, func(c *vCPU) bool { + bluepill(c) + fn(c) + return false + }) +} + +func TestKernelSyscall(t *testing.T) { + bluepillTest(t, func(c *vCPU) { + redpill() // Leave guest mode. + if got := c.State(); got != vCPUReady { + t.Errorf("vCPU not in ready state: got %v", got) + } + }) +} + +func hostFault() { + defer func() { + recover() + }() + var foo *int + *foo = 0 +} + +func TestKernelFault(t *testing.T) { + hostFault() // Ensure recovery works. + bluepillTest(t, func(c *vCPU) { + hostFault() + if got := c.State(); got != vCPUReady { + t.Errorf("vCPU not in ready state: got %v", got) + } + }) +} + +func TestKernelFloatingPoint(t *testing.T) { + bluepillTest(t, func(c *vCPU) { + if !testutil.FloatingPointWorks() { + t.Errorf("floating point does not work, and it should!") + } + }) +} + +func applicationTest(t testHarness, useHostMappings bool, target func(), fn func(*vCPU, *syscall.PtraceRegs, *pagetables.PageTables) bool) { + // Initialize registers & page tables. + var ( + regs syscall.PtraceRegs + pt *pagetables.PageTables + ) + testutil.SetTestTarget(®s, target) + defer func() { + if pt != nil { + pt.Release() + } + }() + + kvmTest(t, func(k *KVM) { + // Create new page tables. + as, _, err := k.NewAddressSpace(nil /* invalidator */) + if err != nil { + t.Fatalf("can't create new address space: %v", err) + } + pt = as.(*addressSpace).pageTables + + if useHostMappings { + // Apply the physical mappings to these page tables. + // (This is normally dangerous, since they point to + // physical pages that may not exist. 
This shouldn't be + // done for regular user code, but is fine for test + // purposes.) + applyPhysicalRegions(func(pr physicalRegion) bool { + pt.Map(usermem.Addr(pr.virtual), pr.length, true /* user */, usermem.AnyAccess, pr.physical) + return true // Keep iterating. + }) + } + }, func(c *vCPU) bool { + // Invoke the function with the extra data. + return fn(c, ®s, pt) + }) +} + +func TestApplicationSyscall(t *testing.T) { + applicationTest(t, true, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, ring0.FlagFull); err != nil { + t.Errorf("application syscall with full restore failed: %v", err) + } + return false + }) + applicationTest(t, true, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); err != nil { + t.Errorf("application syscall with partial restore failed: %v", err) + } + return false + }) +} + +func TestApplicationFault(t *testing.T) { + applicationTest(t, true, testutil.Touch, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + testutil.SetTouchTarget(regs, nil) // Cause fault. + if si, _, err := c.SwitchToUser(regs, dummyFPState, pt, ring0.FlagFull); err != platform.ErrContextSignal || (si != nil && si.Signo != int32(syscall.SIGSEGV)) { + t.Errorf("application fault with full restore got (%v, %v), expected (%v, SIGSEGV)", err, si, platform.ErrContextSignal) + } + return false + }) + applicationTest(t, true, testutil.Touch, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + testutil.SetTouchTarget(regs, nil) // Cause fault. 
+ if si, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); err != platform.ErrContextSignal || (si != nil && si.Signo != int32(syscall.SIGSEGV)) { + t.Errorf("application fault with partial restore got (%v, %v), expected (%v, SIGSEGV)", err, si, platform.ErrContextSignal) + } + return false + }) +} + +func TestRegistersSyscall(t *testing.T) { + applicationTest(t, true, testutil.TwiddleRegsSyscall, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + testutil.SetTestRegs(regs) // Fill values for all registers. + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); err != nil { + t.Errorf("application register check with partial restore got unexpected error: %v", err) + } + if err := testutil.CheckTestRegs(regs, false); err != nil { + t.Errorf("application register check with partial restore failed: %v", err) + } + return false + }) +} + +func TestRegistersFault(t *testing.T) { + applicationTest(t, true, testutil.TwiddleRegsFault, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + testutil.SetTestRegs(regs) // Fill values for all registers. 
+ if si, _, err := c.SwitchToUser(regs, dummyFPState, pt, ring0.FlagFull); err != platform.ErrContextSignal || si.Signo != int32(syscall.SIGSEGV) { + t.Errorf("application register check with full restore got unexpected error: %v", err) + } + if err := testutil.CheckTestRegs(regs, true); err != nil { + t.Errorf("application register check with full restore failed: %v", err) + } + return false + }) +} + +func TestSegments(t *testing.T) { + applicationTest(t, true, testutil.TwiddleSegments, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + testutil.SetTestSegments(regs) + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, ring0.FlagFull); err != nil { + t.Errorf("application segment check with full restore got unexpected error: %v", err) + } + if err := testutil.CheckTestSegments(regs); err != nil { + t.Errorf("application segment check with full restore failed: %v", err) + } + return false + }) +} + +func TestBounce(t *testing.T) { + applicationTest(t, true, testutil.SpinLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + go func() { + time.Sleep(time.Millisecond) + c.Bounce() + }() + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); err != platform.ErrContextInterrupt { + t.Errorf("application partial restore: got %v, wanted %v", err, platform.ErrContextInterrupt) + } + return false + }) + applicationTest(t, true, testutil.SpinLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + go func() { + time.Sleep(time.Millisecond) + c.Bounce() + }() + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, ring0.FlagFull); err != platform.ErrContextInterrupt { + t.Errorf("application full restore: got %v, wanted %v", err, platform.ErrContextInterrupt) + } + return false + }) +} + +func TestBounceStress(t *testing.T) { + applicationTest(t, true, testutil.SpinLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + randomSleep := func() { + // O(hundreds of 
microseconds) is appropriate to ensure + // different overlaps and different schedules. + if n := rand.Intn(1000); n > 100 { + time.Sleep(time.Duration(n) * time.Microsecond) + } + } + for i := 0; i < 1000; i++ { + // Start an asynchronously executing goroutine that + // calls Bounce at pseudo-random point in time. + // This should wind up calling Bounce when the + // kernel is in various stages of the switch. + go func() { + randomSleep() + c.Bounce() + }() + randomSleep() + // Execute the switch. + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); err != platform.ErrContextInterrupt { + t.Errorf("application partial restore: got %v, wanted %v", err, platform.ErrContextInterrupt) + } + // Simulate work. + c.Unlock() + randomSleep() + c.Lock() + } + return false + }) +} + +func TestInvalidate(t *testing.T) { + var data uintptr // Used below. + applicationTest(t, true, testutil.Touch, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + testutil.SetTouchTarget(regs, &data) // Read legitimate value. + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); err != nil { + t.Errorf("application partial restore: got %v, wanted nil", err) + } + // Unmap the page containing data & invalidate. + pt.Unmap(usermem.Addr(reflect.ValueOf(&data).Pointer() & ^uintptr(usermem.PageSize-1)), usermem.PageSize) + c.Invalidate() // Ensure invalidation. + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); err != platform.ErrContextSignal { + t.Errorf("application partial restore: got %v, wanted %v", err, platform.ErrContextSignal) + } + return false + }) +} + +// IsFault returns true iff the given signal represents a fault. 
+func IsFault(err error, si *arch.SignalInfo) bool {
+ return err == platform.ErrContextSignal && si.Signo == int32(syscall.SIGSEGV)
+}
+
+func TestEmptyAddressSpace(t *testing.T) {
+ applicationTest(t, false, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {
+ if si, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); !IsFault(err, si) {
+ t.Errorf("first fault with partial restore failed got %v", err)
+ t.Logf("registers: %#v", &regs)
+ }
+ return false
+ })
+ applicationTest(t, false, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool {
+ if si, _, err := c.SwitchToUser(regs, dummyFPState, pt, ring0.FlagFull); !IsFault(err, si) {
+ t.Errorf("first fault with full restore failed got %v", err)
+ t.Logf("registers: %#v", &regs)
+ }
+ return false
+ })
+}
+
+func TestWrongVCPU(t *testing.T) {
+ kvmTest(t, nil, func(c1 *vCPU) bool {
+ kvmTest(t, nil, func(c2 *vCPU) bool {
+ // Basic test, one then the other.
+ bluepill(c1)
+ bluepill(c2)
+ if c2.switches == 0 {
+ // Don't allow the test to proceed if this fails.
+ t.Fatalf("wrong vCPU#2 switches: vCPU1=%+v,vCPU2=%+v", c1, c2)
+ }
+
+ // Alternate vCPUs; we expect to need to trigger the
+ // wrong vCPU path on each switch.
+ for i := 0; i < 100; i++ {
+ bluepill(c1)
+ bluepill(c2)
+ }
+ if count := c1.switches; count < 90 {
+ t.Errorf("wrong vCPU#1 switches: vCPU1=%+v,vCPU2=%+v", c1, c2)
+ }
+ if count := c2.switches; count < 90 {
+ t.Errorf("wrong vCPU#2 switches: vCPU1=%+v,vCPU2=%+v", c1, c2)
+ }
+ return false
+ })
+ return false
+ })
+ kvmTest(t, nil, func(c1 *vCPU) bool {
+ kvmTest(t, nil, func(c2 *vCPU) bool {
+ bluepill(c1)
+ bluepill(c2)
+ return false
+ })
+ return false
+ })
+}
+
+func BenchmarkApplicationSyscall(b *testing.B) {
+ var (
+ i int // Iteration includes machine.Get() / machine.Put().
+ a int // Count for ErrContextInterrupt. 
+ ) + applicationTest(b, true, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); err != nil { + if err == platform.ErrContextInterrupt { + a++ + return true // Ignore. + } + b.Fatalf("benchmark failed: %v", err) + } + i++ + return i < b.N + }) + if a != 0 { + b.Logf("ErrContextInterrupt occurred %d times (in %d iterations).", a, a+i) + } +} + +func BenchmarkKernelSyscall(b *testing.B) { + // Note that the target passed here is irrelevant, we never execute SwitchToUser. + applicationTest(b, true, testutil.Getpid, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + // iteration does not include machine.Get() / machine.Put(). + for i := 0; i < b.N; i++ { + testutil.Getpid() + } + return false + }) +} + +func BenchmarkWorldSwitchToUserRoundtrip(b *testing.B) { + // see BenchmarkApplicationSyscall. + var ( + i int + a int + ) + applicationTest(b, true, testutil.SyscallLoop, func(c *vCPU, regs *syscall.PtraceRegs, pt *pagetables.PageTables) bool { + if _, _, err := c.SwitchToUser(regs, dummyFPState, pt, 0); err != nil { + if err == platform.ErrContextInterrupt { + a++ + return true // Ignore. + } + b.Fatalf("benchmark failed: %v", err) + } + // This will intentionally cause the world switch. By executing + // a host syscall here, we force the transition between guest + // and host mode. + testutil.Getpid() + i++ + return i < b.N + }) + if a != 0 { + b.Logf("EAGAIN occurred %d times (in %d iterations).", a, a+i) + } +} diff --git a/pkg/sentry/platform/kvm/machine.go b/pkg/sentry/platform/kvm/machine.go new file mode 100644 index 000000000..a5be0cee3 --- /dev/null +++ b/pkg/sentry/platform/kvm/machine.go @@ -0,0 +1,412 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvm + +import ( + "fmt" + "runtime" + "sync" + "sync/atomic" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/tmutex" +) + +// machine contains state associated with the VM as a whole. +type machine struct { + // fd is the vm fd. + fd int + + // nextSlot is the next slot for setMemoryRegion. + // + // This must be accessed atomically. If nextSlot is ^uint32(0), then + // slots are currently being updated, and the caller should retry. + nextSlot uint32 + + // kernel is the set of global structures. + kernel *ring0.Kernel + + // mappingCache is used for mapPhysical. + mappingCache sync.Map + + // mu protects vCPUs. + mu sync.Mutex + + // vCPUs are the machine vCPUs. + // + // This is eventually keyed by system TID, but is initially indexed by + // the negative vCPU id. This is merely an optimization, so while + // collisions here are not possible, it wouldn't matter anyways. + vCPUs map[uint64]*vCPU +} + +const ( + // vCPUReady is the lock value for an available vCPU. + // + // Legal transitions: vCPUGuest (bluepill). + vCPUReady uintptr = iota + + // vCPUGuest indicates the vCPU is in guest mode. + // + // Legal transition: vCPUReady (bluepill), vCPUWaiter (wait). + vCPUGuest + + // vCPUWaiter indicates that the vCPU should be released. + // + // Legal transition: vCPUReady (bluepill). 
+ vCPUWaiter +) + +// vCPU is a single KVM vCPU. +type vCPU struct { + // CPU is the kernel CPU data. + // + // This must be the first element of this structure, it is referenced + // by the bluepill code (see bluepill_amd64.s). + ring0.CPU + + // fd is the vCPU fd. + fd int + + // tid is the last set tid. + tid uint64 + + // switches is a count of world switches (informational only). + switches uint32 + + // faults is a count of world faults (informational only). + faults uint32 + + // state is the vCPU state; all are described above. + state uintptr + + // runData for this vCPU. + runData *runData + + // machine associated with this vCPU. + machine *machine + + // mu applies across get/put; it does not protect the above. + mu tmutex.Mutex +} + +// newMachine returns a new VM context. +func newMachine(vm int, vCPUs int) (*machine, error) { + // Create the machine. + m := &machine{ + fd: vm, + vCPUs: make(map[uint64]*vCPU), + } + if vCPUs > _KVM_NR_VCPUS { + // Hard cap at KVM's limit. + vCPUs = _KVM_NR_VCPUS + } + if n := 2 * runtime.NumCPU(); vCPUs > n { + // Cap at twice the number of physical cores. Otherwise we're + // just wasting memory and thrashing. (There may be scheduling + // issues when you've got > n active threads.) + vCPUs = n + } + m.kernel = ring0.New(ring0.KernelOpts{ + PageTables: pagetables.New(m, pagetablesOpts), + }) + + // Initialize architecture state. + if err := m.initArchState(vCPUs); err != nil { + m.Destroy() + return nil, err + } + + // Create all the vCPUs. + for id := 0; id < vCPUs; id++ { + // Create the vCPU. + fd, _, errno := syscall.RawSyscall(syscall.SYS_IOCTL, uintptr(vm), _KVM_CREATE_VCPU, uintptr(id)) + if errno != 0 { + m.Destroy() + return nil, fmt.Errorf("error creating VCPU: %v", errno) + } + c := &vCPU{ + fd: int(fd), + machine: m, + } + c.mu.Init() + c.CPU.Init(m.kernel) + c.CPU.KernelSyscall = bluepillSyscall + c.CPU.KernelException = bluepillException + m.vCPUs[uint64(-id)] = c // See above. 
+ + // Ensure the signal mask is correct. + if err := c.setSignalMask(); err != nil { + m.Destroy() + return nil, err + } + + // Initialize architecture state. + if err := c.initArchState(); err != nil { + m.Destroy() + return nil, err + } + + // Map the run data. + runData, err := mapRunData(int(fd)) + if err != nil { + m.Destroy() + return nil, err + } + c.runData = runData + } + + // Apply the physical mappings. Note that these mappings may point to + // guest physical addresses that are not actually available. These + // physical pages are mapped on demand, see kernel_unsafe.go. + applyPhysicalRegions(func(pr physicalRegion) bool { + // Map everything in the lower half. + m.kernel.PageTables.Map(usermem.Addr(pr.virtual), pr.length, false /* kernel */, usermem.AnyAccess, pr.physical) + // And keep everything in the upper half. + kernelAddr := usermem.Addr(ring0.KernelStartAddress | pr.virtual) + m.kernel.PageTables.Map(kernelAddr, pr.length, false /* kernel */, usermem.AnyAccess, pr.physical) + return true // Keep iterating. + }) + + // Ensure that the currently mapped virtual regions are actually + // available in the VM. Note that this doesn't guarantee no future + // faults, however it should guarantee that everything is available to + // ensure successful vCPU entry. + applyVirtualRegions(func(vr virtualRegion) { + if excludeVirtualRegion(vr) { + return // skip region. + } + for virtual := vr.virtual; virtual < vr.virtual+vr.length; { + physical, length, ok := TranslateToPhysical(virtual) + if !ok { + // This must be an invalid region that was + // knocked out by creation of the physical map. + return + } + if virtual+length > vr.virtual+vr.length { + // Cap the length to the end of the area. + length = vr.virtual + vr.length - virtual + } + + // Ensure the physical range is mapped. + m.mapPhysical(physical, length) + virtual += length + } + }) + + // Ensure the machine is cleaned up properly. 
+ runtime.SetFinalizer(m, (*machine).Destroy) + return m, nil +} + +// mapPhysical checks for the mapping of a physical range, and installs one if +// not available. This attempts to be efficient for calls in the hot path. +// +// This panics on error. +func (m *machine) mapPhysical(physical, length uintptr) { + for end := physical + length; physical < end; { + _, physicalStart, length, ok := calculateBluepillFault(m, physical) + if !ok { + // Should never happen. + panic("mapPhysical on unknown physical address") + } + + if _, ok := m.mappingCache.LoadOrStore(physicalStart, true); !ok { + // Not present in the cache; requires setting the slot. + if _, ok := handleBluepillFault(m, physical); !ok { + panic("handleBluepillFault failed") + } + } + + // Move to the next chunk. + physical = physicalStart + length + } +} + +// Destroy frees associated resources. +// +// Destroy should only be called once all active users of the machine are gone. +// The machine object should not be used after calling Destroy. +// +// Precondition: all vCPUs must be returned to the machine. +func (m *machine) Destroy() { + runtime.SetFinalizer(m, nil) + + // Destroy vCPUs. + for _, c := range m.vCPUs { + // Ensure the vCPU is not still running in guest mode. This is + // possible iff teardown has been done by other threads, and + // somehow a single thread has not executed any system calls. + c.wait() + + // Teardown the vCPU itself. + switch state := c.State(); state { + case vCPUReady: + // Note that the runData may not be mapped if an error + // occurs during the middle of initialization. + if c.runData != nil { + if err := unmapRunData(c.runData); err != nil { + panic(fmt.Sprintf("error unmapping rundata: %v", err)) + } + } + if err := syscall.Close(int(c.fd)); err != nil { + panic(fmt.Sprintf("error closing vCPU fd: %v", err)) + } + case vCPUGuest, vCPUWaiter: + // Should never happen; waited above. 
+ panic("vCPU disposed in guest state") + default: + // Should never happen; not a valid state. + panic(fmt.Sprintf("vCPU in invalid state: %v", state)) + } + } + + // Release host mappings. + if m.kernel.PageTables != nil { + m.kernel.PageTables.Release() + } + + // vCPUs are gone: teardown machine state. + if err := syscall.Close(m.fd); err != nil { + panic(fmt.Sprintf("error closing VM fd: %v", err)) + } +} + +// Get gets an available vCPU. +func (m *machine) Get() (*vCPU, error) { + runtime.LockOSThread() + tid := procid.Current() + m.mu.Lock() + + for { + // Check for an exact match. + if c := m.vCPUs[tid]; c != nil && c.mu.TryLock() { + m.mu.Unlock() + return c, nil + } + + // Scan for an available vCPU. + for origTID, c := range m.vCPUs { + if c.LockInState(vCPUReady) { + delete(m.vCPUs, origTID) + m.vCPUs[tid] = c + m.mu.Unlock() + + // We need to reload thread-local segments as + // we have origTID != tid and the vCPU state + // may be stale. + c.loadSegments() + atomic.StoreUint64(&c.tid, tid) + return c, nil + } + } + + // Everything is busy executing user code (locked). + // + // We hold the pool lock here, so we should be able to kick something + // out of kernel mode and have it bounce into host mode when it tries + // to grab the vCPU again. + for _, c := range m.vCPUs { + if c.State() != vCPUWaiter { + c.Bounce() + } + } + + // Give other threads an opportunity to run. + yield() + } +} + +// Put puts the current vCPU. +func (m *machine) Put(c *vCPU) { + c.Unlock() + runtime.UnlockOSThread() +} + +// State returns the current state. +func (c *vCPU) State() uintptr { + return atomic.LoadUintptr(&c.state) +} + +// Lock locks the vCPU. +func (c *vCPU) Lock() { + c.mu.Lock() +} + +// Invalidate invalidates caches. +func (c *vCPU) Invalidate() { +} + +// LockInState locks the vCPU if it is in the given state and TryLock succeeds. 
+func (c *vCPU) LockInState(state uintptr) bool { + if c.State() == state && c.mu.TryLock() { + if c.State() != state { + c.mu.Unlock() + return false + } + return true + } + return false +} + +// Unlock unlocks the given vCPU. +func (c *vCPU) Unlock() { + // Ensure we're out of guest mode, if necessary. + if c.State() == vCPUWaiter { + redpill() // Force guest mode exit. + } + c.mu.Unlock() +} + +// NotifyInterrupt implements interrupt.Receiver.NotifyInterrupt. +func (c *vCPU) NotifyInterrupt() { + c.Bounce() +} + +// pid is used below in bounce. +var pid = syscall.Getpid() + +// Bounce ensures that the vCPU bounces back to the kernel. +// +// In practice, this means returning EAGAIN from running user code. The vCPU +// will be unlocked and relock, and the kernel is guaranteed to check for +// interrupt notifications (e.g. injected via Notify) and invalidations. +func (c *vCPU) Bounce() { + for { + if c.mu.TryLock() { + // We know that the vCPU must be in the kernel already, + // because the lock was not acquired. We specifically + // don't want to call bounce in this case, because it's + // not necessary to knock the vCPU out of guest mode. + c.mu.Unlock() + return + } + + if state := c.State(); state == vCPUGuest || state == vCPUWaiter { + // We know that the vCPU was in guest mode, so a single signal + // interruption will guarantee that a transition takes place. + syscall.Tgkill(pid, int(atomic.LoadUint64(&c.tid)), bounceSignal) + return + } + + // Someone holds the lock, but the vCPU is not yet transitioned + // into guest mode. It's in the critical section; give it time. + yield() + } +} diff --git a/pkg/sentry/platform/kvm/machine_amd64.go b/pkg/sentry/platform/kvm/machine_amd64.go new file mode 100644 index 000000000..dfa691e88 --- /dev/null +++ b/pkg/sentry/platform/kvm/machine_amd64.go @@ -0,0 +1,168 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package kvm + +import ( + "fmt" + "reflect" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// initArchState initializes architecture-specific state. +func (m *machine) initArchState(vCPUs int) error { + // Set the legacy TSS address. This address is covered by the reserved + // range (up to 4GB). In fact, this is a main reason it exists. + if _, _, errno := syscall.RawSyscall( + syscall.SYS_IOCTL, + uintptr(m.fd), + _KVM_SET_TSS_ADDR, + uintptr(reservedMemory-(3*usermem.PageSize))); errno != 0 { + return errno + } + return nil +} + +// initArchState initializes architecture-specific state. +func (c *vCPU) initArchState() error { + var ( + kernelSystemRegs systemRegs + kernelUserRegs userRegs + ) + + // Set base control registers. + kernelSystemRegs.CR0 = c.CR0() + kernelSystemRegs.CR4 = c.CR4() + kernelSystemRegs.EFER = c.EFER() + + // Set the IDT & GDT in the registers. 
+ kernelSystemRegs.IDT.base, kernelSystemRegs.IDT.limit = c.IDT() + kernelSystemRegs.GDT.base, kernelSystemRegs.GDT.limit = c.GDT() + kernelSystemRegs.CS.Load(&ring0.KernelCodeSegment, ring0.Kcode) + kernelSystemRegs.DS.Load(&ring0.UserDataSegment, ring0.Udata) + kernelSystemRegs.ES.Load(&ring0.UserDataSegment, ring0.Udata) + kernelSystemRegs.SS.Load(&ring0.KernelDataSegment, ring0.Kdata) + kernelSystemRegs.FS.Load(&ring0.UserDataSegment, ring0.Udata) + kernelSystemRegs.GS.Load(&ring0.UserDataSegment, ring0.Udata) + tssBase, tssLimit, tss := c.TSS() + kernelSystemRegs.TR.Load(tss, ring0.Tss) + kernelSystemRegs.TR.base = tssBase + kernelSystemRegs.TR.limit = uint32(tssLimit) + + // Point to kernel page tables. + kernelSystemRegs.CR3 = c.machine.kernel.PageTables.FlushCR3() + + // Set the CPUID; this is required before setting system registers, + // since KVM will reject several CR4 bits if the CPUID does not + // indicate the support is available. + if err := c.setCPUID(); err != nil { + return err + } + + // Set the entrypoint for the kernel. + kernelUserRegs.RIP = uint64(reflect.ValueOf(ring0.Start).Pointer()) + kernelUserRegs.RAX = uint64(reflect.ValueOf(&c.CPU).Pointer()) + kernelUserRegs.RFLAGS = ring0.KernelFlagsSet + + // Set the system registers. + if err := c.setSystemRegisters(&kernelSystemRegs); err != nil { + return err + } + + // Set the user registers. + if err := c.setUserRegisters(&kernelUserRegs); err != nil { + return err + } + + // Set the time offset to the host native time. + return c.setSystemTime() +} + +// SwitchToUser unpacks architectural-details. +func (c *vCPU) SwitchToUser(regs *syscall.PtraceRegs, fpState *byte, pt *pagetables.PageTables, flags ring0.Flags) (*arch.SignalInfo, usermem.AccessType, error) { + // See below. + var vector ring0.Vector + + // Past this point, stack growth can cause system calls (and a break + // from guest mode). 
So we need to ensure that between the bluepill
+ call here and the switch call immediately below, no additional
+ allocations occur.
+ entersyscall()
+ bluepill(c)
+ vector = c.CPU.SwitchToUser(regs, fpState, pt, flags)
+ exitsyscall()
+
+ // Free and clear.
+ switch vector {
+ case ring0.Debug, ring0.Breakpoint:
+ info := &arch.SignalInfo{Signo: int32(syscall.SIGTRAP)}
+ return info, usermem.AccessType{}, platform.ErrContextSignal
+
+ case ring0.PageFault:
+ bluepill(c) // Probably no-op, but may not be.
+ faultAddr := ring0.ReadCR2()
+ code, user := c.ErrorCode()
+ if !user {
+ // The last fault serviced by this CPU was not a user
+ // fault, so we can't reliably trust the faultAddr or
+ // the code provided here. We need to re-execute.
+ return nil, usermem.NoAccess, platform.ErrContextInterrupt
+ }
+ info := &arch.SignalInfo{Signo: int32(syscall.SIGSEGV)}
+ info.SetAddr(uint64(faultAddr))
+ accessType := usermem.AccessType{
+ Read: code&(1<<1) == 0,
+ Write: code&(1<<1) != 0,
+ Execute: code&(1<<4) != 0,
+ }
+ return info, accessType, platform.ErrContextSignal
+
+ case ring0.GeneralProtectionFault:
+ if !ring0.IsCanonical(regs.Rip) {
+ // If the RIP is non-canonical, it's a SEGV.
+ info := &arch.SignalInfo{Signo: int32(syscall.SIGSEGV)}
+ return info, usermem.AccessType{}, platform.ErrContextSignal
+ }
+ // Otherwise, we deliver a SIGBUS.
+ info := &arch.SignalInfo{Signo: int32(syscall.SIGBUS)}
+ return info, usermem.AccessType{}, platform.ErrContextSignal
+
+ case ring0.InvalidOpcode:
+ info := &arch.SignalInfo{Signo: int32(syscall.SIGILL)}
+ return info, usermem.AccessType{}, platform.ErrContextSignal
+
+ case ring0.X87FloatingPointException:
+ info := &arch.SignalInfo{Signo: int32(syscall.SIGFPE)}
+ return info, usermem.AccessType{}, platform.ErrContextSignal
+
+ case ring0.Vector(bounce):
+ redpill() // Bail and reacquire.
+ return nil, usermem.NoAccess, platform.ErrContextInterrupt
+
+ case ring0.Syscall, ring0.SyscallInt80:
+ // System call executed. 
+ return nil, usermem.NoAccess, nil + + default: + panic(fmt.Sprintf("unexpected vector: 0x%x", vector)) + } +} diff --git a/pkg/sentry/platform/kvm/machine_amd64_unsafe.go b/pkg/sentry/platform/kvm/machine_amd64_unsafe.go new file mode 100644 index 000000000..c2bcb3a47 --- /dev/null +++ b/pkg/sentry/platform/kvm/machine_amd64_unsafe.go @@ -0,0 +1,156 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package kvm + +import ( + "fmt" + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/time" +) + +// setMemoryRegion initializes a region. +// +// This may be called from bluepillHandler, and therefore returns an errno +// directly (instead of wrapping in an error) to avoid allocations. +// +//go:nosplit +func (m *machine) setMemoryRegion(slot int, physical, length, virtual uintptr) syscall.Errno { + userRegion := userMemoryRegion{ + slot: uint32(slot), + flags: 0, + guestPhysAddr: uint64(physical), + memorySize: uint64(length), + userspaceAddr: uint64(virtual), + } + + // Set the region. + _, _, errno := syscall.RawSyscall( + syscall.SYS_IOCTL, + uintptr(m.fd), + _KVM_SET_USER_MEMORY_REGION, + uintptr(unsafe.Pointer(&userRegion))) + return errno +} + +// loadSegments copies the current segments. +// +// This may be called from within the signal context and throws on error. 
+// +//go:nosplit +func (c *vCPU) loadSegments() { + if _, _, errno := syscall.RawSyscall( + syscall.SYS_ARCH_PRCTL, + linux.ARCH_GET_FS, + uintptr(unsafe.Pointer(&c.CPU.Registers().Fs_base)), + 0); errno != 0 { + throw("getting FS segment") + } + if _, _, errno := syscall.RawSyscall( + syscall.SYS_ARCH_PRCTL, + linux.ARCH_GET_GS, + uintptr(unsafe.Pointer(&c.CPU.Registers().Gs_base)), + 0); errno != 0 { + throw("getting GS segment") + } +} + +// setUserRegisters sets user registers in the vCPU. +func (c *vCPU) setUserRegisters(uregs *userRegs) error { + if _, _, errno := syscall.RawSyscall( + syscall.SYS_IOCTL, + uintptr(c.fd), + _KVM_SET_REGS, + uintptr(unsafe.Pointer(uregs))); errno != 0 { + return fmt.Errorf("error setting user registers: %v", errno) + } + return nil +} + +// setSystemRegisters sets system registers. +func (c *vCPU) setSystemRegisters(sregs *systemRegs) error { + if _, _, errno := syscall.RawSyscall( + syscall.SYS_IOCTL, + uintptr(c.fd), + _KVM_SET_SREGS, + uintptr(unsafe.Pointer(sregs))); errno != 0 { + return fmt.Errorf("error setting system registers: %v", errno) + } + return nil +} + +// setCPUID sets the CPUID to be used by the guest. +func (c *vCPU) setCPUID() error { + if _, _, errno := syscall.RawSyscall( + syscall.SYS_IOCTL, + uintptr(c.fd), + _KVM_SET_CPUID2, + uintptr(unsafe.Pointer(&cpuidSupported))); errno != 0 { + return fmt.Errorf("error setting CPUID: %v", errno) + } + return nil +} + +// setSystemTime sets the TSC for the vCPU. +// +// FIXME: This introduces a slight TSC offset between host and +// guest, which may vary per vCPU. 
+func (c *vCPU) setSystemTime() error {
+ const _MSR_IA32_TSC = 0x00000010
+ registers := modelControlRegisters{
+ nmsrs: 1,
+ }
+ registers.entries[0] = modelControlRegister{
+ index: _MSR_IA32_TSC,
+ data: uint64(time.Rdtsc()),
+ }
+ if _, _, errno := syscall.RawSyscall(
+ syscall.SYS_IOCTL,
+ uintptr(c.fd),
+ _KVM_SET_MSRS,
+ uintptr(unsafe.Pointer(&registers))); errno != 0 {
+ return fmt.Errorf("error setting system time: %v", errno)
+ }
+ return nil
+}
+
+// setSignalMask sets the vCPU signal mask.
+//
+// This must be called prior to running the vCPU.
+func (c *vCPU) setSignalMask() error {
+ // The layout of this structure implies that it will not necessarily be
+ // the same layout chosen by the Go compiler. It gets fudged here.
+ var data struct {
+ length uint32
+ mask1 uint32
+ mask2 uint32
+ _ uint32
+ }
+ data.length = 8 // Fixed sigset size.
+ data.mask1 = ^uint32(bounceSignalMask & 0xffffffff)
+ data.mask2 = ^uint32(bounceSignalMask >> 32)
+ if _, _, errno := syscall.RawSyscall(
+ syscall.SYS_IOCTL,
+ uintptr(c.fd),
+ _KVM_SET_SIGNAL_MASK,
+ uintptr(unsafe.Pointer(&data))); errno != 0 {
+ return fmt.Errorf("error setting signal mask: %v", errno)
+ }
+ return nil
+}
diff --git a/pkg/sentry/platform/kvm/machine_unsafe.go b/pkg/sentry/platform/kvm/machine_unsafe.go
new file mode 100644
index 000000000..da67e23f6
--- /dev/null
+++ b/pkg/sentry/platform/kvm/machine_unsafe.go
@@ -0,0 +1,112 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kvm
+
+import (
+ "fmt"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+
+ "gvisor.googlesource.com/gvisor/pkg/abi/linux"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables"
+)
+
+//go:linkname entersyscall runtime.entersyscall
+func entersyscall()
+
+//go:linkname exitsyscall runtime.exitsyscall
+func exitsyscall()
+
+// TranslateToPhysical implements pagetables.Translater.TranslateToPhysical.
+func (m *machine) TranslateToPhysical(ptes *pagetables.PTEs) uintptr {
+ // The length doesn't matter because all these translations require
+ // only a single page, which is guaranteed to be satisfied.
+ physical, _, ok := TranslateToPhysical(uintptr(unsafe.Pointer(ptes)))
+ if !ok {
+ panic("unable to translate pagetables.Node to physical address")
+ }
+ return physical
+}
+
+// mapRunData maps the vCPU run data.
+func mapRunData(fd int) (*runData, error) {
+ r, _, errno := syscall.RawSyscall6(
+ syscall.SYS_MMAP,
+ 0,
+ uintptr(runDataSize),
+ syscall.PROT_READ|syscall.PROT_WRITE,
+ syscall.MAP_SHARED,
+ uintptr(fd),
+ 0)
+ if errno != 0 {
+ return nil, fmt.Errorf("error mapping runData: %v", errno)
+ }
+ return (*runData)(unsafe.Pointer(r)), nil
+}
+
+// unmapRunData unmaps the vCPU run data.
+func unmapRunData(r *runData) error {
+ if _, _, errno := syscall.RawSyscall(
+ syscall.SYS_MUNMAP,
+ uintptr(unsafe.Pointer(r)),
+ uintptr(runDataSize),
+ 0); errno != 0 {
+ return fmt.Errorf("error unmapping runData: %v", errno)
+ }
+ return nil
+}
+
+// notify notifies that the vCPU has returned to host mode.
+//
+// This may be called by a signal handler and therefore throws on error.
+//
+//go:nosplit
+func (c *vCPU) notify() {
+ _, _, errno := syscall.RawSyscall6(
+ syscall.SYS_FUTEX,
+ uintptr(unsafe.Pointer(&c.state)),
+ linux.FUTEX_WAKE,
+ ^uintptr(0), // Number of waiters. 
+ 0, 0, 0) + if errno != 0 { + throw("futex wake error") + } +} + +// wait waits for the vCPU to return to host mode. +// +// This panics on error. +func (c *vCPU) wait() { + if !atomic.CompareAndSwapUintptr(&c.state, vCPUGuest, vCPUWaiter) { + return // Nothing to wait for. + } + for { + _, _, errno := syscall.Syscall6( + syscall.SYS_FUTEX, + uintptr(unsafe.Pointer(&c.state)), + linux.FUTEX_WAIT, + uintptr(vCPUWaiter), // Expected value. + 0, 0, 0) + if errno == syscall.EINTR { + continue + } else if errno == syscall.EAGAIN { + break + } else if errno != 0 { + panic("futex wait error") + } + break + } +} diff --git a/pkg/sentry/platform/kvm/physical_map.go b/pkg/sentry/platform/kvm/physical_map.go new file mode 100644 index 000000000..5d55c9486 --- /dev/null +++ b/pkg/sentry/platform/kvm/physical_map.go @@ -0,0 +1,221 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvm + +import ( + "fmt" + "sort" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +const ( + // reservedMemory is a chunk of physical memory reserved starting at + // physical address zero. There are some special pages in this region, + // so we just call the whole thing off. + // + // Other architectures may define this to be zero. 
+ reservedMemory = 0x100000000 +) + +type region struct { + virtual uintptr + length uintptr +} + +type physicalRegion struct { + region + physical uintptr +} + +// physicalRegions contains a list of available physical regions. +// +// The physical value used in physicalRegions is a number indicating the +// physical offset, aligned appropriately and starting above reservedMemory. +var physicalRegions []physicalRegion + +// fillAddressSpace fills the host address space with PROT_NONE mappings until +// the number of available bits until we have a host address space size that is +// equal to the physical address space. +// +// The excluded regions are returned. +func fillAddressSpace() (excludedRegions []region) { + // We can cut vSize in half, because the kernel will be using the top + // half and we ignore it while constructing mappings. It's as if we've + // already excluded half the possible addresses. + vSize := uintptr(1) << ring0.VirtualAddressBits() + vSize = vSize >> 1 + + // We exclude reservedMemory below from our physical memory size, so it + // needs to be dropped here as well. Otherwise, we could end up with + // physical addresses that are beyond what is mapped. + pSize := uintptr(1) << ring0.PhysicalAddressBits() + pSize -= reservedMemory + + // Sanity check. + if vSize < pSize { + panic(fmt.Sprintf("vSize (%x) < pSize (%x)", vSize, pSize)) + } + + // Add specifically excluded regions; see excludeVirtualRegion. + applyVirtualRegions(func(vr virtualRegion) { + if excludeVirtualRegion(vr) { + excludedRegions = append(excludedRegions, vr.region) + vSize -= vr.length + log.Infof("excluded: virtual [%x,%x)", vr.virtual, vr.virtual+vr.length) + } + }) + + // Calculate the required space and fill it. + // + // Note carefully that we add faultBlockSize to required up front, and + // on each iteration of the loop below (i.e. each new physical region + // we define), we add faultBlockSize again. 
This is done because the + // computation of physical regions will ensure proper alignments with + // faultBlockSize, potentially causing up to faultBlockSize bytes in + // internal fragmentation for each physical region. So we need to + // account for this properly during allocation. + requiredAddr, ok := usermem.Addr(vSize - pSize + faultBlockSize).RoundUp() + if !ok { + panic(fmt.Sprintf( + "overflow for vSize (%x) - pSize (%x) + faultBlockSize (%x)", + vSize, pSize, faultBlockSize)) + } + required := uintptr(requiredAddr) + current := required // Attempted mmap size. + for filled := uintptr(0); filled < required && current > 0; { + addr, _, errno := syscall.RawSyscall6( + syscall.SYS_MMAP, + 0, // Suggested address. + current, + syscall.PROT_NONE, + syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE|syscall.MAP_NORESERVE, + 0, 0) + if errno != 0 { + // Attempt half the size; overflow not possible. + currentAddr, _ := usermem.Addr(current >> 1).RoundUp() + current = uintptr(currentAddr) + continue + } + // We filled a block. + filled += current + excludedRegions = append(excludedRegions, region{ + virtual: addr, + length: current, + }) + // See comment above. + if filled != required { + required += faultBlockSize + } + } + if current == 0 { + panic("filling address space failed") + } + sort.Slice(excludedRegions, func(i, j int) bool { + return excludedRegions[i].virtual < excludedRegions[j].virtual + }) + for _, r := range excludedRegions { + log.Infof("region: virtual [%x,%x)", r.virtual, r.virtual+r.length) + } + return excludedRegions +} + +// computePhysicalRegions computes physical regions. 
+func computePhysicalRegions(excludedRegions []region) (physicalRegions []physicalRegion) { + physical := uintptr(reservedMemory) + addValidRegion := func(virtual, length uintptr) { + if length == 0 { + return + } + if virtual == 0 { + virtual += usermem.PageSize + length -= usermem.PageSize + } + if end := virtual + length; end > ring0.MaximumUserAddress { + length -= (end - ring0.MaximumUserAddress) + } + if length == 0 { + return + } + // Round physical up to the same alignment as the virtual + // address (with respect to faultBlockSize). + if offset := virtual &^ faultBlockMask; physical&^faultBlockMask != offset { + if newPhysical := (physical & faultBlockMask) + offset; newPhysical > physical { + physical = newPhysical // Round up by only a little bit. + } else { + physical = ((physical + faultBlockSize) & faultBlockMask) + offset + } + } + physicalRegions = append(physicalRegions, physicalRegion{ + region: region{ + virtual: virtual, + length: length, + }, + physical: physical, + }) + physical += length + } + lastExcludedEnd := uintptr(0) + for _, r := range excludedRegions { + addValidRegion(lastExcludedEnd, r.virtual-lastExcludedEnd) + lastExcludedEnd = r.virtual + r.length + } + addValidRegion(lastExcludedEnd, ring0.MaximumUserAddress-lastExcludedEnd) + + // Dump our all physical regions. + for _, r := range physicalRegions { + log.Infof("physicalRegion: virtual [%x,%x) => physical [%x,%x)", + r.virtual, r.virtual+r.length, r.physical, r.physical+r.length) + } + return physicalRegions +} + +// physicalInit initializes physical address mappings. +func physicalInit() { + physicalRegions = computePhysicalRegions(fillAddressSpace()) +} + +// applyPhysicalRegions applies the given function on physical regions. +// +// Iteration continues as long as true is returned. The return value is the +// return from the last call to fn, or true if there are no entries. +// +// Precondition: physicalInit must have been called. 
+func applyPhysicalRegions(fn func(pr physicalRegion) bool) bool { + for _, pr := range physicalRegions { + if !fn(pr) { + return false + } + } + return true +} + +// TranslateToPhysical translates the given virtual address. +// +// Precondition: physicalInit must have been called. +func TranslateToPhysical(virtual uintptr) (physical uintptr, length uintptr, ok bool) { + ok = !applyPhysicalRegions(func(pr physicalRegion) bool { + if pr.virtual <= virtual && virtual < pr.virtual+pr.length { + physical = pr.physical + (virtual - pr.virtual) + length = pr.length - (virtual - pr.virtual) + return false + } + return true + }) + return +} diff --git a/pkg/sentry/platform/kvm/testutil/BUILD b/pkg/sentry/platform/kvm/testutil/BUILD new file mode 100644 index 000000000..8533a8d89 --- /dev/null +++ b/pkg/sentry/platform/kvm/testutil/BUILD @@ -0,0 +1,15 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "testutil", + testonly = 1, + srcs = [ + "testutil.go", + "testutil_amd64.go", + "testutil_amd64.s", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/platform/kvm/testutil", + visibility = ["//pkg/sentry/platform/kvm:__pkg__"], +) diff --git a/pkg/sentry/platform/kvm/testutil/testutil.go b/pkg/sentry/platform/kvm/testutil/testutil.go new file mode 100644 index 000000000..8a614e25d --- /dev/null +++ b/pkg/sentry/platform/kvm/testutil/testutil.go @@ -0,0 +1,75 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package testutil provides common assembly stubs for testing. +package testutil + +import ( + "fmt" + "strings" +) + +// Getpid executes a trivial system call. +func Getpid() + +// Touch touches the value in the first register. +func Touch() + +// SyscallLoop executes a syscall and loops. +func SyscallLoop() + +// SpinLoop spins on the CPU. +func SpinLoop() + +// HaltLoop immediately halts and loops. +func HaltLoop() + +// TwiddleRegsFault twiddles registers then faults. +func TwiddleRegsFault() + +// TwiddleRegsSyscall twiddles registers then executes a syscall. +func TwiddleRegsSyscall() + +// TwiddleSegments reads segments into known registers. +func TwiddleSegments() + +// FloatingPointWorks is a floating point test. +// +// It returns true or false. +func FloatingPointWorks() bool + +// RegisterMismatchError is used for checking registers. +type RegisterMismatchError []string + +// Error returns a human-readable error. +func (r RegisterMismatchError) Error() string { + return strings.Join([]string(r), ";") +} + +// addRegisterMismatch allows simple chaining of register mismatches. +func addRegisterMismatch(err error, reg string, got, expected interface{}) error { + errStr := fmt.Sprintf("%s got %08x, expected %08x", reg, got, expected) + switch r := err.(type) { + case nil: + // Return a new register mismatch. + return RegisterMismatchError{errStr} + case RegisterMismatchError: + // Append the error. + r = append(r, errStr) + return r + default: + // Leave as is. + return err + } +} diff --git a/pkg/sentry/platform/kvm/testutil/testutil_amd64.go b/pkg/sentry/platform/kvm/testutil/testutil_amd64.go new file mode 100644 index 000000000..39286a0af --- /dev/null +++ b/pkg/sentry/platform/kvm/testutil/testutil_amd64.go @@ -0,0 +1,135 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package testutil + +import ( + "reflect" + "syscall" +) + +// SetTestTarget sets the rip appropriately. +func SetTestTarget(regs *syscall.PtraceRegs, fn func()) { + regs.Rip = uint64(reflect.ValueOf(fn).Pointer()) +} + +// SetTouchTarget sets rax appropriately. +func SetTouchTarget(regs *syscall.PtraceRegs, target *uintptr) { + if target != nil { + regs.Rax = uint64(reflect.ValueOf(target).Pointer()) + } else { + regs.Rax = 0 + } +} + +// RewindSyscall rewinds a syscall RIP. +func RewindSyscall(regs *syscall.PtraceRegs) { + regs.Rip -= 2 +} + +// SetTestRegs initializes registers to known values. +func SetTestRegs(regs *syscall.PtraceRegs) { + regs.R15 = 0x15 + regs.R14 = 0x14 + regs.R13 = 0x13 + regs.R12 = 0x12 + regs.Rbp = 0xb9 + regs.Rbx = 0xb4 + regs.R11 = 0x11 + regs.R10 = 0x10 + regs.R9 = 0x09 + regs.R8 = 0x08 + regs.Rax = 0x44 + regs.Rcx = 0xc4 + regs.Rdx = 0xd4 + regs.Rsi = 0x51 + regs.Rdi = 0xd1 + regs.Rsp = 0x59 +} + +// CheckTestRegs checks that registers were twiddled per TwiddleRegs. 
+func CheckTestRegs(regs *syscall.PtraceRegs, full bool) (err error) { + if need := ^uint64(0x15); regs.R15 != need { + err = addRegisterMismatch(err, "R15", regs.R15, need) + } + if need := ^uint64(0x14); regs.R14 != need { + err = addRegisterMismatch(err, "R14", regs.R14, need) + } + if need := ^uint64(0x13); regs.R13 != need { + err = addRegisterMismatch(err, "R13", regs.R13, need) + } + if need := ^uint64(0x12); regs.R12 != need { + err = addRegisterMismatch(err, "R12", regs.R12, need) + } + if need := ^uint64(0xb9); regs.Rbp != need { + err = addRegisterMismatch(err, "Rbp", regs.Rbp, need) + } + if need := ^uint64(0xb4); regs.Rbx != need { + err = addRegisterMismatch(err, "Rbx", regs.Rbx, need) + } + if need := ^uint64(0x10); regs.R10 != need { + err = addRegisterMismatch(err, "R10", regs.R10, need) + } + if need := ^uint64(0x09); regs.R9 != need { + err = addRegisterMismatch(err, "R9", regs.R9, need) + } + if need := ^uint64(0x08); regs.R8 != need { + err = addRegisterMismatch(err, "R8", regs.R8, need) + } + if need := ^uint64(0x44); regs.Rax != need { + err = addRegisterMismatch(err, "Rax", regs.Rax, need) + } + if need := ^uint64(0xd4); regs.Rdx != need { + err = addRegisterMismatch(err, "Rdx", regs.Rdx, need) + } + if need := ^uint64(0x51); regs.Rsi != need { + err = addRegisterMismatch(err, "Rsi", regs.Rsi, need) + } + if need := ^uint64(0xd1); regs.Rdi != need { + err = addRegisterMismatch(err, "Rdi", regs.Rdi, need) + } + if need := ^uint64(0x59); regs.Rsp != need { + err = addRegisterMismatch(err, "Rsp", regs.Rsp, need) + } + // Rcx & R11 are ignored if !full is set. + if need := ^uint64(0x11); full && regs.R11 != need { + err = addRegisterMismatch(err, "R11", regs.R11, need) + } + if need := ^uint64(0xc4); full && regs.Rcx != need { + err = addRegisterMismatch(err, "Rcx", regs.Rcx, need) + } + return +} + +var fsData uint64 = 0x55 +var gsData uint64 = 0x85 + +// SetTestSegments initializes segments to known values. 
+func SetTestSegments(regs *syscall.PtraceRegs) { + regs.Fs_base = uint64(reflect.ValueOf(&fsData).Pointer()) + regs.Gs_base = uint64(reflect.ValueOf(&gsData).Pointer()) +} + +// CheckTestSegments checks that registers were twiddled per TwiddleSegments. +func CheckTestSegments(regs *syscall.PtraceRegs) (err error) { + if regs.Rax != fsData { + err = addRegisterMismatch(err, "Rax", regs.Rax, fsData) + } + if regs.Rbx != gsData { + err = addRegisterMismatch(err, "Rbx", regs.Rbx, gsData) + } + return +} diff --git a/pkg/sentry/platform/kvm/testutil/testutil_amd64.s b/pkg/sentry/platform/kvm/testutil/testutil_amd64.s new file mode 100644 index 000000000..3b5ad8817 --- /dev/null +++ b/pkg/sentry/platform/kvm/testutil/testutil_amd64.s @@ -0,0 +1,98 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +// test_util_amd64.s provides AMD64 test functions. 
+ +#include "funcdata.h" +#include "textflag.h" + +TEXT ·Getpid(SB),NOSPLIT,$0 + NO_LOCAL_POINTERS + MOVQ $39, AX // getpid + SYSCALL + RET + +TEXT ·Touch(SB),NOSPLIT,$0 +start: + MOVQ 0(AX), BX // deref AX + MOVQ $39, AX // getpid + SYSCALL + JMP start + +TEXT ·HaltLoop(SB),NOSPLIT,$0 +start: + HLT + JMP start + +TEXT ·SyscallLoop(SB),NOSPLIT,$0 +start: + SYSCALL + JMP start + +TEXT ·SpinLoop(SB),NOSPLIT,$0 +start: + JMP start + +TEXT ·FloatingPointWorks(SB),NOSPLIT,$0-8 + NO_LOCAL_POINTERS + MOVQ $1, AX + MOVQ AX, X0 + MOVQ $39, AX // getpid + SYSCALL + MOVQ X0, AX + CMPQ AX, $1 + SETEQ ret+0(FP) + RET + +#define TWIDDLE_REGS() \ + NOTQ R15; \ + NOTQ R14; \ + NOTQ R13; \ + NOTQ R12; \ + NOTQ BP; \ + NOTQ BX; \ + NOTQ R11; \ + NOTQ R10; \ + NOTQ R9; \ + NOTQ R8; \ + NOTQ AX; \ + NOTQ CX; \ + NOTQ DX; \ + NOTQ SI; \ + NOTQ DI; \ + NOTQ SP; + +TEXT ·TwiddleRegsSyscall(SB),NOSPLIT,$0 + TWIDDLE_REGS() + SYSCALL + RET // never reached + +TEXT ·TwiddleRegsFault(SB),NOSPLIT,$0 + TWIDDLE_REGS() + JMP AX // must fault + RET // never reached + +#define READ_FS() BYTE $0x64; BYTE $0x48; BYTE $0x8b; BYTE $0x00; +#define READ_GS() BYTE $0x65; BYTE $0x48; BYTE $0x8b; BYTE $0x00; + +TEXT ·TwiddleSegments(SB),NOSPLIT,$0 + MOVQ $0x0, AX + READ_GS() + MOVQ AX, BX + MOVQ $0x0, AX + READ_FS() + SYSCALL + RET // never reached diff --git a/pkg/sentry/platform/kvm/virtual_map.go b/pkg/sentry/platform/kvm/virtual_map.go new file mode 100644 index 000000000..0d3fbe043 --- /dev/null +++ b/pkg/sentry/platform/kvm/virtual_map.go @@ -0,0 +1,113 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kvm + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strconv" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type virtualRegion struct { + region + accessType usermem.AccessType + shared bool + offset uintptr + filename string +} + +// mapsLine matches a single line from /proc/PID/maps. +var mapsLine = regexp.MustCompile("([0-9a-f]+)-([0-9a-f]+) ([r-][w-][x-][sp]) ([0-9a-f]+) [0-9a-f]{2}:[0-9a-f]{2,} [0-9]+\\s+(.*)") + +// excludeRegion returns true if these regions should be excluded from the +// physical map. Virtual regions need to be excluded if get_user_pages will +// fail on those addresses, preventing KVM from satisfying EPT faults. +// +// This includes the VVAR page because the VVAR page may be mapped as I/O +// memory. And the VDSO page is knocked out because the VVAR page is not even +// recorded in /proc/self/maps on older kernels; knocking out the VDSO page +// prevents code in the VDSO from accessing the VVAR address. +// +// This is called by the physical map functions, not applyVirtualRegions. +func excludeVirtualRegion(r virtualRegion) bool { + return r.filename == "[vvar]" || r.filename == "[vdso]" +} + +// applyVirtualRegions parses the process maps file. +// +// Unlike mappedRegions, these are not consistent over time. +func applyVirtualRegions(fn func(vr virtualRegion)) error { + // Open /proc/self/maps. + f, err := os.Open("/proc/self/maps") + if err != nil { + return err + } + defer f.Close() + + // Parse all entries. 
+ r := bufio.NewReader(f) + for { + b, err := r.ReadBytes('\n') + if b != nil && len(b) > 0 { + m := mapsLine.FindSubmatch(b) + if m == nil { + // This should not happen: kernel bug? + return fmt.Errorf("badly formed line: %v", string(b)) + } + start, err := strconv.ParseUint(string(m[1]), 16, 64) + if err != nil { + return fmt.Errorf("bad start address: %v", string(b)) + } + end, err := strconv.ParseUint(string(m[2]), 16, 64) + if err != nil { + return fmt.Errorf("bad end address: %v", string(b)) + } + read := m[3][0] == 'r' + write := m[3][1] == 'w' + execute := m[3][2] == 'x' + shared := m[3][3] == 's' + offset, err := strconv.ParseUint(string(m[4]), 16, 64) + if err != nil { + return fmt.Errorf("bad offset: %v", string(b)) + } + fn(virtualRegion{ + region: region{ + virtual: uintptr(start), + length: uintptr(end - start), + }, + accessType: usermem.AccessType{ + Read: read, + Write: write, + Execute: execute, + }, + shared: shared, + offset: uintptr(offset), + filename: string(m[5]), + }) + } + if err != nil && err == io.EOF { + break + } else if err != nil { + return err + } + } + + return nil +} diff --git a/pkg/sentry/platform/kvm/virtual_map_test.go b/pkg/sentry/platform/kvm/virtual_map_test.go new file mode 100644 index 000000000..31e5b0e61 --- /dev/null +++ b/pkg/sentry/platform/kvm/virtual_map_test.go @@ -0,0 +1,78 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kvm + +import ( + "syscall" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type checker struct { + ok bool +} + +func (c *checker) Contains(addr uintptr) func(virtualRegion) { + c.ok = false // Reset for below calls. + return func(vr virtualRegion) { + if vr.virtual <= addr && addr < vr.virtual+vr.length { + c.ok = true + } + } +} + +func TestParseMaps(t *testing.T) { + c := new(checker) + + // Simple test. + if err := applyVirtualRegions(c.Contains(0)); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // MMap a new page. + addr, _, errno := syscall.RawSyscall6( + syscall.SYS_MMAP, 0, usermem.PageSize, + syscall.PROT_READ|syscall.PROT_WRITE, + syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE, 0, 0) + if errno != 0 { + t.Fatalf("unexpected map error: %v", errno) + } + + // Re-parse maps. + if err := applyVirtualRegions(c.Contains(addr)); err != nil { + syscall.RawSyscall(syscall.SYS_MUNMAP, addr, usermem.PageSize, 0) + t.Fatalf("unexpected error: %v", err) + } + + // Assert that it now does contain the region. + if !c.ok { + syscall.RawSyscall(syscall.SYS_MUNMAP, addr, usermem.PageSize, 0) + t.Fatalf("updated map does not contain 0x%08x, expected true", addr) + } + + // Unmap the region. + syscall.RawSyscall(syscall.SYS_MUNMAP, addr, usermem.PageSize, 0) + + // Re-parse maps. + if err := applyVirtualRegions(c.Contains(addr)); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Assert that it once again does _not_ contain the region. + if c.ok { + t.Fatalf("final map does contain 0x%08x, expected false", addr) + } +} diff --git a/pkg/sentry/platform/mmap_min_addr.go b/pkg/sentry/platform/mmap_min_addr.go new file mode 100644 index 000000000..6398e5e01 --- /dev/null +++ b/pkg/sentry/platform/mmap_min_addr.go @@ -0,0 +1,60 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package platform + +import ( + "fmt" + "io/ioutil" + "strconv" + "strings" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// systemMMapMinAddrSource is the source file. +const systemMMapMinAddrSource = "/proc/sys/vm/mmap_min_addr" + +// systemMMapMinAddr is the system's minimum map address. +var systemMMapMinAddr uint64 + +// SystemMMapMinAddr returns the minimum system address. +func SystemMMapMinAddr() usermem.Addr { + return usermem.Addr(systemMMapMinAddr) +} + +// MMapMinAddr is a size zero struct that implements MinUserAddress based on +// the system minimum address. It is suitable for embedding in platforms that +// rely on the system mmap, and thus require the system minimum. +type MMapMinAddr struct { +} + +// MinUserAddress implements platform.MinUserAddress. +func (*MMapMinAddr) MinUserAddress() usermem.Addr { + return SystemMMapMinAddr() +} + +func init() { + // Open the source file. + b, err := ioutil.ReadFile(systemMMapMinAddrSource) + if err != nil { + panic(fmt.Sprintf("couldn't open %s: %v", systemMMapMinAddrSource, err)) + } + + // Parse the result. + systemMMapMinAddr, err = strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64) + if err != nil { + panic(fmt.Sprintf("couldn't parse %s from %s: %v", string(b), systemMMapMinAddrSource, err)) + } +} diff --git a/pkg/sentry/platform/platform.go b/pkg/sentry/platform/platform.go new file mode 100644 index 000000000..6219dada7 --- /dev/null +++ b/pkg/sentry/platform/platform.go @@ -0,0 +1,428 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package platform provides a Platform abstraction. +// +// See Platform for more information. +package platform + +import ( + "fmt" + "io" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// Platform provides abstractions for execution contexts (Context) and memory +// management (Memory, AddressSpace). +type Platform interface { + // SupportsAddressSpaceIO returns true if AddressSpaces returned by this + // Platform support AddressSpaceIO methods. + // + // The value returned by SupportsAddressSpaceIO is guaranteed to remain + // unchanged over the lifetime of the Platform. + SupportsAddressSpaceIO() bool + + // CooperativelySchedulesAddressSpace returns true if the Platform has a + // limited number of AddressSpaces, such that mm.MemoryManager.Deactivate + // should call AddressSpace.Release when there are no goroutines that + // require the mm.MemoryManager to have an active AddressSpace. + // + // The value returned by CooperativelySchedulesAddressSpace is guaranteed + // to remain unchanged over the lifetime of the Platform. 
+ CooperativelySchedulesAddressSpace() bool + + // DetectsCPUPreemption returns true if Contexts returned by the Platform + // can reliably return ErrContextCPUPreempted. + DetectsCPUPreemption() bool + + // MapUnit returns the alignment used for optional mappings into this + // platform's AddressSpaces. Higher values indicate lower per-page + // costs for AddressSpace.MapInto. As a special case, a MapUnit of 0 + // indicates that the cost of AddressSpace.MapInto is effectively + // independent of the number of pages mapped. If MapUnit is non-zero, + // it must be a power-of-2 multiple of usermem.PageSize. + MapUnit() uint64 + + // MinUserAddress returns the minimum mappable address on this + // platform. + MinUserAddress() usermem.Addr + + // MaxUserAddress returns the maximum mappable address on this + // platform. + MaxUserAddress() usermem.Addr + + // NewAddressSpace returns a new memory context for this platform. + // + // If mappingsID is not nil, the platform may assume that (1) all calls + // to NewAddressSpace with the same mappingsID represent the same + // (mutable) set of mappings, and (2) the set of mappings has not + // changed since the last time AddressSpace.Release was called on an + // AddressSpace returned by a call to NewAddressSpace with the same + // mappingsID. + // + // If a new AddressSpace cannot be created immediately, a nil + // AddressSpace is returned, along with channel that is closed when + // the caller should retry a call to NewAddressSpace. + // + // In general, this blocking behavior only occurs when + // CooperativelySchedulesAddressSpace (above) returns false. + NewAddressSpace(mappingsID interface{}) (AddressSpace, <-chan struct{}, error) + + // NewContext returns a new execution context. + NewContext() Context + + // Memory returns memory for allocations. 
+ Memory() Memory + + // PreemptAllCPUs causes all concurrent calls to Context.Switch(), as well + // as the first following call to Context.Switch() for each Context, to + // return ErrContextCPUPreempted. + // + // PreemptAllCPUs is only supported if DetectsCPUPreemption() == true. + // Platforms for which this does not hold may panic if PreemptAllCPUs is + // called. + PreemptAllCPUs() error +} + +// NoCPUPreemptionDetection implements Platform.DetectsCPUPreemption and +// dependent methods for Platforms that do not support this feature. +type NoCPUPreemptionDetection struct{} + +// DetectsCPUPreemption implements Platform.DetectsCPUPreemption. +func (NoCPUPreemptionDetection) DetectsCPUPreemption() bool { + return false +} + +// PreemptAllCPUs implements Platform.PreemptAllCPUs. +func (NoCPUPreemptionDetection) PreemptAllCPUs() error { + panic("This platform does not support CPU preemption detection") +} + +// Context represents the execution context for a single thread. +type Context interface { + // Switch resumes execution of the thread specified by the arch.Context + // in the provided address space. This call will block while the thread + // is executing. + // + // If cpu is non-negative, and it is not the number of the CPU that the + // thread executes on, Context should return ErrContextCPUPreempted. cpu + // can only be non-negative if Platform.DetectsCPUPreemption() is true; + // Contexts from Platforms for which this does not hold may ignore cpu, or + // panic if cpu is non-negative. + // + // Switch may return one of the following special errors: + // + // - nil: The Context invoked a system call. + // + // - ErrContextSignal: The Context was interrupted by a signal. The + // returned *arch.SignalInfo contains information about the signal. If + // arch.SignalInfo.Signo == SIGSEGV, the returned usermem.AccessType + // contains the access type of the triggering fault. 
+ // + // - ErrContextInterrupt: The Context was interrupted by a call to + // Interrupt(). Switch() may return ErrContextInterrupt spuriously. In + // particular, most implementations of Interrupt() will cause the first + // following call to Switch() to return ErrContextInterrupt if there is no + // concurrent call to Switch(). + // + // - ErrContextCPUPreempted: See the definition of that error for details. + Switch(as AddressSpace, ac arch.Context, cpu int32) (*arch.SignalInfo, usermem.AccessType, error) + + // Interrupt interrupts a concurrent call to Switch(), causing it to return + // ErrContextInterrupt. + Interrupt() +} + +var ( + // ErrContextSignal is returned by Context.Switch() to indicate that the + // Context was interrupted by a signal. + ErrContextSignal = fmt.Errorf("interrupted by signal") + + // ErrContextInterrupt is returned by Context.Switch() to indicate that the + // Context was interrupted by a call to Context.Interrupt(). + ErrContextInterrupt = fmt.Errorf("interrupted by platform.Context.Interrupt()") + + // ErrContextCPUPreempted is returned by Context.Switch() to indicate that + // one of the following occurred: + // + // - The CPU executing the Context is not the CPU passed to + // Context.Switch(). + // + // - The CPU executing the Context may have executed another Context since + // the last time it executed this one; or the CPU has previously executed + // another Context, and has never executed this one. + // + // - Platform.PreemptAllCPUs() was called since the last return from + // Context.Switch(). + ErrContextCPUPreempted = fmt.Errorf("interrupted by CPU preemption") +) + +// SignalInterrupt is a signal reserved for use by implementations of +// Context.Interrupt(). The sentry guarantees that it will ignore delivery of +// this signal both to Contexts and to the sentry itself, under the assumption +// that they originate from races with Context.Interrupt(). 
+// +// NOTE: The Go runtime only guarantees that a small subset +// of signals will always be unblocked on all threads, one of which +// is SIGCHLD. +const SignalInterrupt = linux.SIGCHLD + +// AddressSpace represents a virtual address space in which a Context can +// execute. +type AddressSpace interface { + // MapFile creates a shared mapping of offsets in fr, from the file + // with file descriptor fd, at address addr. Any existing overlapping + // mappings are silently replaced. + // + // If precommit is true, host memory should be committed to the mapping + // when MapFile returns when possible. The precommit flag is advisory + // and implementations may choose to ignore it. + // + // Preconditions: addr and fr must be page-aligned. length > 0. + // at.Any() == true. + MapFile(addr usermem.Addr, fd int, fr FileRange, at usermem.AccessType, precommit bool) error + + // Unmap unmaps the given range. + // + // Preconditions: addr is page-aligned. length > 0. + Unmap(addr usermem.Addr, length uint64) + + // Release releases this address space. After releasing, a new AddressSpace + // must be acquired via platform.NewAddressSpace(). + Release() error + + // AddressSpaceIO methods are supported iff the associated platform's + // Platform.SupportsAddressSpaceIO() == true. AddressSpaces for which this + // does not hold may panic if AddressSpaceIO methods are invoked. + AddressSpaceIO +} + +// AddressSpaceIO supports IO through the memory mappings installed in an +// AddressSpace. +// +// AddressSpaceIO implementors are responsible for ensuring that address ranges +// are application-mappable. +type AddressSpaceIO interface { + // CopyOut copies len(src) bytes from src to the memory mapped at addr. It + // returns the number of bytes copied. If the number of bytes copied is < + // len(src), it returns a non-nil error explaining why. + CopyOut(addr usermem.Addr, src []byte) (int, error) + + // CopyIn copies len(dst) bytes from the memory mapped at addr to dst. 
+ // It returns the number of bytes copied. If the number of bytes copied is + // < len(dst), it returns a non-nil error explaining why. + CopyIn(addr usermem.Addr, dst []byte) (int, error) + + // ZeroOut sets toZero bytes to 0, starting at addr. It returns the number + // of bytes zeroed. If the number of bytes zeroed is < toZero, it returns a + // non-nil error explaining why. + ZeroOut(addr usermem.Addr, toZero uintptr) (uintptr, error) + + // SwapUint32 atomically sets the uint32 value at addr to new and returns + // the previous value. + // + // Preconditions: addr must be aligned to a 4-byte boundary. + SwapUint32(addr usermem.Addr, new uint32) (uint32, error) + + // CompareAndSwapUint32 atomically compares the uint32 value at addr to + // old; if they are equal, the value in memory is replaced by new. In + // either case, the previous value stored in memory is returned. + // + // Preconditions: addr must be aligned to a 4-byte boundary. + CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) +} + +// NoAddressSpaceIO implements AddressSpaceIO methods by panicking. +type NoAddressSpaceIO struct{} + +// CopyOut implements AddressSpaceIO.CopyOut. +func (NoAddressSpaceIO) CopyOut(addr usermem.Addr, src []byte) (int, error) { + panic("This platform does not support AddressSpaceIO") +} + +// CopyIn implements AddressSpaceIO.CopyIn. +func (NoAddressSpaceIO) CopyIn(addr usermem.Addr, dst []byte) (int, error) { + panic("This platform does not support AddressSpaceIO") +} + +// ZeroOut implements AddressSpaceIO.ZeroOut. +func (NoAddressSpaceIO) ZeroOut(addr usermem.Addr, toZero uintptr) (uintptr, error) { + panic("This platform does not support AddressSpaceIO") +} + +// SwapUint32 implements AddressSpaceIO.SwapUint32. +func (NoAddressSpaceIO) SwapUint32(addr usermem.Addr, new uint32) (uint32, error) { + panic("This platform does not support AddressSpaceIO") +} + +// CompareAndSwapUint32 implements AddressSpaceIO.CompareAndSwapUint32. 
+func (NoAddressSpaceIO) CompareAndSwapUint32(addr usermem.Addr, old, new uint32) (uint32, error) { + panic("This platform does not support AddressSpaceIO") +} + +// SegmentationFault is an error returned by AddressSpaceIO methods when IO +// fails due to access of an unmapped page, or a mapped page with insufficient +// permissions. +type SegmentationFault struct { + // Addr is the address at which the fault occurred. + Addr usermem.Addr +} + +// Error implements error.Error. +func (f SegmentationFault) Error() string { + return fmt.Sprintf("segmentation fault at %#x", f.Addr) +} + +// File represents a host file that may be mapped into an AddressSpace. +type File interface { + // MapInto maps fr into as, starting at addr, for accesses of type at. + // + // If precommit is true, the platform should eagerly commit resources (e.g. + // physical memory) to the mapping. The precommit flag is advisory and + // implementations may choose to ignore it. + // + // Note that there is no File.Unmap; clients should use as.Unmap directly. + // + // Preconditions: fr.Start and fr.End must be page-aligned. + // fr.Length() > 0. at.Any() == true. Implementors may define + // additional requirements. + MapInto(as AddressSpace, addr usermem.Addr, fr FileRange, at usermem.AccessType, precommit bool) error + + // MapInternal returns a mapping of the given file offsets in the invoking + // process' address space for reading and writing. The lifetime of the + // returned mapping is implementation-defined. + // + // Note that fr.Start and fr.End need not be page-aligned. + // + // Preconditions: fr.Length() > 0. Implementors may define additional + // requirements. + MapInternal(fr FileRange, at usermem.AccessType) (safemem.BlockSeq, error) + + // IncRef signals that a region in the file is actively referenced through a + // memory map. Implementors must ensure that the contents of a referenced + // region remain consistent. 
Specifically, mappings returned by MapInternal + // must refer to the same underlying contents. If the implementor also + // implements the Memory interface, the file range must not be reused in a + // different allocation while it has active references. + // + // Preconditions: fr.Start and fr.End must be page-aligned. fr.Length() > 0. + IncRef(fr FileRange) + + // DecRef reduces the frame ref count on the range specified by fr. + // + // Preconditions: fr.Start and fr.End must be page-aligned. fr.Length() > + // 0. DecRef()s on a region must match earlier IncRef()s. + DecRef(fr FileRange) +} + +// FileRange represents a range of uint64 offsets into a File. +// +// type FileRange <generated using go_generics> + +// String implements fmt.Stringer.String. +func (fr FileRange) String() string { + return fmt.Sprintf("[%#x, %#x)", fr.Start, fr.End) +} + +// Memory represents an allocatable File that may be mapped into any +// AddressSpace associated with the same Platform. +type Memory interface { + // Memory implements File methods with the following properties: + // + // - Pages mapped by MapInto must be allocated, and must be unmapped from + // all AddressSpaces before they are freed. + // + // - Pages mapped by MapInternal must be allocated. Returned mappings are + // guaranteed to be valid until the mapped pages are freed. + File + + // Allocate returns a range of pages of the given length, owned by the + // caller and with the given accounting kind. Allocated memory initially has + // a single reference and will automatically be freed when no references to + // them remain. See File.IncRef and File.DecRef. + // + // Preconditions: length must be page-aligned and non-zero. + Allocate(length uint64, kind usage.MemoryKind) (FileRange, error) + + // Decommit releases resources associated with maintaining the contents of + // the given frames. If Decommit succeeds, future accesses of the + // decommitted frames will read zeroes. 
+ // + // Preconditions: fr.Length() > 0. + Decommit(fr FileRange) error + + // UpdateUsage updates the memory usage statistics. This must be called + // before the relevant memory statistics in usage.MemoryAccounting can + // be considered accurate. + UpdateUsage() error + + // TotalUsage returns an aggregate usage for all memory statistics + // except Mapped (which is external to the Memory implementation). This + // is generally much cheaper than UpdateUsage, but will not provide a + // fine-grained breakdown. + TotalUsage() (uint64, error) + + // TotalSize returns the current maximum size of the Memory in bytes. The + // value returned by TotalSize is permitted to change. + TotalSize() uint64 + + // Destroy releases all resources associated with the Memory. + // + // Preconditions: There are no remaining uses of any of the freed memory's + // frames. + // + // Postconditions: None of the Memory's methods may be called after Destroy. + Destroy() + + // SaveTo saves the memory state to the given stream, which will + // generally be a statefile. + SaveTo(w io.Writer) error + + // LoadFrom loads the memory state from the given stream, which will + // generally be a statefile. + LoadFrom(r io.Reader) error +} + +// AllocateAndFill allocates memory of the given kind from mem and fills it by +// calling r.ReadToBlocks() repeatedly until either length bytes are read or a +// non-nil error is returned. It returns the memory filled by r, truncated down +// to the nearest page. If this is shorter than length bytes due to an error +// returned by r.ReadToBlocks(), it returns that error. +// +// Preconditions: length > 0. length must be page-aligned. 
+func AllocateAndFill(mem Memory, length uint64, kind usage.MemoryKind, r safemem.Reader) (FileRange, error) { + fr, err := mem.Allocate(length, kind) + if err != nil { + return FileRange{}, err + } + dsts, err := mem.MapInternal(fr, usermem.Write) + if err != nil { + mem.DecRef(fr) + return FileRange{}, err + } + n, err := safemem.ReadFullToBlocks(r, dsts) + un := uint64(usermem.Addr(n).RoundDown()) + if un < length { + // Free unused memory and update fr to contain only the memory that is + // still allocated. + mem.DecRef(FileRange{fr.Start + un, fr.End}) + fr.End = fr.Start + un + } + return fr, err +} diff --git a/pkg/sentry/platform/procid/BUILD b/pkg/sentry/platform/procid/BUILD new file mode 100644 index 000000000..5db4f6261 --- /dev/null +++ b/pkg/sentry/platform/procid/BUILD @@ -0,0 +1,32 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "procid", + srcs = [ + "procid.go", + "procid_amd64.s", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid", + visibility = ["//pkg/sentry:internal"], +) + +go_test( + name = "procid_test", + size = "small", + srcs = [ + "procid_test.go", + ], + embed = [":procid"], +) + +go_test( + name = "procid_net_test", + size = "small", + srcs = [ + "procid_net_test.go", + "procid_test.go", + ], + embed = [":procid"], +) diff --git a/pkg/sentry/platform/procid/procid.go b/pkg/sentry/platform/procid/procid.go new file mode 100644 index 000000000..5f861908f --- /dev/null +++ b/pkg/sentry/platform/procid/procid.go @@ -0,0 +1,21 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procid provides a way to get the current system thread identifier. +package procid + +// Current returns the current system thread identifier. +// +// Precondition: This should only be called with the runtime OS thread locked. +func Current() uint64 diff --git a/pkg/sentry/platform/procid/procid_amd64.s b/pkg/sentry/platform/procid/procid_amd64.s new file mode 100644 index 000000000..ead4e3d91 --- /dev/null +++ b/pkg/sentry/platform/procid/procid_amd64.s @@ -0,0 +1,30 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 +// +build go1.8 +// +build !go1.11 + +#include "textflag.h" + +TEXT ·Current(SB),NOSPLIT,$0-8 + // The offset specified here is the m_procid offset for Go1.8+. + // Changes to this offset should be caught by the tests, and major + // version changes require an explicit tag change above. 
+ MOVQ TLS, AX + MOVQ 0(AX)(TLS*1), AX + MOVQ 48(AX), AX // g_m (may change in future versions) + MOVQ 72(AX), AX // m_procid (may change in future versions) + MOVQ AX, ret+0(FP) + RET diff --git a/pkg/sentry/platform/procid/procid_net_test.go b/pkg/sentry/platform/procid/procid_net_test.go new file mode 100644 index 000000000..2d1605a08 --- /dev/null +++ b/pkg/sentry/platform/procid/procid_net_test.go @@ -0,0 +1,21 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procid + +// This file is just to force the inclusion of the "net" package, which will +// make the test binary a cgo one. +import ( + _ "net" +) diff --git a/pkg/sentry/platform/procid/procid_test.go b/pkg/sentry/platform/procid/procid_test.go new file mode 100644 index 000000000..5e44da36f --- /dev/null +++ b/pkg/sentry/platform/procid/procid_test.go @@ -0,0 +1,85 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procid + +import ( + "os" + "runtime" + "sync" + "syscall" + "testing" +) + +// runOnMain is used to send functions to run on the main (initial) thread. +var runOnMain = make(chan func(), 10) + +func checkProcid(t *testing.T, start *sync.WaitGroup, done *sync.WaitGroup) { + defer done.Done() + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + start.Done() + start.Wait() + + procID := Current() + tid := syscall.Gettid() + + if procID != uint64(tid) { + t.Logf("Bad procid: expected %v, got %v", tid, procID) + t.Fail() + } +} + +func TestProcidInitialized(t *testing.T) { + var start sync.WaitGroup + var done sync.WaitGroup + + count := 100 + start.Add(count + 1) + done.Add(count + 1) + + // Run the check on the main thread. + // + // When cgo is not included, the only case when procid isn't initialized + // is in the main (initial) thread, so we have to test this case + // specifically. + runOnMain <- func() { + checkProcid(t, &start, &done) + } + + // Run the check on a number of different threads. + for i := 0; i < count; i++ { + go checkProcid(t, &start, &done) + } + + done.Wait() +} + +func TestMain(m *testing.M) { + // Make sure we remain at the main (initial) thread. + runtime.LockOSThread() + + // Start running tests in a different goroutine. + go func() { + os.Exit(m.Run()) + }() + + // Execute any functions that have been sent for execution in the main + // thread. 
+ for f := range runOnMain { + f() + } +} diff --git a/pkg/sentry/platform/ptrace/BUILD b/pkg/sentry/platform/ptrace/BUILD new file mode 100644 index 000000000..16b0b3c69 --- /dev/null +++ b/pkg/sentry/platform/ptrace/BUILD @@ -0,0 +1,31 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ptrace", + srcs = [ + "ptrace.go", + "ptrace_unsafe.go", + "stub_amd64.s", + "stub_unsafe.go", + "subprocess.go", + "subprocess_amd64.go", + "subprocess_linux.go", + "subprocess_linux_amd64_unsafe.go", + "subprocess_unsafe.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ptrace", + visibility = ["//:sandbox"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/arch", + "//pkg/sentry/platform", + "//pkg/sentry/platform/filemem", + "//pkg/sentry/platform/interrupt", + "//pkg/sentry/platform/procid", + "//pkg/sentry/platform/safecopy", + "//pkg/sentry/usermem", + "@org_golang_x_sys//unix:go_default_library", + ], +) diff --git a/pkg/sentry/platform/ptrace/ptrace.go b/pkg/sentry/platform/ptrace/ptrace.go new file mode 100644 index 000000000..05f8b1d05 --- /dev/null +++ b/pkg/sentry/platform/ptrace/ptrace.go @@ -0,0 +1,242 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ptrace provides a ptrace-based implementation of the platform +// interface. 
This is useful for development and testing purposes primarily, +// and runs on stock kernels without special permissions. +// +// In a nutshell, it works as follows: +// +// The creation of a new address space creates a new child processes with a +// single thread which is traced by a single goroutine. +// +// A context is just a collection of temporary variables. Calling Switch on a +// context does the following: +// +// Locks the runtime thread. +// +// Looks up a traced subprocess thread for the current runtime thread. If +// none exists, the dedicated goroutine is asked to create a new stopped +// thread in the subprocess. This stopped subprocess thread is then traced +// by the current thread and this information is stored for subsequent +// switches. +// +// The context is then bound with information about the subprocess thread +// so that the context may be appropriately interrupted via a signal. +// +// The requested operation is performed in the traced subprocess thread +// (e.g. set registers, execute, return). +// +// FIXME: This package is currently sloppy with cleanup. +// +// Lock order: +// +// subprocess.mu +// context.mu +package ptrace + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/filemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/interrupt" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +var ( + // stubStart is the link address for our stub, and determines the + // maximum user address. This is valid only after a call to stubInit. + // + // We attempt to link the stub here, and adjust downward as needed. + stubStart uintptr = 0x7fffffff0000 + + // stubEnd is the first byte past the end of the stub, as with + // stubStart this is valid only after a call to stubInit. 
+ stubEnd uintptr + + // stubInitialized controls one-time stub initialization. + stubInitialized sync.Once +) + +type context struct { + // signalInfo is the signal info, if and when a signal is received. + signalInfo arch.SignalInfo + + // interrupt is the interrupt context. + interrupt interrupt.Forwarder + + // mu protects the following fields. + mu sync.Mutex + + // If lastFaultSP is non-nil, the last context switch was due to a fault + // received while executing lastFaultSP. Only context.Switch may set + // lastFaultSP to a non-nil value. + lastFaultSP *subprocess + + // lastFaultAddr is the last faulting address; this is only meaningful if + // lastFaultSP is non-nil. + lastFaultAddr usermem.Addr + + // lastFaultIP is the address of the last faulting instruction; + // this is also only meaningful if lastFaultSP is non-nil. + lastFaultIP usermem.Addr +} + +// Switch runs the provided context in the given address space. +func (c *context) Switch(as platform.AddressSpace, ac arch.Context, cpu int32) (*arch.SignalInfo, usermem.AccessType, error) { + s := as.(*subprocess) + isSyscall := s.switchToApp(c, ac) + + var faultSP *subprocess + var faultAddr usermem.Addr + var faultIP usermem.Addr + if !isSyscall && linux.Signal(c.signalInfo.Signo) == linux.SIGSEGV { + faultSP = s + faultAddr = usermem.Addr(c.signalInfo.Addr()) + faultIP = usermem.Addr(ac.IP()) + } + + // Update the context to reflect the outcome of this context switch. + c.mu.Lock() + lastFaultSP := c.lastFaultSP + lastFaultAddr := c.lastFaultAddr + lastFaultIP := c.lastFaultIP + // At this point, c may not yet be in s.contexts, so c.lastFaultSP won't be + // updated by s.Unmap(). This is fine; we only need to synchronize with + // calls to s.Unmap() that occur after the handling of this fault. + c.lastFaultSP = faultSP + c.lastFaultAddr = faultAddr + c.lastFaultIP = faultIP + c.mu.Unlock() + + // Update subprocesses to reflect the outcome of this context switch. 
+ if lastFaultSP != faultSP { + if lastFaultSP != nil { + lastFaultSP.mu.Lock() + delete(lastFaultSP.contexts, c) + lastFaultSP.mu.Unlock() + } + if faultSP != nil { + faultSP.mu.Lock() + faultSP.contexts[c] = struct{}{} + faultSP.mu.Unlock() + } + } + + if isSyscall { + return nil, usermem.NoAccess, nil + } + if faultSP == nil { + // Non-fault signal. + return &c.signalInfo, usermem.NoAccess, platform.ErrContextSignal + } + + // Got a page fault. Ideally, we'd get real fault type here, but ptrace + // doesn't expose this information. Instead, we use a simple heuristic: + // + // It was an instruction fault iff the faulting addr == instruction + // pointer. + // + // It was a write fault if the fault is immediately repeated. + at := usermem.Read + if faultAddr == faultIP { + at.Execute = true + } + if lastFaultSP == faultSP && + lastFaultAddr == faultAddr && + lastFaultIP == faultIP { + at.Write = true + } + return &c.signalInfo, at, platform.ErrContextSignal +} + +// Interrupt interrupts the running guest application associated with this context. +func (c *context) Interrupt() { + c.interrupt.NotifyInterrupt() +} + +// PTrace represents a collection of ptrace subprocesses. +type PTrace struct { + platform.MMapMinAddr + platform.NoCPUPreemptionDetection + *filemem.FileMem +} + +// New returns a new ptrace-based implementation of the platform interface. +func New() (*PTrace, error) { + stubInitialized.Do(func() { + // Initialize the stub. + stubInit() + + // Create the master process for the global pool. This must be + // done before initializing any other processes. + master, err := newSubprocess(createStub) + if err != nil { + // Should never happen. + panic("unable to initialize ptrace master: " + err.Error()) + } + + // Set the master on the globalPool. 
+ globalPool.master = master + }) + + fm, err := filemem.New("ptrace-memory") + if err != nil { + return nil, err + } + + return &PTrace{FileMem: fm}, nil +} + +// SupportsAddressSpaceIO implements platform.Platform.SupportsAddressSpaceIO. +func (*PTrace) SupportsAddressSpaceIO() bool { + return false +} + +// CooperativelySchedulesAddressSpace implements platform.Platform.CooperativelySchedulesAddressSpace. +func (*PTrace) CooperativelySchedulesAddressSpace() bool { + return false +} + +// MapUnit implements platform.Platform.MapUnit. +func (*PTrace) MapUnit() uint64 { + // The host kernel manages page tables and arbitrary-sized mappings + // have effectively the same cost. + return 0 +} + +// MaxUserAddress returns the first address that may not be used by user +// applications. +func (*PTrace) MaxUserAddress() usermem.Addr { + return usermem.Addr(stubStart) +} + +// NewAddressSpace returns a new subprocess. +func (p *PTrace) NewAddressSpace(_ interface{}) (platform.AddressSpace, <-chan struct{}, error) { + as, err := newSubprocess(globalPool.master.createStub) + return as, nil, err +} + +// NewContext returns an interruptible context. +func (*PTrace) NewContext() platform.Context { + return &context{} +} + +// Memory returns the platform memory used to do allocations. +func (p *PTrace) Memory() platform.Memory { + return p.FileMem +} diff --git a/pkg/sentry/platform/ptrace/ptrace_unsafe.go b/pkg/sentry/platform/ptrace/ptrace_unsafe.go new file mode 100644 index 000000000..b55b2795a --- /dev/null +++ b/pkg/sentry/platform/ptrace/ptrace_unsafe.go @@ -0,0 +1,166 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ptrace + +import ( + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// GETREGSET/SETREGSET register set types. +// +// See include/uapi/linux/elf.h. +const ( + // _NT_PRFPREG is for x86 floating-point state without using xsave. + _NT_PRFPREG = 0x2 + + // _NT_X86_XSTATE is for x86 extended state using xsave. + _NT_X86_XSTATE = 0x202 +) + +// fpRegSet returns the GETREGSET/SETREGSET register set type to be used. +func fpRegSet(useXsave bool) uintptr { + if useXsave { + return _NT_X86_XSTATE + } + return _NT_PRFPREG +} + +// getRegs sets the regular register set. +func (t *thread) getRegs(regs *syscall.PtraceRegs) error { + _, _, errno := syscall.RawSyscall6( + syscall.SYS_PTRACE, + syscall.PTRACE_GETREGS, + uintptr(t.tid), + 0, + uintptr(unsafe.Pointer(regs)), + 0, 0) + if errno != 0 { + return errno + } + return nil +} + +// setRegs sets the regular register set. +func (t *thread) setRegs(regs *syscall.PtraceRegs) error { + _, _, errno := syscall.RawSyscall6( + syscall.SYS_PTRACE, + syscall.PTRACE_SETREGS, + uintptr(t.tid), + 0, + uintptr(unsafe.Pointer(regs)), + 0, 0) + if errno != 0 { + return errno + } + return nil +} + +// getFPRegs gets the floating-point data via the GETREGSET ptrace syscall. 
+func (t *thread) getFPRegs(fpState *arch.FloatingPointData, fpLen uint64, useXsave bool) error { + iovec := syscall.Iovec{ + Base: (*byte)(fpState), + Len: fpLen, + } + _, _, errno := syscall.RawSyscall6( + syscall.SYS_PTRACE, + syscall.PTRACE_GETREGSET, + uintptr(t.tid), + fpRegSet(useXsave), + uintptr(unsafe.Pointer(&iovec)), + 0, 0) + if errno != 0 { + return errno + } + return nil +} + +// setFPRegs sets the floating-point data via the SETREGSET ptrace syscall. +func (t *thread) setFPRegs(fpState *arch.FloatingPointData, fpLen uint64, useXsave bool) error { + iovec := syscall.Iovec{ + Base: (*byte)(fpState), + Len: fpLen, + } + _, _, errno := syscall.RawSyscall6( + syscall.SYS_PTRACE, + syscall.PTRACE_SETREGSET, + uintptr(t.tid), + fpRegSet(useXsave), + uintptr(unsafe.Pointer(&iovec)), + 0, 0) + if errno != 0 { + return errno + } + return nil +} + +// getSignalInfo retrieves information about the signal that caused the stop. +func (t *thread) getSignalInfo(si *arch.SignalInfo) error { + _, _, errno := syscall.RawSyscall6( + syscall.SYS_PTRACE, + syscall.PTRACE_GETSIGINFO, + uintptr(t.tid), + 0, + uintptr(unsafe.Pointer(si)), + 0, 0) + if errno != 0 { + return errno + } + return nil +} + +// clone creates a new thread from this one. +// +// The returned thread will be stopped and available for any system thread to +// call attach on it. +// +// Precondition: the OS thread must be locked and own t. +func (t *thread) clone(initRegs *syscall.PtraceRegs) (*thread, error) { + r, ok := usermem.Addr(initRegs.Rsp).RoundUp() + if !ok { + return nil, syscall.EINVAL + } + rval, err := t.syscallIgnoreInterrupt( + initRegs, + syscall.SYS_CLONE, + arch.SyscallArgument{Value: uintptr( + syscall.CLONE_FILES | + syscall.CLONE_FS | + syscall.CLONE_SIGHAND | + syscall.CLONE_THREAD | + syscall.CLONE_PTRACE | + syscall.CLONE_VM)}, + // The stack pointer is just made up, but we have it be + // something sensible so the kernel doesn't think we're + // up to no good. Which we are. 
+ arch.SyscallArgument{Value: uintptr(r)}, + arch.SyscallArgument{}, + arch.SyscallArgument{}, + // We use these registers initially, but really they + // could be anything. We're going to stop immediately. + arch.SyscallArgument{Value: uintptr(unsafe.Pointer(initRegs))}) + if err != nil { + return nil, err + } + + return &thread{ + tgid: t.tgid, + tid: int32(rval), + cpu: ^uint32(0), + }, nil +} diff --git a/pkg/sentry/platform/ptrace/stub_amd64.s b/pkg/sentry/platform/ptrace/stub_amd64.s new file mode 100644 index 000000000..9bf87b6f6 --- /dev/null +++ b/pkg/sentry/platform/ptrace/stub_amd64.s @@ -0,0 +1,114 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "funcdata.h" +#include "textflag.h" + +#define SYS_GETPID 39 +#define SYS_EXIT 60 +#define SYS_KILL 62 +#define SYS_GETPPID 110 +#define SYS_PRCTL 157 + +#define SIGKILL 9 +#define SIGSTOP 19 + +#define PR_SET_PDEATHSIG 1 + +// stub bootstraps the child and sends itself SIGSTOP to wait for attach. +// +// R15 contains the expected PPID. R15 is used instead of a more typical DI +// since syscalls will clobber DI and createStub wants to pass a new PPID to +// grandchildren. +// +// This should not be used outside the context of a new ptrace child (as the +// function is otherwise a bunch of nonsense). +TEXT ·stub(SB),NOSPLIT,$0 +begin: + // N.B. This loop only executes in the context of a single-threaded + // fork child. 
+
+	MOVQ $SYS_PRCTL, AX
+	MOVQ $PR_SET_PDEATHSIG, DI
+	MOVQ $SIGKILL, SI
+	SYSCALL
+
+	CMPQ AX, $0
+	JNE error
+
+	// If the parent already died before we called PR_SET_PDEATHSIG then
+	// we'll have an unexpected PPID.
+	MOVQ $SYS_GETPPID, AX
+	SYSCALL
+
+	CMPQ AX, $0
+	JL error
+
+	CMPQ AX, R15
+	JNE parent_dead
+
+	MOVQ $SYS_GETPID, AX
+	SYSCALL
+
+	CMPQ AX, $0
+	JL error
+
+	// SIGSTOP to wait for attach.
+	//
+	// The SYSCALL instruction will be used for future syscall injection by
+	// thread.syscall.
+	MOVQ AX, DI
+	MOVQ $SYS_KILL, AX
+	MOVQ $SIGSTOP, SI
+	SYSCALL
+
+	// The tracer may "detach" and/or allow code execution here in three cases:
+	//
+	// 1. New (traced) stub threads are explicitly detached by the
+	// goroutine in newSubprocess. However, they are detached while in
+	// group-stop, so they do not execute code here.
+	//
+	// 2. If a tracer thread exits, it implicitly detaches from the stub,
+	// potentially allowing code execution here. However, the Go runtime
+	// never exits individual threads, so this case never occurs.
+	//
+	// 3. subprocess.createStub clones a new stub process that is untraced,
+	// thus executing this code. We set up the PDEATHSIG before SIGSTOPing
+	// ourselves for attach by the tracer.
+	//
+	// R15 has been updated with the expected PPID.
+	JMP begin
+
+error:
+	// Exit with -errno.
+	MOVQ AX, DI
+	NEGQ DI
+	MOVQ $SYS_EXIT, AX
+	SYSCALL
+	HLT
+
+parent_dead:
+	MOVQ $SYS_EXIT, AX
+	MOVQ $1, DI
+	SYSCALL
+	HLT
+
+// stubCall calls the stub function at the given address with the given PPID.
+//
+// This is a distinct function because stub, above, may be mapped at any
+// arbitrary location, and stub has a specific binary API (see above).
+TEXT ·stubCall(SB),NOSPLIT,$0-16 + MOVQ addr+0(FP), AX + MOVQ pid+8(FP), R15 + JMP AX diff --git a/pkg/sentry/platform/ptrace/stub_unsafe.go b/pkg/sentry/platform/ptrace/stub_unsafe.go new file mode 100644 index 000000000..c868a2d68 --- /dev/null +++ b/pkg/sentry/platform/ptrace/stub_unsafe.go @@ -0,0 +1,98 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ptrace + +import ( + "reflect" + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/safecopy" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// stub is defined in arch-specific assembly. +func stub() + +// stubCall calls the stub at the given address with the given pid. +func stubCall(addr, pid uintptr) + +// unsafeSlice returns a slice for the given address and length. +func unsafeSlice(addr uintptr, length int) (slice []byte) { + sh := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) + sh.Data = addr + sh.Len = length + sh.Cap = length + return +} + +// stubInit initializes the stub. +func stubInit() { + // Grab the existing stub. + stubBegin := reflect.ValueOf(stub).Pointer() + stubLen := int(safecopy.FindEndAddress(stubBegin) - stubBegin) + stubSlice := unsafeSlice(stubBegin, stubLen) + mapLen := uintptr(stubLen) + if offset := mapLen % usermem.PageSize; offset != 0 { + mapLen += usermem.PageSize - offset + } + + for stubStart > 0 { + // Map the target address for the stub. 
+ // + // We don't use FIXED here because we don't want to unmap + // something that may have been there already. We just walk + // down the address space until we find a place where the stub + // can be placed. + addr, _, errno := syscall.RawSyscall6( + syscall.SYS_MMAP, + stubStart, + mapLen, + syscall.PROT_WRITE|syscall.PROT_READ, + syscall.MAP_PRIVATE|syscall.MAP_ANONYMOUS, + 0 /* fd */, 0 /* offset */) + if addr != stubStart || errno != 0 { + if addr != 0 { + // Unmap the region we've mapped accidentally. + syscall.RawSyscall(syscall.SYS_MUNMAP, addr, mapLen, 0) + } + + // Attempt to begin at a lower address. + stubStart -= uintptr(usermem.PageSize) + continue + } + + // Copy the stub to the address. + targetSlice := unsafeSlice(addr, stubLen) + copy(targetSlice, stubSlice) + + // Make the stub executable. + if _, _, errno := syscall.RawSyscall( + syscall.SYS_MPROTECT, + stubStart, + mapLen, + syscall.PROT_EXEC|syscall.PROT_READ); errno != 0 { + panic("mprotect failed: " + errno.Error()) + } + + // Set the end. + stubEnd = stubStart + mapLen + return + } + + // This will happen only if we exhaust the entire address + // space, and it will take a long, long time. + panic("failed to map stub") +} diff --git a/pkg/sentry/platform/ptrace/subprocess.go b/pkg/sentry/platform/ptrace/subprocess.go new file mode 100644 index 000000000..0d6a38f15 --- /dev/null +++ b/pkg/sentry/platform/ptrace/subprocess.go @@ -0,0 +1,559 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ptrace + +import ( + "fmt" + "os" + "runtime" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// globalPool exists to solve two distinct problems: +// +// 1) Subprocesses can't always be killed properly (see Release). +// +// 2) Any seccomp filters that have been installed will apply to subprocesses +// created here. Therefore we use the intermediary (master), which is created +// on initialization of the platform. +var globalPool struct { + mu sync.Mutex + master *subprocess + available []*subprocess +} + +// thread is a traced thread; it is a thread identifier. +// +// This is a convenience type for defining ptrace operations. +type thread struct { + tgid int32 + tid int32 + cpu uint32 +} + +// threadPool is a collection of threads. +type threadPool struct { + // mu protects below. + mu sync.Mutex + + // threads is the collection of threads. + // + // This map is indexed by system TID (the calling thread); which will + // be the tracer for the given *thread, and therefore capable of using + // relevant ptrace calls. + threads map[int32]*thread +} + +// lookupOrCreate looks up a given thread or creates one. +// +// newThread will generally be subprocess.newThread. +// +// Precondition: the runtime OS thread must be locked. +func (tp *threadPool) lookupOrCreate(currentTID int32, newThread func() *thread) *thread { + tp.mu.Lock() + t, ok := tp.threads[currentTID] + if !ok { + // Before creating a new thread, see if we can find a thread + // whose system tid has disappeared. + // + // TODO: Other parts of this package depend on + // threads never exiting. + for origTID, t := range tp.threads { + // Signal zero is an easy existence check. 
+ if err := syscall.Tgkill(syscall.Getpid(), int(origTID), 0); err != nil { + // This thread has been abandoned; reuse it. + delete(tp.threads, origTID) + tp.threads[currentTID] = t + tp.mu.Unlock() + return t + } + } + + // Create a new thread. + t = newThread() + tp.threads[currentTID] = t + } + tp.mu.Unlock() + return t +} + +// subprocess is a collection of threads being traced. +type subprocess struct { + platform.NoAddressSpaceIO + + // initRegs are the initial registers for the first thread. + // + // These are used for the register set for system calls. + initRegs syscall.PtraceRegs + + // requests is used to signal creation of new threads. + requests chan chan *thread + + // sysemuThreads are reserved for emulation. + sysemuThreads threadPool + + // syscallThreads are reserved for syscalls (except clone, which is + // handled in the dedicated goroutine corresponding to requests above). + syscallThreads threadPool + + // mu protects the following fields. + mu sync.Mutex + + // contexts is the set of contexts for which it's possible that + // context.lastFaultSP == this subprocess. + contexts map[*context]struct{} +} + +// newSubprocess returns a useable subprocess. +// +// This will either be a newly created subprocess, or one from the global pool. +// The create function will be called in the latter case, which is guaranteed +// to happen with the runtime thread locked. +func newSubprocess(create func() (*thread, error)) (*subprocess, error) { + // See Release. + globalPool.mu.Lock() + if len(globalPool.available) > 0 { + sp := globalPool.available[len(globalPool.available)-1] + globalPool.available = globalPool.available[:len(globalPool.available)-1] + globalPool.mu.Unlock() + return sp, nil + } + globalPool.mu.Unlock() + + // The following goroutine is responsible for creating the first traced + // thread, and responding to requests to make additional threads in the + // traced process. 
The process will be killed and reaped when the + // request channel is closed, which happens in Release below. + var initRegs syscall.PtraceRegs + errChan := make(chan error) + requests := make(chan chan *thread) + go func() { // S/R-SAFE: Platform-related. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Initialize the first thread. + firstThread, err := create() + if err != nil { + errChan <- err + return + } + + // Grab registers. + // + // Note that we adjust the current register RIP value to be + // just before the current system call executed. This depends + // on the definition of the stub itself. + if err := firstThread.getRegs(&initRegs); err != nil { + panic(fmt.Sprintf("ptrace get regs failed: %v", err)) + } + initRegs.Rip -= initRegsRipAdjustment + + // Ready to handle requests. + errChan <- nil + + // Wait for requests to create threads. + for r := range requests { + t, err := firstThread.clone(&initRegs) + if err != nil { + // Should not happen: not recoverable. + panic(fmt.Sprintf("error initializing first thread: %v", err)) + } + + // Since the new thread was created with + // clone(CLONE_PTRACE), it will begin execution with + // SIGSTOP pending and with this thread as its tracer. + // (Hopefully nobody tgkilled it with a signal < + // SIGSTOP before the SIGSTOP was delivered, in which + // case that signal would be delivered before SIGSTOP.) + if sig := t.wait(); sig != syscall.SIGSTOP { + panic(fmt.Sprintf("error waiting for new clone: expected SIGSTOP, got %v", sig)) + } + + // Detach the thread without suppressing the SIGSTOP, + // causing it to enter group-stop. + if _, _, errno := syscall.RawSyscall6(syscall.SYS_PTRACE, syscall.PTRACE_DETACH, uintptr(t.tid), 0, uintptr(syscall.SIGSTOP), 0, 0); errno != 0 { + panic(fmt.Sprintf("can't detach new clone: %v", errno)) + } + + // Return the thread. + r <- t + } + + // Requests should never be closed. + panic("unreachable") + }() + + // Wait until error or readiness. 
+	if err := <-errChan; err != nil {
+		return nil, err
+	}
+
+	// Ready.
+	sp := &subprocess{
+		initRegs: initRegs,
+		requests: requests,
+		sysemuThreads: threadPool{
+			threads: make(map[int32]*thread),
+		},
+		syscallThreads: threadPool{
+			threads: make(map[int32]*thread),
+		},
+		contexts: make(map[*context]struct{}),
+	}
+
+	sp.unmap()
+	return sp, nil
+}
+
+// unmap unmaps non-stub regions of the process.
+//
+// This will panic on failure (which should never happen).
+func (s *subprocess) unmap() {
+	s.Unmap(0, uint64(stubStart))
+	if maximumUserAddress != stubEnd {
+		s.Unmap(usermem.Addr(stubEnd), uint64(maximumUserAddress-stubEnd))
+	}
+}
+
+// Release kills the subprocess.
+//
+// Just kidding! We can't safely co-ordinate the detaching of all the
+// tracees (since the tracers are random runtime threads, and the process
+// won't exit until tracers have been notified).
+//
+// Therefore we simply unmap everything in the subprocess and return it to the
+// globalPool. This has the added benefit of reducing creation time for new
+// subprocesses.
+func (s *subprocess) Release() error {
+	go func() { // S/R-SAFE: Platform.
+		s.unmap()
+		globalPool.mu.Lock()
+		globalPool.available = append(globalPool.available, s)
+		globalPool.mu.Unlock()
+	}()
+	return nil
+}
+
+// newThread creates a new traced thread.
+//
+// Precondition: the OS thread must be locked.
+func (s *subprocess) newThread() *thread {
+	// Ask the first thread to create a new one.
+	r := make(chan *thread)
+	s.requests <- r
+	t := <-r
+
+	// Attach the subprocess to this one.
+	t.attach()
+
+	// Return the new thread, which is now bound.
+	return t
+}
+
+// attach attaches to the thread.
+func (t *thread) attach() {
+	if _, _, errno := syscall.RawSyscall(syscall.SYS_PTRACE, syscall.PTRACE_ATTACH, uintptr(t.tid), 0); errno != 0 {
+		panic(fmt.Sprintf("unable to attach: %v", errno))
+	}
+
+	// PTRACE_ATTACH sends SIGSTOP, and wakes the tracee if it was already
+	// stopped from the SIGSTOP queued by CLONE_PTRACE (see inner loop of
+	// newSubprocess), so we always expect to see signal-delivery-stop with
+	// SIGSTOP.
+	if sig := t.wait(); sig != syscall.SIGSTOP {
+		panic(fmt.Sprintf("wait failed: expected SIGSTOP, got %v", sig))
+	}
+
+	// Initialize options.
+	t.init()
+}
+
+// wait waits for a stop event.
+func (t *thread) wait() syscall.Signal {
+	var status syscall.WaitStatus
+
+	for {
+		r, err := syscall.Wait4(int(t.tid), &status, syscall.WALL|syscall.WUNTRACED, nil)
+		if err == syscall.EINTR || err == syscall.EAGAIN {
+			// Wait was interrupted; wait again.
+			continue
+		} else if err != nil {
+			panic(fmt.Sprintf("ptrace wait failed: %v", err))
+		}
+		if int(r) != int(t.tid) {
+			panic(fmt.Sprintf("ptrace wait returned %v, expected %v", r, t.tid))
+		}
+		if !status.Stopped() {
+			panic(fmt.Sprintf("ptrace status unexpected: got %v, wanted stopped", status))
+		}
+		if status.StopSignal() == 0 {
+			continue // Spurious stop.
+		}
+		return status.StopSignal()
+	}
+}
+
+// init initializes trace options.
+func (t *thread) init() {
+	// Set our TRACESYSGOOD option to differentiate real SIGTRAP.
+	_, _, errno := syscall.RawSyscall6(
+		syscall.SYS_PTRACE,
+		syscall.PTRACE_SETOPTIONS,
+		uintptr(t.tid),
+		0,
+		syscall.PTRACE_O_TRACESYSGOOD,
+		0, 0)
+	if errno != 0 {
+		panic(fmt.Sprintf("ptrace set options failed: %v", errno))
+	}
+}
+
+// syscall executes a system call cycle in the traced context.
+//
+// This is _not_ for use by application system calls, rather it is for use when
+// a system call must be injected into the remote context (e.g. mmap, munmap).
+// Note that clones are handled separately.
+func (t *thread) syscall(regs *syscall.PtraceRegs) (uintptr, error) {
+	// Set registers.
+	if err := t.setRegs(regs); err != nil {
+		panic(fmt.Sprintf("ptrace set regs failed: %v", err))
+	}
+
+	for {
+		// Execute the syscall instruction.
+		if _, _, errno := syscall.RawSyscall(syscall.SYS_PTRACE, syscall.PTRACE_SYSCALL, uintptr(t.tid), 0); errno != 0 {
+			panic(fmt.Sprintf("ptrace syscall-enter failed: %v", errno))
+		}
+
+		sig := t.wait()
+		if sig == (0x80 | syscall.SIGTRAP) {
+			// Reached syscall-enter-stop.
+			break
+		} else {
+			// Some other signal caused a thread stop; ignore.
+			continue
+		}
+	}
+
+	// Complete the actual system call.
+	if _, _, errno := syscall.RawSyscall(syscall.SYS_PTRACE, syscall.PTRACE_SYSCALL, uintptr(t.tid), 0); errno != 0 {
+		panic(fmt.Sprintf("ptrace syscall-enter failed: %v", errno))
+	}
+
+	// Wait for syscall-exit-stop. "[Signal-delivery-stop] never happens
+	// between syscall-enter-stop and syscall-exit-stop; it happens *after*
+	// syscall-exit-stop.)" - ptrace(2), "Syscall-stops"
+	if sig := t.wait(); sig != (0x80 | syscall.SIGTRAP) {
+		panic(fmt.Sprintf("wait failed: expected SIGTRAP, got %v [%d]", sig, sig))
+	}
+
+	// Grab registers.
+	if err := t.getRegs(regs); err != nil {
+		panic(fmt.Sprintf("ptrace get regs failed: %v", err))
+	}
+
+	return syscallReturnValue(regs)
+}
+
+// syscallIgnoreInterrupt ignores interrupts on the system call thread and
+// restarts the syscall if the kernel indicates that should happen.
+func (t *thread) syscallIgnoreInterrupt(
+	initRegs *syscall.PtraceRegs,
+	sysno uintptr,
+	args ...arch.SyscallArgument) (uintptr, error) {
+	for {
+		regs := createSyscallRegs(initRegs, sysno, args...)
+		rval, err := t.syscall(&regs)
+		switch err {
+		case ERESTARTSYS:
+			continue
+		case ERESTARTNOINTR:
+			continue
+		case ERESTARTNOHAND:
+			continue
+		default:
+			return rval, err
+		}
+	}
+}
+
+// NotifyInterrupt implements interrupt.Receiver.NotifyInterrupt.
+func (t *thread) NotifyInterrupt() { + syscall.Tgkill(int(t.tgid), int(t.tid), syscall.Signal(platform.SignalInterrupt)) +} + +// switchToApp is called from the main SwitchToApp entrypoint. +// +// This function returns true on a system call, false on a signal. +func (s *subprocess) switchToApp(c *context, ac arch.Context) bool { + regs := &ac.StateData().Regs + s.resetSysemuRegs(regs) + + // Extract floating point state. + fpState := ac.FloatingPointData() + fpLen, _ := ac.FeatureSet().ExtendedStateSize() + useXsave := ac.FeatureSet().UseXsave() + + // Lock the thread for ptrace operations. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Grab our thread from the pool. + currentTID := int32(procid.Current()) + t := s.sysemuThreads.lookupOrCreate(currentTID, s.newThread) + + // Check for interrupts, and ensure that future interrupts will signal t. + if !c.interrupt.Enable(t) { + // Pending interrupt; simulate. + c.signalInfo = arch.SignalInfo{Signo: int32(platform.SignalInterrupt)} + return false + } + defer c.interrupt.Disable() + + // Ensure that the CPU set is bound appropriately; this makes the + // emulation below several times faster, presumably by avoiding + // interprocessor wakeups and by simplifying the schedule. + t.bind() + + // Set registers. + if err := t.setRegs(regs); err != nil { + panic(fmt.Sprintf("ptrace set regs failed: %v", err)) + } + if err := t.setFPRegs(fpState, uint64(fpLen), useXsave); err != nil { + panic(fmt.Sprintf("ptrace set fpregs failed: %v", err)) + } + + for { + // Start running until the next system call. 
+ if isSingleStepping(regs) { + if _, _, errno := syscall.RawSyscall( + syscall.SYS_PTRACE, + syscall.PTRACE_SYSEMU_SINGLESTEP, + uintptr(t.tid), 0); errno != 0 { + panic(fmt.Sprintf("ptrace sysemu failed: %v", errno)) + } + } else { + if _, _, errno := syscall.RawSyscall( + syscall.SYS_PTRACE, + syscall.PTRACE_SYSEMU, + uintptr(t.tid), 0); errno != 0 { + panic(fmt.Sprintf("ptrace sysemu failed: %v", errno)) + } + } + + // Wait for the syscall-enter stop. + sig := t.wait() + + // Refresh all registers. + if err := t.getRegs(regs); err != nil { + panic(fmt.Sprintf("ptrace get regs failed: %v", err)) + } + if err := t.getFPRegs(fpState, uint64(fpLen), useXsave); err != nil { + panic(fmt.Sprintf("ptrace get fpregs failed: %v", err)) + } + + // Is it a system call? + if sig == (0x80 | syscall.SIGTRAP) { + // Ensure registers are sane. + updateSyscallRegs(regs) + return true + } + + if sig == syscall.SIGSTOP { + // SIGSTOP was delivered to another thread in the same thread + // group, which initiated another group stop. Just ignore it. + continue + } + + // Grab signal information. + if err := t.getSignalInfo(&c.signalInfo); err != nil { + // Should never happen. + panic(fmt.Sprintf("ptrace get signal info failed: %v", err)) + } + + // We have a signal. We verify however, that the signal was + // either delivered from the kernel or from this process. We + // don't respect other signals. + if c.signalInfo.Code > 0 { + return false // kernel. + } else if c.signalInfo.Code <= 0 && c.signalInfo.Pid() == int32(os.Getpid()) { + return false // this process. + } + } +} + +// syscall executes the given system call without handling interruptions. +func (s *subprocess) syscall(sysno uintptr, args ...arch.SyscallArgument) (uintptr, error) { + // Grab a thread. 
+ runtime.LockOSThread() + defer runtime.UnlockOSThread() + currentTID := int32(procid.Current()) + t := s.syscallThreads.lookupOrCreate(currentTID, s.newThread) + + return t.syscallIgnoreInterrupt(&s.initRegs, sysno, args...) +} + +// MapFile implements platform.AddressSpace.MapFile. +func (s *subprocess) MapFile(addr usermem.Addr, fd int, fr platform.FileRange, at usermem.AccessType, precommit bool) error { + var flags int + if precommit { + flags |= syscall.MAP_POPULATE + } + _, err := s.syscall( + syscall.SYS_MMAP, + arch.SyscallArgument{Value: uintptr(addr)}, + arch.SyscallArgument{Value: uintptr(fr.Length())}, + arch.SyscallArgument{Value: uintptr(at.Prot())}, + arch.SyscallArgument{Value: uintptr(flags | syscall.MAP_SHARED | syscall.MAP_FIXED)}, + arch.SyscallArgument{Value: uintptr(fd)}, + arch.SyscallArgument{Value: uintptr(fr.Start)}) + return err +} + +// Unmap implements platform.AddressSpace.Unmap. +func (s *subprocess) Unmap(addr usermem.Addr, length uint64) { + ar, ok := addr.ToRange(length) + if !ok { + panic(fmt.Sprintf("addr %#x + length %#x overflows", addr, length)) + } + s.mu.Lock() + for c := range s.contexts { + c.mu.Lock() + if c.lastFaultSP == s && ar.Contains(c.lastFaultAddr) { + // Forget the last fault so that if c faults again, the fault isn't + // incorrectly reported as a write fault. If this is being called + // due to munmap() of the corresponding vma, handling of the second + // fault will fail anyway. + c.lastFaultSP = nil + delete(s.contexts, c) + } + c.mu.Unlock() + } + s.mu.Unlock() + _, err := s.syscall( + syscall.SYS_MUNMAP, + arch.SyscallArgument{Value: uintptr(addr)}, + arch.SyscallArgument{Value: uintptr(length)}) + if err != nil { + // We never expect this to happen. 
+ panic(fmt.Sprintf("munmap(%x, %x)) failed: %v", addr, length, err)) + } +} diff --git a/pkg/sentry/platform/ptrace/subprocess_amd64.go b/pkg/sentry/platform/ptrace/subprocess_amd64.go new file mode 100644 index 000000000..8211215df --- /dev/null +++ b/pkg/sentry/platform/ptrace/subprocess_amd64.go @@ -0,0 +1,104 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package ptrace + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" +) + +const ( + // maximumUserAddress is the largest possible user address. + maximumUserAddress = 0x7ffffffff000 + + // initRegsRipAdjustment is the size of the syscall instruction. + initRegsRipAdjustment = 2 +) + +// Linux kernel errnos which "should never be seen by user programs", but will +// be revealed to ptrace syscall exit tracing. +// +// These constants are used in subprocess.go. +const ( + ERESTARTSYS = syscall.Errno(512) + ERESTARTNOINTR = syscall.Errno(513) + ERESTARTNOHAND = syscall.Errno(514) +) + +// resetSysemuRegs sets up emulation registers. +// +// This should be called prior to calling sysemu. +func (s *subprocess) resetSysemuRegs(regs *syscall.PtraceRegs) { + regs.Cs = s.initRegs.Cs + regs.Ss = s.initRegs.Ss + regs.Ds = s.initRegs.Ds + regs.Es = s.initRegs.Es + regs.Fs = s.initRegs.Fs + regs.Gs = s.initRegs.Gs +} + +// createSyscallRegs sets up syscall registers. 
+// +// This should be called to generate registers for a system call. +func createSyscallRegs(initRegs *syscall.PtraceRegs, sysno uintptr, args ...arch.SyscallArgument) syscall.PtraceRegs { + // Copy initial registers (RIP, segments, etc.). + regs := *initRegs + + // Set our syscall number. + regs.Rax = uint64(sysno) + if len(args) >= 1 { + regs.Rdi = args[0].Uint64() + } + if len(args) >= 2 { + regs.Rsi = args[1].Uint64() + } + if len(args) >= 3 { + regs.Rdx = args[2].Uint64() + } + if len(args) >= 4 { + regs.R10 = args[3].Uint64() + } + if len(args) >= 5 { + regs.R8 = args[4].Uint64() + } + if len(args) >= 6 { + regs.R9 = args[5].Uint64() + } + + return regs +} + +// isSingleStepping determines if the registers indicate single-stepping. +func isSingleStepping(regs *syscall.PtraceRegs) bool { + return (regs.Eflags & arch.X86TrapFlag) != 0 +} + +// updateSyscallRegs updates registers after finishing sysemu. +func updateSyscallRegs(regs *syscall.PtraceRegs) { + // Ptrace puts -ENOSYS in rax on syscall-enter-stop. + regs.Rax = regs.Orig_rax +} + +// syscallReturnValue extracts a sensible return from registers. +func syscallReturnValue(regs *syscall.PtraceRegs) (uintptr, error) { + rval := int64(regs.Rax) + if rval < 0 { + return 0, syscall.Errno(-rval) + } + return uintptr(rval), nil +} diff --git a/pkg/sentry/platform/ptrace/subprocess_linux.go b/pkg/sentry/platform/ptrace/subprocess_linux.go new file mode 100644 index 000000000..227dd4882 --- /dev/null +++ b/pkg/sentry/platform/ptrace/subprocess_linux.go @@ -0,0 +1,146 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package ptrace + +import ( + "fmt" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/procid" +) + +// createStub creates a fresh stub processes. +// +// Precondition: the runtime OS thread must be locked. +func createStub() (*thread, error) { + // Declare all variables up front in order to ensure that there's no + // need for allocations between beforeFork & afterFork. + var ( + pid uintptr + ppid uintptr + errno syscall.Errno + ) + + // Remember the current ppid for the pdeathsig race. + ppid, _, _ = syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0) + + // Among other things, beforeFork masks all signals. + beforeFork() + pid, _, errno = syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|syscall.CLONE_FILES, 0, 0, 0, 0, 0) + if errno != 0 { + afterFork() + return nil, errno + } + + // Is this the parent? + if pid != 0 { + // Among other things, restore signal mask. + afterFork() + + // Initialize the first thread. + t := &thread{ + tgid: int32(pid), + tid: int32(pid), + cpu: ^uint32(0), + } + if sig := t.wait(); sig != syscall.SIGSTOP { + return nil, fmt.Errorf("wait failed: expected SIGSTOP, got %v", sig) + } + t.attach() + + return t, nil + } + + // afterForkInChild resets all signals to their default dispositions + // and restores the signal mask to its pre-fork state. + afterForkInChild() + + // Explicitly unmask all signals to ensure that the tracer can see + // them. 
+	errno = unmaskAllSignals()
+	if errno != 0 {
+		syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0)
+	}
+
+	// Call the stub; should not return.
+	stubCall(stubStart, ppid)
+	panic("unreachable")
+}
+
+// createStub creates a stub process as a child of an existing subprocess.
+//
+// Precondition: the runtime OS thread must be locked.
+func (s *subprocess) createStub() (*thread, error) {
+	// There's no need to lock the runtime thread here, as this can only be
+	// called from a context that is already locked.
+	currentTID := int32(procid.Current())
+	t := s.syscallThreads.lookupOrCreate(currentTID, s.newThread)
+
+	// Pass the expected PPID to the child via R15.
+	regs := s.initRegs
+	regs.R15 = uint64(t.tgid)
+
+	// Call fork in a subprocess.
+	//
+	// The new child must set up PDEATHSIG to ensure it dies if this
+	// process dies. Since this process could die at any time, this cannot
+	// be done via instrumentation from here.
+	//
+	// Instead, we create the child untraced, which will do the PDEATHSIG
+	// setup and then SIGSTOP itself for our attach below.
+	pid, err := t.syscallIgnoreInterrupt(
+		&regs,
+		syscall.SYS_CLONE,
+		arch.SyscallArgument{Value: uintptr(syscall.SIGCHLD | syscall.CLONE_FILES)},
+		arch.SyscallArgument{Value: 0},
+		arch.SyscallArgument{Value: 0},
+		arch.SyscallArgument{Value: 0},
+		arch.SyscallArgument{Value: 0},
+		arch.SyscallArgument{Value: 0})
+	if err != nil {
+		return nil, err
+	}
+
+	// Wait for child to enter group-stop, so we don't stop its
+	// bootstrapping work with t.attach below.
+	//
+	// We unfortunately don't have a handy part of memory to write the wait
+	// status. If the wait succeeds, we'll assume that it was the SIGSTOP.
+	// If the child actually exited, the attach below will fail.
+ _, err = t.syscallIgnoreInterrupt( + &s.initRegs, + syscall.SYS_WAIT4, + arch.SyscallArgument{Value: uintptr(pid)}, + arch.SyscallArgument{Value: 0}, + arch.SyscallArgument{Value: syscall.WUNTRACED}, + arch.SyscallArgument{Value: 0}, + arch.SyscallArgument{Value: 0}, + arch.SyscallArgument{Value: 0}) + if err != nil { + return nil, err + } + + childT := &thread{ + tgid: int32(pid), + tid: int32(pid), + cpu: ^uint32(0), + } + childT.attach() + + return childT, nil +} diff --git a/pkg/sentry/platform/ptrace/subprocess_linux_amd64_unsafe.go b/pkg/sentry/platform/ptrace/subprocess_linux_amd64_unsafe.go new file mode 100644 index 000000000..697431472 --- /dev/null +++ b/pkg/sentry/platform/ptrace/subprocess_linux_amd64_unsafe.go @@ -0,0 +1,109 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 linux + +package ptrace + +import ( + "sync" + "sync/atomic" + "syscall" + "unsafe" + + "golang.org/x/sys/unix" + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +// maskPool contains reusable CPU masks for setting affinity. Unfortunately, +// runtime.NumCPU doesn't actually record the number of CPUs on the system, it +// just records the number of CPUs available in the scheduler affinity set at +// startup. This may a) change over time and b) gives a number far lower than +// the maximum indexable CPU. To prevent lots of allocation in the hot path, we +// use a pool to store large masks that we can reuse during bind. 
+var maskPool = sync.Pool{ + New: func() interface{} { + const maxCPUs = 1024 // Not a hard limit; see below. + return make([]uintptr, maxCPUs/64) + }, +} + +// unmaskAllSignals unmasks all signals on the current thread. +// +//go:nosplit +func unmaskAllSignals() syscall.Errno { + var set linux.SignalSet + _, _, errno := syscall.RawSyscall6(syscall.SYS_RT_SIGPROCMASK, linux.SIG_SETMASK, uintptr(unsafe.Pointer(&set)), 0, linux.SignalSetSize, 0, 0) + return errno +} + +// getCPU gets the current CPU. +// +// Precondition: the current runtime thread should be locked. +func getCPU() (uint32, error) { + var cpu uintptr + if _, _, errno := syscall.RawSyscall( + unix.SYS_GETCPU, + uintptr(unsafe.Pointer(&cpu)), + 0, 0); errno != 0 { + return 0, errno + } + return uint32(cpu), nil +} + +// setCPU sets the CPU affinity. +func (t *thread) setCPU(cpu uint32) error { + mask := maskPool.Get().([]uintptr) + n := int(cpu / 64) + v := uintptr(1 << uintptr(cpu%64)) + if n >= len(mask) { + // See maskPool note above. We've actually exceeded the number + // of available cores. Grow the mask and return it. + mask = make([]uintptr, n+1) + } + mask[n] |= v + if _, _, errno := syscall.RawSyscall( + unix.SYS_SCHED_SETAFFINITY, + uintptr(t.tid), + uintptr(len(mask)*8), + uintptr(unsafe.Pointer(&mask[0]))); errno != 0 { + return errno + } + mask[n] &^= v + maskPool.Put(mask) + return nil +} + +// bind attempts to ensure that the thread is on the same CPU as the current +// thread. This provides no guarantees as it is fundamentally a racy operation: +// CPU sets may change and we may be rescheduled in the middle of this +// operation. As a result, no failures are reported. +// +// Precondition: the current runtime thread should be locked. 
+func (t *thread) bind() { + currentCPU, err := getCPU() + if err != nil { + return + } + if oldCPU := atomic.SwapUint32(&t.cpu, currentCPU); oldCPU != currentCPU { + // Set the affinity on the thread and save the CPU for next + // round; we don't expect CPUs to bounce around too frequently. + // + // (It's worth noting that we could move CPUs between this point + // and when the tracee finishes executing. But that would be + // roughly the status quo anyways -- we're just maximizing our + // chances of colocation, not guaranteeing it.) + t.setCPU(currentCPU) + } +} diff --git a/pkg/sentry/platform/ptrace/subprocess_unsafe.go b/pkg/sentry/platform/ptrace/subprocess_unsafe.go new file mode 100644 index 000000000..fe41641d3 --- /dev/null +++ b/pkg/sentry/platform/ptrace/subprocess_unsafe.go @@ -0,0 +1,28 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ptrace + +import ( + _ "unsafe" // required for go:linkname. 
+) + +//go:linkname beforeFork syscall.runtime_BeforeFork +func beforeFork() + +//go:linkname afterFork syscall.runtime_AfterFork +func afterFork() + +//go:linkname afterForkInChild syscall.runtime_AfterForkInChild +func afterForkInChild() diff --git a/pkg/sentry/platform/ring0/BUILD b/pkg/sentry/platform/ring0/BUILD new file mode 100644 index 000000000..2df232a64 --- /dev/null +++ b/pkg/sentry/platform/ring0/BUILD @@ -0,0 +1,52 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance") + +go_template( + name = "defs", + srcs = [ + "defs.go", + "defs_amd64.go", + "offsets_amd64.go", + "x86.go", + ], + visibility = [":__subpackages__"], +) + +go_template_instance( + name = "defs_impl", + out = "defs_impl.go", + package = "ring0", + template = ":defs", +) + +genrule( + name = "entry_impl_amd64", + srcs = ["entry_amd64.s"], + outs = ["entry_impl_amd64.s"], + cmd = "(echo -e '// build +amd64\\n' && $(location //pkg/sentry/platform/ring0/gen_offsets) && cat $(SRCS)) > $@", + tools = ["//pkg/sentry/platform/ring0/gen_offsets"], +) + +go_library( + name = "ring0", + srcs = [ + "defs_impl.go", + "entry_amd64.go", + "entry_impl_amd64.s", + "kernel.go", + "kernel_amd64.go", + "kernel_unsafe.go", + "lib_amd64.go", + "lib_amd64.s", + "ring0.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/cpuid", + "//pkg/sentry/platform/ring0/pagetables", + "//pkg/sentry/usermem", + ], +) diff --git a/pkg/sentry/platform/ring0/defs.go b/pkg/sentry/platform/ring0/defs.go new file mode 100644 index 000000000..9d947b73d --- /dev/null +++ b/pkg/sentry/platform/ring0/defs.go @@ -0,0 +1,93 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ring0 + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +var ( + // UserspaceSize is the total size of userspace. + UserspaceSize = uintptr(1) << (VirtualAddressBits() - 1) + + // MaximumUserAddress is the largest possible user address. + MaximumUserAddress = (UserspaceSize - 1) & ^uintptr(usermem.PageSize-1) + + // KernelStartAddress is the starting kernel address. + KernelStartAddress = ^uintptr(0) - (UserspaceSize - 1) +) + +// Kernel is a global kernel object. +// +// This contains global state, shared by multiple CPUs. +type Kernel struct { + KernelArchState +} + +// CPU is the per-CPU struct. +type CPU struct { + // self is a self reference. + // + // This is always guaranteed to be at offset zero. + self *CPU + + // kernel is reference to the kernel that this CPU was initialized + // with. This reference is kept for garbage collection purposes: CPU + // registers may refer to objects within the Kernel object that cannot + // be safely freed. + kernel *Kernel + + // CPUArchState is architecture-specific state. + CPUArchState + + // registers is a set of registers; these may be used on kernel system + // calls and exceptions via the Registers function. + registers syscall.PtraceRegs + + // KernelException handles an exception during kernel execution. + // + // Return from this call will restore registers and return to the kernel: the + // registers must be modified directly. + // + // If this function is not provided, a kernel exception results in halt. 
+ // + // This must be go:nosplit, as this will be on the interrupt stack. + // Closures are permitted, as the pointer to the closure frame is not + // passed on the stack. + KernelException func(Vector) + + // KernelSyscall is called for kernel system calls. + // + // Return from this call will restore registers and return to the kernel: the + // registers must be modified directly. + // + // If this function is not provided, a kernel exception results in halt. + // + // This must be go:nosplit, as this will be on the interrupt stack. + // Closures are permitted, as the pointer to the closure frame is not + // passed on the stack. + KernelSyscall func() +} + +// Registers returns a modifiable-copy of the kernel registers. +// +// This is explicitly safe to call during KernelException and KernelSyscall. +// +//go:nosplit +func (c *CPU) Registers() *syscall.PtraceRegs { + return &c.registers +} diff --git a/pkg/sentry/platform/ring0/defs_amd64.go b/pkg/sentry/platform/ring0/defs_amd64.go new file mode 100644 index 000000000..bb3420125 --- /dev/null +++ b/pkg/sentry/platform/ring0/defs_amd64.go @@ -0,0 +1,113 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package ring0 + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables" +) + +// Segment indices and Selectors. +const ( + // Index into GDT array. + _ = iota // Null descriptor first. + _ // Reserved (Linux is kernel 32). 
+ segKcode // Kernel code (64-bit). + segKdata // Kernel data. + segUcode32 // User code (32-bit). + segUdata // User data. + segUcode64 // User code (64-bit). + segTss // Task segment descriptor. + segTssHi // Upper bits for TSS. + segLast // Last segment (terminal, not included). +) + +// Selectors. +const ( + Kcode Selector = segKcode << 3 + Kdata Selector = segKdata << 3 + Ucode32 Selector = (segUcode32 << 3) | 3 + Udata Selector = (segUdata << 3) | 3 + Ucode64 Selector = (segUcode64 << 3) | 3 + Tss Selector = segTss << 3 +) + +// Standard segments. +var ( + UserCodeSegment32 SegmentDescriptor + UserDataSegment SegmentDescriptor + UserCodeSegment64 SegmentDescriptor + KernelCodeSegment SegmentDescriptor + KernelDataSegment SegmentDescriptor +) + +// KernelOpts has initialization options for the kernel. +type KernelOpts struct { + // PageTables are the kernel pagetables; this must be provided. + PageTables *pagetables.PageTables +} + +// KernelArchState contains architecture-specific state. +type KernelArchState struct { + KernelOpts + + // globalIDT is our set of interrupt gates. + globalIDT idt64 +} + +// CPUArchState contains CPU-specific arch state. +type CPUArchState struct { + // stack is the stack used for interrupts on this CPU. + stack [256]byte + + // errorCode is the error code from the last exception. + errorCode uintptr + + // errorType indicates the type of error code here, it is always set + // along with the errorCode value above. + // + // It will either by 1, which indicates a user error, or 0 indicating a + // kernel error. If the error code below returns false (kernel error), + // then it cannot provide relevant information about the last + // exception. + errorType uintptr + + // gdt is the CPU's descriptor table. + gdt descriptorTable + + // tss is the CPU's task state. + tss TaskState64 +} + +// ErrorCode returns the last error code. 
+// +// The returned boolean indicates whether the error code corresponds to the +// last user error or not. If it does not, then fault information must be +// ignored. This is generally the result of a kernel fault while servicing a +// user fault. +// +//go:nosplit +func (c *CPU) ErrorCode() (value uintptr, user bool) { + return c.errorCode, c.errorType != 0 +} + +func init() { + KernelCodeSegment.setCode64(0, 0, 0) + KernelDataSegment.setData(0, 0xffffffff, 0) + UserCodeSegment32.setCode64(0, 0, 3) + UserDataSegment.setData(0, 0xffffffff, 3) + UserCodeSegment64.setCode64(0, 0, 3) +} diff --git a/pkg/sentry/platform/ring0/entry_amd64.go b/pkg/sentry/platform/ring0/entry_amd64.go new file mode 100644 index 000000000..a3e992e0d --- /dev/null +++ b/pkg/sentry/platform/ring0/entry_amd64.go @@ -0,0 +1,128 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package ring0 + +import ( + "syscall" +) + +// This is an assembly function. +// +// The sysenter function is invoked in two situations: +// +// (1) The guest kernel has executed a system call. +// (2) The guest application has executed a system call. +// +// The interrupt flag is examined to determine whether the system call was +// executed from kernel mode or not and the appropriate stub is called. +func sysenter() + +// swapgs swaps the current GS value. +// +// This must be called prior to sysret/iret. 
+func swapgs() + +// sysret returns to userspace from a system call. +// +// The return code is the vector that interrupted execution. +// +// See stubs.go for a note regarding the frame size of this function. +func sysret(*CPU, *syscall.PtraceRegs) Vector + +// "iret is the cadillac of CPL switching." +// +// -- Neel Natu +// +// iret is nearly identical to sysret, except an iret is used to fully restore +// all user state. This must be called in cases where all registers need to be +// restored. +func iret(*CPU, *syscall.PtraceRegs) Vector + +// exception is the generic exception entry. +// +// This is called by the individual stub definitions. +func exception() + +// resume is a stub that restores the CPU kernel registers. +// +// This is used when processing kernel exceptions and syscalls. +func resume() + +// Start is the CPU entrypoint. +// +// The following start conditions must be satisfied: +// +// * AX should contain the CPU pointer. +// * c.GDT() should be loaded as the GDT. +// * c.IDT() should be loaded as the IDT. +// * c.CR0() should be the current CR0 value. +// * c.CR3() should be set to the kernel PageTables. +// * c.CR4() should be the current CR4 value. +// * c.EFER() should be the current EFER value. +// +// The CPU state will be set to c.Registers(). +func Start() + +// Exception stubs. +func divideByZero() +func debug() +func nmi() +func breakpoint() +func overflow() +func boundRangeExceeded() +func invalidOpcode() +func deviceNotAvailable() +func doubleFault() +func coprocessorSegmentOverrun() +func invalidTSS() +func segmentNotPresent() +func stackSegmentFault() +func generalProtectionFault() +func pageFault() +func x87FloatingPointException() +func alignmentCheck() +func machineCheck() +func simdFloatingPointException() +func virtualizationException() +func securityException() +func syscallInt80() + +// Exception handler index. 
+var handlers = map[Vector]func(){ + DivideByZero: divideByZero, + Debug: debug, + NMI: nmi, + Breakpoint: breakpoint, + Overflow: overflow, + BoundRangeExceeded: boundRangeExceeded, + InvalidOpcode: invalidOpcode, + DeviceNotAvailable: deviceNotAvailable, + DoubleFault: doubleFault, + CoprocessorSegmentOverrun: coprocessorSegmentOverrun, + InvalidTSS: invalidTSS, + SegmentNotPresent: segmentNotPresent, + StackSegmentFault: stackSegmentFault, + GeneralProtectionFault: generalProtectionFault, + PageFault: pageFault, + X87FloatingPointException: x87FloatingPointException, + AlignmentCheck: alignmentCheck, + MachineCheck: machineCheck, + SIMDFloatingPointException: simdFloatingPointException, + VirtualizationException: virtualizationException, + SecurityException: securityException, + SyscallInt80: syscallInt80, +} diff --git a/pkg/sentry/platform/ring0/entry_amd64.s b/pkg/sentry/platform/ring0/entry_amd64.s new file mode 100644 index 000000000..e8638133b --- /dev/null +++ b/pkg/sentry/platform/ring0/entry_amd64.s @@ -0,0 +1,334 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "funcdata.h" +#include "textflag.h" + +// NB: Offsets are programatically generated (see BUILD). +// +// This file is concatenated with the definitions. + +// Saves a register set. +// +// This is a macro because it may need to executed in contents where a stack is +// not available for calls. 
+// +// The following registers are not saved: AX, SP, IP, FLAGS, all segments. +#define REGISTERS_SAVE(reg, offset) \ + MOVQ R15, offset+PTRACE_R15(reg); \ + MOVQ R14, offset+PTRACE_R14(reg); \ + MOVQ R13, offset+PTRACE_R13(reg); \ + MOVQ R12, offset+PTRACE_R12(reg); \ + MOVQ BP, offset+PTRACE_RBP(reg); \ + MOVQ BX, offset+PTRACE_RBX(reg); \ + MOVQ CX, offset+PTRACE_RCX(reg); \ + MOVQ DX, offset+PTRACE_RDX(reg); \ + MOVQ R11, offset+PTRACE_R11(reg); \ + MOVQ R10, offset+PTRACE_R10(reg); \ + MOVQ R9, offset+PTRACE_R9(reg); \ + MOVQ R8, offset+PTRACE_R8(reg); \ + MOVQ SI, offset+PTRACE_RSI(reg); \ + MOVQ DI, offset+PTRACE_RDI(reg); + +// Loads a register set. +// +// This is a macro because it may need to executed in contents where a stack is +// not available for calls. +// +// The following registers are not loaded: AX, SP, IP, FLAGS, all segments. +#define REGISTERS_LOAD(reg, offset) \ + MOVQ offset+PTRACE_R15(reg), R15; \ + MOVQ offset+PTRACE_R14(reg), R14; \ + MOVQ offset+PTRACE_R13(reg), R13; \ + MOVQ offset+PTRACE_R12(reg), R12; \ + MOVQ offset+PTRACE_RBP(reg), BP; \ + MOVQ offset+PTRACE_RBX(reg), BX; \ + MOVQ offset+PTRACE_RCX(reg), CX; \ + MOVQ offset+PTRACE_RDX(reg), DX; \ + MOVQ offset+PTRACE_R11(reg), R11; \ + MOVQ offset+PTRACE_R10(reg), R10; \ + MOVQ offset+PTRACE_R9(reg), R9; \ + MOVQ offset+PTRACE_R8(reg), R8; \ + MOVQ offset+PTRACE_RSI(reg), SI; \ + MOVQ offset+PTRACE_RDI(reg), DI; + +// SWAP_GS swaps the kernel GS (CPU). +#define SWAP_GS() \ + BYTE $0x0F; BYTE $0x01; BYTE $0xf8; + +// IRET returns from an interrupt frame. +#define IRET() \ + BYTE $0x48; BYTE $0xcf; + +// SYSRET64 executes the sysret instruction. +#define SYSRET64() \ + BYTE $0x48; BYTE $0x0f; BYTE $0x07; + +// LOAD_KERNEL_ADDRESS loads a kernel address. +#define LOAD_KERNEL_ADDRESS(from, to) \ + MOVQ from, to; \ + ORQ ·KernelStartAddress(SB), to; + +// LOAD_KERNEL_STACK loads the kernel stack. 
+#define LOAD_KERNEL_STACK(from) \ + LOAD_KERNEL_ADDRESS(CPU_SELF(from), SP); \ + LEAQ CPU_STACK_TOP(SP), SP; + +// See kernel.go. +TEXT ·Halt(SB),NOSPLIT,$0 + HLT + RET + +// See kernel.go. +TEXT ·Current(SB),NOSPLIT,$0-8 + MOVQ CPU_SELF(GS), AX + MOVQ AX, ret+0(FP) + RET + +// See entry_amd64.go. +TEXT ·swapgs(SB),NOSPLIT,$0 + SWAP_GS() + RET + +// See entry_amd64.go. +TEXT ·sysret(SB),NOSPLIT,$0-24 + // Save original state. + LOAD_KERNEL_ADDRESS(cpu+0(FP), BX) + LOAD_KERNEL_ADDRESS(regs+8(FP), AX) + MOVQ SP, CPU_REGISTERS+PTRACE_RSP(BX) + MOVQ BP, CPU_REGISTERS+PTRACE_RBP(BX) + MOVQ AX, CPU_REGISTERS+PTRACE_RAX(BX) + + // Restore user register state. + REGISTERS_LOAD(AX, 0) + MOVQ PTRACE_RIP(AX), CX // Needed for SYSRET. + MOVQ PTRACE_FLAGS(AX), R11 // Needed for SYSRET. + MOVQ PTRACE_RSP(AX), SP // Restore the stack directly. + MOVQ PTRACE_RAX(AX), AX // Restore AX (scratch). + SYSRET64() + +// See entry_amd64.go. +TEXT ·iret(SB),NOSPLIT,$0-24 + // Save original state. + LOAD_KERNEL_ADDRESS(cpu+0(FP), BX) + LOAD_KERNEL_ADDRESS(regs+8(FP), AX) + MOVQ SP, CPU_REGISTERS+PTRACE_RSP(BX) + MOVQ BP, CPU_REGISTERS+PTRACE_RBP(BX) + MOVQ AX, CPU_REGISTERS+PTRACE_RAX(BX) + + // Build an IRET frame & restore state. + LOAD_KERNEL_STACK(BX) + MOVQ PTRACE_SS(AX), BX; PUSHQ BX + MOVQ PTRACE_RSP(AX), CX; PUSHQ CX + MOVQ PTRACE_FLAGS(AX), DX; PUSHQ DX + MOVQ PTRACE_CS(AX), DI; PUSHQ DI + MOVQ PTRACE_RIP(AX), SI; PUSHQ SI + REGISTERS_LOAD(AX, 0) // Restore most registers. + MOVQ PTRACE_RAX(AX), AX // Restore AX (scratch). + IRET() + +// See entry_amd64.go. +TEXT ·resume(SB),NOSPLIT,$0 + // See iret, above. + MOVQ CPU_REGISTERS+PTRACE_SS(GS), BX; PUSHQ BX + MOVQ CPU_REGISTERS+PTRACE_RSP(GS), CX; PUSHQ CX + MOVQ CPU_REGISTERS+PTRACE_FLAGS(GS), DX; PUSHQ DX + MOVQ CPU_REGISTERS+PTRACE_CS(GS), DI; PUSHQ DI + MOVQ CPU_REGISTERS+PTRACE_RIP(GS), SI; PUSHQ SI + REGISTERS_LOAD(GS, CPU_REGISTERS) + MOVQ CPU_REGISTERS+PTRACE_RAX(GS), AX + IRET() + +// See entry_amd64.go. 
+TEXT ·Start(SB),NOSPLIT,$0 + LOAD_KERNEL_STACK(AX) // Set the stack. + PUSHQ $0x0 // Previous frame pointer. + MOVQ SP, BP // Set frame pointer. + PUSHQ AX // First argument (CPU). + CALL ·start(SB) // Call Go hook. + JMP ·resume(SB) // Restore to registers. + +// See entry_amd64.go. +TEXT ·sysenter(SB),NOSPLIT,$0 + // Interrupts are always disabled while we're executing in kernel mode + // and always enabled while executing in user mode. Therefore, we can + // reliably look at the flags in R11 to determine where this syscall + // was from. + TESTL $_RFLAGS_IF, R11 + JZ kernel + +user: + SWAP_GS() + XCHGQ CPU_REGISTERS+PTRACE_RSP(GS), SP // Swap stacks. + XCHGQ CPU_REGISTERS+PTRACE_RAX(GS), AX // Swap for AX (regs). + REGISTERS_SAVE(AX, 0) // Save all except IP, FLAGS, SP, AX. + MOVQ CPU_REGISTERS+PTRACE_RAX(GS), BX // Load saved AX value. + MOVQ BX, PTRACE_RAX(AX) // Save everything else. + MOVQ BX, PTRACE_ORIGRAX(AX) + MOVQ CX, PTRACE_RIP(AX) + MOVQ R11, PTRACE_FLAGS(AX) + MOVQ CPU_REGISTERS+PTRACE_RSP(GS), BX; MOVQ BX, PTRACE_RSP(AX) + MOVQ $0, CPU_ERROR_CODE(GS) // Clear error code. + MOVQ $1, CPU_ERROR_TYPE(GS) // Set error type to user. + + // Return to the kernel, where the frame is: + // + // vector (sp+24) + // regs (sp+16) + // cpu (sp+8) + // vcpu.Switch (sp+0) + // + MOVQ CPU_REGISTERS+PTRACE_RBP(GS), BP // Original base pointer. + MOVQ $Syscall, 24(SP) // Output vector. + RET + +kernel: + // We can't restore the original stack, but we can access the registers + // in the CPU state directly. No need for temporary juggling. + MOVQ AX, CPU_REGISTERS+PTRACE_ORIGRAX(GS) + MOVQ AX, CPU_REGISTERS+PTRACE_RAX(GS) + REGISTERS_SAVE(GS, CPU_REGISTERS) + MOVQ CX, CPU_REGISTERS+PTRACE_RIP(GS) + MOVQ R11, CPU_REGISTERS+PTRACE_FLAGS(GS) + MOVQ SP, CPU_REGISTERS+PTRACE_RSP(GS) + MOVQ $0, CPU_ERROR_CODE(GS) // Clear error code. + MOVQ $0, CPU_ERROR_TYPE(GS) // Set error type to kernel. + + // Load the function stored in KernelSyscall. 
+ // + // Note that this function needs to be executed on the stack in case + // the runtime decides to make use of the redzone (grumble). This also + // protects against any functions that might not be go:nosplit, since + // this will cause a failure immediately. + LOAD_KERNEL_STACK(GS) + MOVQ CPU_KERNEL_SYSCALL(GS), DX // Function data. + MOVQ 0(DX), AX // Function pointer. + PUSHQ BP // Push the frame pointer. + MOVQ SP, BP // Set frame pointer value. + CALL *AX // Call the function. + POPQ BP // Restore the frame pointer. + JMP ·resume(SB) + +// exception is a generic exception handler. +// +// There are two cases handled: +// +// 1) An exception in kernel mode: this results in saving the state at the time +// of the exception and calling the defined hook. +// +// 2) An exception in guest mode: the original kernel frame is restored, and +// the vector & error codes are pushed as return values. +// +// See below for the stubs that call exception. +TEXT ·exception(SB),NOSPLIT,$0 + // Determine whether the exception occurred in kernel mode or user + // mode, based on the flags. We expect the following stack: + // + // SS (sp+48) + // SP (sp+40) + // FLAGS (sp+32) + // CS (sp+24) + // IP (sp+16) + // ERROR_CODE (sp+8) + // VECTOR (sp+0) + // + TESTL $_RFLAGS_IF, 32(SP) + JZ kernel + +user: + SWAP_GS() + XCHGQ CPU_REGISTERS+PTRACE_RAX(GS), AX // Swap for AX (regs). + REGISTERS_SAVE(AX, 0) // Save all except IP, FLAGS, SP, AX. + MOVQ CPU_REGISTERS+PTRACE_RAX(GS), BX // Load saved AX value. + MOVQ BX, PTRACE_RAX(AX) // Save everything else. + MOVQ BX, PTRACE_ORIGRAX(AX) + MOVQ 16(SP), BX; MOVQ BX, PTRACE_RIP(AX) + MOVQ 24(SP), CX; MOVQ CX, PTRACE_CS(AX) + MOVQ 32(SP), DX; MOVQ DX, PTRACE_FLAGS(AX) + MOVQ 40(SP), DI; MOVQ DI, PTRACE_RSP(AX) + MOVQ 48(SP), SI; MOVQ SI, PTRACE_SS(AX) + + // Copy out and return. + MOVQ 0(SP), BX // Load vector. + MOVQ 8(SP), CX // Load error code. + MOVQ CPU_REGISTERS+PTRACE_RSP(GS), SP // Original stack (kernel version). 
+ MOVQ CPU_REGISTERS+PTRACE_RBP(GS), BP // Original base pointer. + MOVQ CX, CPU_ERROR_CODE(GS) // Set error code. + MOVQ $1, CPU_ERROR_TYPE(GS) // Set error type to user. + MOVQ BX, 24(SP) // Output vector. + RET + +kernel: + // As per above, we can save directly. + MOVQ AX, CPU_REGISTERS+PTRACE_RAX(GS) + MOVQ AX, CPU_REGISTERS+PTRACE_ORIGRAX(GS) + REGISTERS_SAVE(GS, CPU_REGISTERS) + MOVQ 16(SP), AX; MOVQ AX, CPU_REGISTERS+PTRACE_RIP(GS) + MOVQ 32(SP), BX; MOVQ BX, CPU_REGISTERS+PTRACE_FLAGS(GS) + MOVQ 40(SP), CX; MOVQ CX, CPU_REGISTERS+PTRACE_RSP(GS) + + // Set the error code and adjust the stack. + MOVQ 8(SP), AX // Load the error code. + MOVQ AX, CPU_ERROR_CODE(GS) // Copy out to the CPU. + MOVQ $0, CPU_ERROR_TYPE(GS) // Set error type to kernel. + MOVQ 0(SP), BX // BX contains the vector. + ADDQ $48, SP // Drop the exception frame. + + // Load the function stored in KernelException. + // + // See note above re: the kernel stack. + LOAD_KERNEL_STACK(GS) + MOVQ CPU_KERNEL_EXCEPTION(GS), DX // Function data. + MOVQ 0(DX), AX // Function pointer. + PUSHQ BP // Push the frame pointer. + MOVQ SP, BP // Set frame pointer value. + PUSHQ BX // First argument (vector). + CALL *AX // Call the function. + POPQ BX // Discard the argument. + POPQ BP // Restore the frame pointer. 
+ JMP ·resume(SB) + +#define EXCEPTION_WITH_ERROR(value, symbol) \ +TEXT symbol,NOSPLIT,$0; \ + PUSHQ $value; \ + JMP ·exception(SB); + +#define EXCEPTION_WITHOUT_ERROR(value, symbol) \ +TEXT symbol,NOSPLIT,$0; \ + PUSHQ $0x0; \ + PUSHQ $value; \ + JMP ·exception(SB); + +EXCEPTION_WITHOUT_ERROR(DivideByZero, ·divideByZero(SB)) +EXCEPTION_WITHOUT_ERROR(Debug, ·debug(SB)) +EXCEPTION_WITHOUT_ERROR(NMI, ·nmi(SB)) +EXCEPTION_WITHOUT_ERROR(Breakpoint, ·breakpoint(SB)) +EXCEPTION_WITHOUT_ERROR(Overflow, ·overflow(SB)) +EXCEPTION_WITHOUT_ERROR(BoundRangeExceeded, ·boundRangeExceeded(SB)) +EXCEPTION_WITHOUT_ERROR(InvalidOpcode, ·invalidOpcode(SB)) +EXCEPTION_WITHOUT_ERROR(DeviceNotAvailable, ·deviceNotAvailable(SB)) +EXCEPTION_WITH_ERROR(DoubleFault, ·doubleFault(SB)) +EXCEPTION_WITHOUT_ERROR(CoprocessorSegmentOverrun, ·coprocessorSegmentOverrun(SB)) +EXCEPTION_WITH_ERROR(InvalidTSS, ·invalidTSS(SB)) +EXCEPTION_WITH_ERROR(SegmentNotPresent, ·segmentNotPresent(SB)) +EXCEPTION_WITH_ERROR(StackSegmentFault, ·stackSegmentFault(SB)) +EXCEPTION_WITH_ERROR(GeneralProtectionFault, ·generalProtectionFault(SB)) +EXCEPTION_WITH_ERROR(PageFault, ·pageFault(SB)) +EXCEPTION_WITHOUT_ERROR(X87FloatingPointException, ·x87FloatingPointException(SB)) +EXCEPTION_WITH_ERROR(AlignmentCheck, ·alignmentCheck(SB)) +EXCEPTION_WITHOUT_ERROR(MachineCheck, ·machineCheck(SB)) +EXCEPTION_WITHOUT_ERROR(SIMDFloatingPointException, ·simdFloatingPointException(SB)) +EXCEPTION_WITHOUT_ERROR(VirtualizationException, ·virtualizationException(SB)) +EXCEPTION_WITH_ERROR(SecurityException, ·securityException(SB)) +EXCEPTION_WITHOUT_ERROR(SyscallInt80, ·syscallInt80(SB)) diff --git a/pkg/sentry/platform/ring0/gen_offsets/BUILD b/pkg/sentry/platform/ring0/gen_offsets/BUILD new file mode 100644 index 000000000..3bce56985 --- /dev/null +++ b/pkg/sentry/platform/ring0/gen_offsets/BUILD @@ -0,0 +1,25 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_binary") 
+load("//tools/go_generics:defs.bzl", "go_template_instance") + +go_template_instance( + name = "defs_impl", + out = "defs_impl.go", + package = "main", + template = "//pkg/sentry/platform/ring0:defs", +) + +go_binary( + name = "gen_offsets", + srcs = [ + "defs_impl.go", + "main.go", + ], + visibility = ["//pkg/sentry/platform/ring0:__pkg__"], + deps = [ + "//pkg/cpuid", + "//pkg/sentry/platform/ring0/pagetables", + "//pkg/sentry/usermem", + ], +) diff --git a/pkg/sentry/platform/ring0/gen_offsets/main.go b/pkg/sentry/platform/ring0/gen_offsets/main.go new file mode 100644 index 000000000..ffa7eaf77 --- /dev/null +++ b/pkg/sentry/platform/ring0/gen_offsets/main.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Binary gen_offsets is a helper for generating offset headers. +package main + +import ( + "os" +) + +func main() { + Emit(os.Stdout) +} diff --git a/pkg/sentry/platform/ring0/kernel.go b/pkg/sentry/platform/ring0/kernel.go new file mode 100644 index 000000000..b0471ab9a --- /dev/null +++ b/pkg/sentry/platform/ring0/kernel.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ring0 + +// New creates a new kernel. +// +// N.B. that constraints on KernelOpts must be satisfied. +// +// Init must have been called. +func New(opts KernelOpts) *Kernel { + k := new(Kernel) + k.init(opts) + return k +} + +// NewCPU creates a new CPU associated with this Kernel. +// +// Note that execution of the new CPU must begin at Start, with constraints as +// documented. Initialization is not completed by this method alone. +// +// See also Init. +func (k *Kernel) NewCPU() *CPU { + c := new(CPU) + c.Init(k) + return c +} + +// Halt halts execution. +func Halt() + +// Current returns the current CPU. +// +// Its use is only legal in the KernelSyscall and KernelException contexts, +// which must all be guarded go:nosplit. +func Current() *CPU + +// defaultSyscall is the default syscall hook. +// +//go:nosplit +func defaultSyscall() { Halt() } + +// defaultException is the default exception hook. +// +//go:nosplit +func defaultException(Vector) { Halt() } + +// Init allows the initialization of a CPU from a kernel without allocation. +// The same constraints as NewCPU apply. +// +// Init allows embedding in other objects. +func (c *CPU) Init(k *Kernel) { + c.self = c // Set self reference. + c.kernel = k // Set kernel reference. + c.init() // Perform architectural init. + + // Defaults. 
+ c.KernelSyscall = defaultSyscall + c.KernelException = defaultException +} diff --git a/pkg/sentry/platform/ring0/kernel_amd64.go b/pkg/sentry/platform/ring0/kernel_amd64.go new file mode 100644 index 000000000..c82613a9c --- /dev/null +++ b/pkg/sentry/platform/ring0/kernel_amd64.go @@ -0,0 +1,280 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package ring0 + +import ( + "encoding/binary" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables" +) + +const ( + // KernelFlagsSet should always be set in the kernel. + KernelFlagsSet = _RFLAGS_RESERVED + + // UserFlagsSet are always set in userspace. + UserFlagsSet = _RFLAGS_RESERVED | _RFLAGS_IF + + // KernelFlagsClear should always be clear in the kernel. + KernelFlagsClear = _RFLAGS_IF | _RFLAGS_NT | _RFLAGS_IOPL + + // UserFlagsClear are always cleared in userspace. + UserFlagsClear = _RFLAGS_NT | _RFLAGS_IOPL +) + +// init initializes architecture-specific state. +func (k *Kernel) init(opts KernelOpts) { + // Save the root page tables. + k.PageTables = opts.PageTables + + // Setup the IDT, which is uniform. + for v, handler := range handlers { + // Note that we set all traps to use the interrupt stack, this + // is defined below when setting up the TSS. + k.globalIDT[v].setInterrupt(Kcode, uint64(kernelFunc(handler)), 0 /* dpl */, 1 /* ist */) + } +} + +// init initializes architecture-specific state. 
+func (c *CPU) init() { + // Null segment. + c.gdt[0].setNull() + + // Kernel & user segments. + c.gdt[segKcode] = KernelCodeSegment + c.gdt[segKdata] = KernelDataSegment + c.gdt[segUcode32] = UserCodeSegment32 + c.gdt[segUdata] = UserDataSegment + c.gdt[segUcode64] = UserCodeSegment64 + + // The task segment, this spans two entries. + tssBase, tssLimit, _ := c.TSS() + c.gdt[segTss].set( + uint32(tssBase), + uint32(tssLimit), + 0, // Privilege level zero. + SegmentDescriptorPresent| + SegmentDescriptorAccess| + SegmentDescriptorWrite| + SegmentDescriptorExecute) + c.gdt[segTssHi].setHi(uint32((tssBase) >> 32)) + + // Set the kernel stack pointer in the TSS (virtual address). + stackAddr := c.StackTop() + c.tss.rsp0Lo = uint32(stackAddr) + c.tss.rsp0Hi = uint32(stackAddr >> 32) + c.tss.ist1Lo = uint32(stackAddr) + c.tss.ist1Hi = uint32(stackAddr >> 32) + + // Permanently set the kernel segments. + c.registers.Cs = uint64(Kcode) + c.registers.Ds = uint64(Kdata) + c.registers.Es = uint64(Kdata) + c.registers.Ss = uint64(Kdata) + c.registers.Fs = uint64(Kdata) + c.registers.Gs = uint64(Kdata) +} + +// StackTop returns the kernel's stack address. +// +//go:nosplit +func (c *CPU) StackTop() uint64 { + return uint64(kernelAddr(&c.stack[0])) + uint64(len(c.stack)) +} + +// IDT returns the CPU's IDT base and limit. +// +//go:nosplit +func (c *CPU) IDT() (uint64, uint16) { + return uint64(kernelAddr(&c.kernel.globalIDT[0])), uint16(binary.Size(&c.kernel.globalIDT) - 1) +} + +// GDT returns the CPU's GDT base and limit. +// +//go:nosplit +func (c *CPU) GDT() (uint64, uint16) { + return uint64(kernelAddr(&c.gdt[0])), uint16(8*segLast - 1) +} + +// TSS returns the CPU's TSS base, limit and value. +// +//go:nosplit +func (c *CPU) TSS() (uint64, uint16, *SegmentDescriptor) { + return uint64(kernelAddr(&c.tss)), uint16(binary.Size(&c.tss) - 1), &c.gdt[segTss] +} + +// CR0 returns the CPU's CR0 value. 
+// +//go:nosplit +func (c *CPU) CR0() uint64 { + return _CR0_PE | _CR0_PG | _CR0_ET +} + +// CR4 returns the CPU's CR4 value. +// +//go:nosplit +func (c *CPU) CR4() uint64 { + cr4 := uint64(_CR4_PAE | _CR4_PSE | _CR4_OSFXSR | _CR4_OSXMMEXCPT) + if hasPCID { + cr4 |= _CR4_PCIDE + } + if hasXSAVE { + cr4 |= _CR4_OSXSAVE + } + if hasSMEP { + cr4 |= _CR4_SMEP + } + if hasFSGSBASE { + cr4 |= _CR4_FSGSBASE + } + return cr4 +} + +// EFER returns the CPU's EFER value. +// +//go:nosplit +func (c *CPU) EFER() uint64 { + return _EFER_LME | _EFER_SCE | _EFER_NX +} + +// IsCanonical indicates whether addr is canonical per the amd64 spec. +// +//go:nosplit +func IsCanonical(addr uint64) bool { + return addr <= 0x00007fffffffffff || addr > 0xffff800000000000 +} + +// Flags contains flags related to switch. +type Flags uintptr + +const ( + // FlagFull indicates that a full restore should be not, not a fast + // restore (on the syscall return path.) + FlagFull = 1 << iota + + // FlagFlush indicates that a full TLB flush is required. + FlagFlush +) + +// SwitchToUser performs either a sysret or an iret. +// +// The return value is the vector that interrupted execution. +// +// This function will not split the stack. Callers will probably want to call +// runtime.entersyscall (and pair with a call to runtime.exitsyscall) prior to +// calling this function. +// +// When this is done, this region is quite sensitive to things like system +// calls. After calling entersyscall, any memory used must have been allocated +// and no function calls without go:nosplit are permitted. Any calls made here +// are protected appropriately (e.g. IsCanonical and CR3). +// +// Also note that this function transitively depends on the compiler generating +// code that uses IP-relative addressing inside of absolute addresses. That's +// the case for amd64, but may not be the case for other architectures. 
+// +//go:nosplit +func (c *CPU) SwitchToUser(regs *syscall.PtraceRegs, fpState *byte, pt *pagetables.PageTables, flags Flags) (vector Vector) { + // Check for canonical addresses. + if !IsCanonical(regs.Rip) || !IsCanonical(regs.Rsp) || !IsCanonical(regs.Fs_base) || !IsCanonical(regs.Gs_base) { + return GeneralProtectionFault + } + + var ( + userCR3 uint64 + kernelCR3 uint64 + ) + + // Sanitize registers. + if flags&FlagFlush != 0 { + userCR3 = pt.FlushCR3() + } else { + userCR3 = pt.CR3() + } + regs.Eflags &= ^uint64(UserFlagsClear) + regs.Eflags |= UserFlagsSet + regs.Cs = uint64(Ucode64) // Required for iret. + regs.Ss = uint64(Udata) // Ditto. + kernelCR3 = c.kernel.PageTables.CR3() + + // Perform the switch. + swapgs() // GS will be swapped on return. + wrfs(uintptr(regs.Fs_base)) // Set application FS. + wrgs(uintptr(regs.Gs_base)) // Set application GS. + LoadFloatingPoint(fpState) // Copy in floating point. + jumpToKernel() // Switch to upper half. + writeCR3(uintptr(userCR3)) // Change to user address space. + if flags&FlagFull != 0 { + vector = iret(c, regs) + } else { + vector = sysret(c, regs) + } + writeCR3(uintptr(kernelCR3)) // Return to kernel address space. + jumpToUser() // Return to lower half. + SaveFloatingPoint(fpState) // Copy out floating point. + wrfs(uintptr(c.registers.Fs_base)) // Restore kernel FS. + return +} + +// start is the CPU entrypoint. +// +// This is called from the Start asm stub (see entry_amd64.go); on return the +// registers in c.registers will be restored (not segments). +// +//go:nosplit +func start(c *CPU) { + // Save per-cpu & FS segment. + wrgs(kernelAddr(c)) + wrfs(uintptr(c.Registers().Fs_base)) + + // Initialize floating point. + // + // Note that on skylake, the valid XCR0 mask reported seems to be 0xff. 
+ // This breaks down as: + // + // bit0 - x87 + // bit1 - SSE + // bit2 - AVX + // bit3-4 - MPX + // bit5-7 - AVX512 + // + // For some reason, enabled MPX & AVX512 on platforms that report them + // seems to be cause a general protection fault. (Maybe there are some + // virtualization issues and these aren't exported to the guest cpuid.) + // This needs further investigation, but we can limit the floating + // point operations to x87, SSE & AVX for now. + fninit() + xsetbv(0, validXCR0Mask&0x7) + + // Set the syscall target. + wrmsr(_MSR_LSTAR, kernelFunc(sysenter)) + wrmsr(_MSR_SYSCALL_MASK, _RFLAGS_STEP|_RFLAGS_IF|_RFLAGS_DF|_RFLAGS_IOPL|_RFLAGS_AC|_RFLAGS_NT) + + // NOTE: This depends on having the 64-bit segments immediately + // following the 32-bit user segments. This is simply the way the + // sysret instruction is designed to work (it assumes they follow). + wrmsr(_MSR_STAR, uintptr(uint64(Kcode)<<32|uint64(Ucode32)<<48)) + wrmsr(_MSR_CSTAR, kernelFunc(sysenter)) +} + +// ReadCR2 reads the current CR2 value. +// +//go:nosplit +func ReadCR2() uintptr { + return readCR2() +} diff --git a/pkg/sentry/platform/ring0/kernel_unsafe.go b/pkg/sentry/platform/ring0/kernel_unsafe.go new file mode 100644 index 000000000..cfb3ad853 --- /dev/null +++ b/pkg/sentry/platform/ring0/kernel_unsafe.go @@ -0,0 +1,41 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ring0 + +import ( + "unsafe" +) + +// eface mirrors runtime.eface. +type eface struct { + typ uintptr + data unsafe.Pointer +} + +// kernelAddr returns the kernel virtual address for the given object. +// +//go:nosplit +func kernelAddr(obj interface{}) uintptr { + e := (*eface)(unsafe.Pointer(&obj)) + return KernelStartAddress | uintptr(e.data) +} + +// kernelFunc returns the address of the given function. +// +//go:nosplit +func kernelFunc(fn func()) uintptr { + fnptr := (**uintptr)(unsafe.Pointer(&fn)) + return KernelStartAddress | **fnptr +} diff --git a/pkg/sentry/platform/ring0/lib_amd64.go b/pkg/sentry/platform/ring0/lib_amd64.go new file mode 100644 index 000000000..f1ed5bfb4 --- /dev/null +++ b/pkg/sentry/platform/ring0/lib_amd64.go @@ -0,0 +1,128 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package ring0 + +import ( + "gvisor.googlesource.com/gvisor/pkg/cpuid" +) + +// LoadFloatingPoint loads floating point state by the most efficient mechanism +// available (set by Init). +var LoadFloatingPoint func(*byte) + +// SaveFloatingPoint saves floating point state by the most efficient mechanism +// available (set by Init). +var SaveFloatingPoint func(*byte) + +// fxrstor uses fxrstor64 to load floating point state. +func fxrstor(*byte) + +// xrstor uses xrstor to load floating point state. +func xrstor(*byte) + +// fxsave uses fxsave64 to save floating point state. 
+func fxsave(*byte) + +// xsave uses xsave to save floating point state. +func xsave(*byte) + +// xsaveopt uses xsaveopt to save floating point state. +func xsaveopt(*byte) + +// wrfs sets the GS address (set by init). +var wrfs func(addr uintptr) + +// wrfsbase writes to the GS base address. +func wrfsbase(addr uintptr) + +// wrfsmsr writes to the GS_BASE MSR. +func wrfsmsr(addr uintptr) + +// wrgs sets the GS address (set by init). +var wrgs func(addr uintptr) + +// wrgsbase writes to the GS base address. +func wrgsbase(addr uintptr) + +// wrgsmsr writes to the GS_BASE MSR. +func wrgsmsr(addr uintptr) + +// writeCR3 writes the CR3 value. +func writeCR3(phys uintptr) + +// readCR2 reads the current CR2 value. +func readCR2() uintptr + +// jumpToKernel jumps to the kernel version of the current RIP. +func jumpToKernel() + +// jumpToUser jumps to the user version of the current RIP. +func jumpToUser() + +// fninit initializes the floating point unit. +func fninit() + +// xsetbv writes to an extended control register. +func xsetbv(reg, value uintptr) + +// xgetbv reads an extended control register. +func xgetbv(reg uintptr) uintptr + +// wrmsr reads to the given MSR. +func wrmsr(reg, value uintptr) + +// rdmsr reads the given MSR. +func rdmsr(reg uintptr) uintptr + +// Mostly-constants set by Init. +var ( + hasSMEP bool + hasPCID bool + hasXSAVEOPT bool + hasXSAVE bool + hasFSGSBASE bool + validXCR0Mask uintptr +) + +// Init sets function pointers based on architectural features. +// +// This must be called prior to using ring0. 
+func Init(featureSet *cpuid.FeatureSet) { + hasSMEP = featureSet.HasFeature(cpuid.X86FeatureSMEP) + hasPCID = featureSet.HasFeature(cpuid.X86FeaturePCID) + hasXSAVEOPT = featureSet.UseXsaveopt() + hasXSAVE = featureSet.UseXsave() + hasFSGSBASE = featureSet.HasFeature(cpuid.X86FeatureFSGSBase) + validXCR0Mask = uintptr(featureSet.ValidXCR0Mask()) + if hasXSAVEOPT { + SaveFloatingPoint = xsaveopt + LoadFloatingPoint = xrstor + } else if hasXSAVE { + SaveFloatingPoint = xsave + LoadFloatingPoint = xrstor + } else { + SaveFloatingPoint = fxsave + LoadFloatingPoint = fxrstor + } + if hasFSGSBASE { + wrfs = wrfsbase + wrgs = wrgsbase + } else { + wrfs = wrfsmsr + wrgs = wrgsmsr + } +} diff --git a/pkg/sentry/platform/ring0/lib_amd64.s b/pkg/sentry/platform/ring0/lib_amd64.s new file mode 100644 index 000000000..6f143ea5a --- /dev/null +++ b/pkg/sentry/platform/ring0/lib_amd64.s @@ -0,0 +1,247 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "funcdata.h" +#include "textflag.h" + +// fxrstor loads floating point state. +// +// The code corresponds to: +// +// fxrstor64 (%rbx) +// +TEXT ·fxrstor(SB),NOSPLIT,$0-8 + MOVQ addr+0(FP), BX + MOVL $0xffffffff, AX + MOVL $0xffffffff, DX + BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0x0b; + RET + +// xrstor loads floating point state. 
+// +// The code corresponds to: +// +// xrstor (%rdi) +// +TEXT ·xrstor(SB),NOSPLIT,$0-8 + MOVQ addr+0(FP), DI + MOVL $0xffffffff, AX + MOVL $0xffffffff, DX + BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0x2f; + RET + +// fxsave saves floating point state. +// +// The code corresponds to: +// +// fxsave64 (%rbx) +// +TEXT ·fxsave(SB),NOSPLIT,$0-8 + MOVQ addr+0(FP), BX + MOVL $0xffffffff, AX + MOVL $0xffffffff, DX + BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0x03; + RET + +// xsave saves floating point state. +// +// The code corresponds to: +// +// xsave (%rdi) +// +TEXT ·xsave(SB),NOSPLIT,$0-8 + MOVQ addr+0(FP), DI + MOVL $0xffffffff, AX + MOVL $0xffffffff, DX + BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0x27; + RET + +// xsaveopt saves floating point state. +// +// The code corresponds to: +// +// xsaveopt (%rdi) +// +TEXT ·xsaveopt(SB),NOSPLIT,$0-8 + MOVQ addr+0(FP), DI + MOVL $0xffffffff, AX + MOVL $0xffffffff, DX + BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0x37; + RET + +// wrfsbase writes to the FS base. +// +// The code corresponds to: +// +// wrfsbase %rax +// +TEXT ·wrfsbase(SB),NOSPLIT,$0-8 + MOVQ addr+0(FP), AX + BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0xd0; + RET + +// wrfsmsr writes to the FSBASE MSR. +// +// The code corresponds to: +// +// wrmsr (writes EDX:EAX to the MSR in ECX) +// +TEXT ·wrfsmsr(SB),NOSPLIT,$0-8 + MOVQ addr+0(FP), AX + MOVQ AX, DX + SHRQ $32, DX + MOVQ $0xc0000100, CX // MSR_FS_BASE + BYTE $0x0f; BYTE $0x30; + RET + +// wrgsbase writes to the GS base. +// +// The code corresponds to: +// +// wrgsbase %rax +// +TEXT ·wrgsbase(SB),NOSPLIT,$0-8 + MOVQ addr+0(FP), AX + BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xae; BYTE $0xd8; + RET + +// wrgsmsr writes to the GSBASE MSR. +// +// See wrfsmsr. +TEXT ·wrgsmsr(SB),NOSPLIT,$0-8 + MOVQ addr+0(FP), AX + MOVQ AX, DX + SHRQ $32, DX + MOVQ $0xc0000101, CX // MSR_GS_BASE + BYTE $0x0f; BYTE $0x30; // WRMSR + RET + +// jumpToUser changes execution to the user address. 
+// +// This works by changing the return value to the user version. +TEXT ·jumpToUser(SB),NOSPLIT,$0 + MOVQ 0(SP), AX + MOVQ ·KernelStartAddress(SB), BX + NOTQ BX + ANDQ BX, SP // Switch the stack. + ANDQ BX, BP // Switch the frame pointer. + ANDQ BX, AX // Future return value. + MOVQ AX, 0(SP) + RET + +// jumpToKernel changes execution to the kernel address space. +// +// This works by changing the return value to the kernel version. +TEXT ·jumpToKernel(SB),NOSPLIT,$0 + MOVQ 0(SP), AX + MOVQ ·KernelStartAddress(SB), BX + ORQ BX, SP // Switch the stack. + ORQ BX, BP // Switch the frame pointer. + ORQ BX, AX // Future return value. + MOVQ AX, 0(SP) + RET + +// writeCR3 writes the given CR3 value. +// +// The code corresponds to: +// +// mov %rax, %cr3 +// +TEXT ·writeCR3(SB),NOSPLIT,$0-8 + MOVQ cr3+0(FP), AX + BYTE $0x0f; BYTE $0x22; BYTE $0xd8; + RET + +// readCR3 reads the current CR3 value. +// +// The code corresponds to: +// +// mov %cr3, %rax +// +TEXT ·readCR3(SB),NOSPLIT,$0-8 + BYTE $0x0f; BYTE $0x20; BYTE $0xd8; + MOVQ AX, ret+0(FP) + RET + +// readCR2 reads the current CR2 value. +// +// The code corresponds to: +// +// mov %cr2, %rax +// +TEXT ·readCR2(SB),NOSPLIT,$0-8 + BYTE $0x0f; BYTE $0x20; BYTE $0xd0; + MOVQ AX, ret+0(FP) + RET + +// fninit initializes the floating point unit. +// +// The code corresponds to: +// +// fninit +TEXT ·fninit(SB),NOSPLIT,$0 + BYTE $0xdb; BYTE $0xe3; + RET + +// xsetbv writes to an extended control register. +// +// The code corresponds to: +// +// xsetbv +// +TEXT ·xsetbv(SB),NOSPLIT,$0-16 + MOVL reg+0(FP), CX + MOVL value+8(FP), AX + MOVL value+12(FP), DX + BYTE $0x0f; BYTE $0x01; BYTE $0xd1; + RET + +// xgetbv reads an extended control register. +// +// The code corresponds to: +// +// xgetbv +// +TEXT ·xgetbv(SB),NOSPLIT,$0-16 + MOVL reg+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0; + MOVL AX, ret+8(FP) + MOVL DX, ret+12(FP) + RET + +// wrmsr writes to a control register. 
+// +// The code corresponds to: +// +// wrmsr +// +TEXT ·wrmsr(SB),NOSPLIT,$0-16 + MOVL reg+0(FP), CX + MOVL value+8(FP), AX + MOVL value+12(FP), DX + BYTE $0x0f; BYTE $0x30; + RET + +// rdmsr reads a control register. +// +// The code corresponds to: +// +// rdmsr +// +TEXT ·rdmsr(SB),NOSPLIT,$0-16 + MOVL reg+0(FP), CX + BYTE $0x0f; BYTE $0x32; + MOVL AX, ret+8(FP) + MOVL DX, ret+12(FP) + RET diff --git a/pkg/sentry/platform/ring0/offsets_amd64.go b/pkg/sentry/platform/ring0/offsets_amd64.go new file mode 100644 index 000000000..9acd442ba --- /dev/null +++ b/pkg/sentry/platform/ring0/offsets_amd64.go @@ -0,0 +1,93 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package ring0 + +import ( + "fmt" + "io" + "reflect" + "syscall" +) + +// Emit prints architecture-specific offsets. 
+func Emit(w io.Writer) { + fmt.Fprintf(w, "// Automatically generated, do not edit.\n") + + c := &CPU{} + fmt.Fprintf(w, "\n// CPU offsets.\n") + fmt.Fprintf(w, "#define CPU_SELF 0x%02x\n", reflect.ValueOf(&c.self).Pointer()-reflect.ValueOf(c).Pointer()) + fmt.Fprintf(w, "#define CPU_REGISTERS 0x%02x\n", reflect.ValueOf(&c.registers).Pointer()-reflect.ValueOf(c).Pointer()) + fmt.Fprintf(w, "#define CPU_STACK_TOP 0x%02x\n", reflect.ValueOf(&c.stack[0]).Pointer()-reflect.ValueOf(c).Pointer()+uintptr(len(c.stack))) + fmt.Fprintf(w, "#define CPU_ERROR_CODE 0x%02x\n", reflect.ValueOf(&c.errorCode).Pointer()-reflect.ValueOf(c).Pointer()) + fmt.Fprintf(w, "#define CPU_ERROR_TYPE 0x%02x\n", reflect.ValueOf(&c.errorType).Pointer()-reflect.ValueOf(c).Pointer()) + fmt.Fprintf(w, "#define CPU_KERNEL_EXCEPTION 0x%02x\n", reflect.ValueOf(&c.KernelException).Pointer()-reflect.ValueOf(c).Pointer()) + fmt.Fprintf(w, "#define CPU_KERNEL_SYSCALL 0x%02x\n", reflect.ValueOf(&c.KernelSyscall).Pointer()-reflect.ValueOf(c).Pointer()) + + fmt.Fprintf(w, "\n// Bits.\n") + fmt.Fprintf(w, "#define _RFLAGS_IF 0x%02x\n", _RFLAGS_IF) + + fmt.Fprintf(w, "\n// Vectors.\n") + fmt.Fprintf(w, "#define DivideByZero 0x%02x\n", DivideByZero) + fmt.Fprintf(w, "#define Debug 0x%02x\n", Debug) + fmt.Fprintf(w, "#define NMI 0x%02x\n", NMI) + fmt.Fprintf(w, "#define Breakpoint 0x%02x\n", Breakpoint) + fmt.Fprintf(w, "#define Overflow 0x%02x\n", Overflow) + fmt.Fprintf(w, "#define BoundRangeExceeded 0x%02x\n", BoundRangeExceeded) + fmt.Fprintf(w, "#define InvalidOpcode 0x%02x\n", InvalidOpcode) + fmt.Fprintf(w, "#define DeviceNotAvailable 0x%02x\n", DeviceNotAvailable) + fmt.Fprintf(w, "#define DoubleFault 0x%02x\n", DoubleFault) + fmt.Fprintf(w, "#define CoprocessorSegmentOverrun 0x%02x\n", CoprocessorSegmentOverrun) + fmt.Fprintf(w, "#define InvalidTSS 0x%02x\n", InvalidTSS) + fmt.Fprintf(w, "#define SegmentNotPresent 0x%02x\n", SegmentNotPresent) + fmt.Fprintf(w, "#define StackSegmentFault 0x%02x\n", 
StackSegmentFault) + fmt.Fprintf(w, "#define GeneralProtectionFault 0x%02x\n", GeneralProtectionFault) + fmt.Fprintf(w, "#define PageFault 0x%02x\n", PageFault) + fmt.Fprintf(w, "#define X87FloatingPointException 0x%02x\n", X87FloatingPointException) + fmt.Fprintf(w, "#define AlignmentCheck 0x%02x\n", AlignmentCheck) + fmt.Fprintf(w, "#define MachineCheck 0x%02x\n", MachineCheck) + fmt.Fprintf(w, "#define SIMDFloatingPointException 0x%02x\n", SIMDFloatingPointException) + fmt.Fprintf(w, "#define VirtualizationException 0x%02x\n", VirtualizationException) + fmt.Fprintf(w, "#define SecurityException 0x%02x\n", SecurityException) + fmt.Fprintf(w, "#define SyscallInt80 0x%02x\n", SyscallInt80) + fmt.Fprintf(w, "#define Syscall 0x%02x\n", Syscall) + + p := &syscall.PtraceRegs{} + fmt.Fprintf(w, "\n// Ptrace registers.\n") + fmt.Fprintf(w, "#define PTRACE_R15 0x%02x\n", reflect.ValueOf(&p.R15).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_R14 0x%02x\n", reflect.ValueOf(&p.R14).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_R13 0x%02x\n", reflect.ValueOf(&p.R13).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_R12 0x%02x\n", reflect.ValueOf(&p.R12).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_RBP 0x%02x\n", reflect.ValueOf(&p.Rbp).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_RBX 0x%02x\n", reflect.ValueOf(&p.Rbx).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_R11 0x%02x\n", reflect.ValueOf(&p.R11).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_R10 0x%02x\n", reflect.ValueOf(&p.R10).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_R9 0x%02x\n", reflect.ValueOf(&p.R9).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_R8 0x%02x\n", reflect.ValueOf(&p.R8).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_RAX 0x%02x\n", 
reflect.ValueOf(&p.Rax).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_RCX 0x%02x\n", reflect.ValueOf(&p.Rcx).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_RDX 0x%02x\n", reflect.ValueOf(&p.Rdx).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_RSI 0x%02x\n", reflect.ValueOf(&p.Rsi).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_RDI 0x%02x\n", reflect.ValueOf(&p.Rdi).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_ORIGRAX 0x%02x\n", reflect.ValueOf(&p.Orig_rax).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_RIP 0x%02x\n", reflect.ValueOf(&p.Rip).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_CS 0x%02x\n", reflect.ValueOf(&p.Cs).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_FLAGS 0x%02x\n", reflect.ValueOf(&p.Eflags).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_RSP 0x%02x\n", reflect.ValueOf(&p.Rsp).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_SS 0x%02x\n", reflect.ValueOf(&p.Ss).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_FS 0x%02x\n", reflect.ValueOf(&p.Fs_base).Pointer()-reflect.ValueOf(p).Pointer()) + fmt.Fprintf(w, "#define PTRACE_GS 0x%02x\n", reflect.ValueOf(&p.Gs_base).Pointer()-reflect.ValueOf(p).Pointer()) +} diff --git a/pkg/sentry/platform/ring0/pagetables/BUILD b/pkg/sentry/platform/ring0/pagetables/BUILD new file mode 100644 index 000000000..c0c481ab3 --- /dev/null +++ b/pkg/sentry/platform/ring0/pagetables/BUILD @@ -0,0 +1,32 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "pagetables", + srcs = [ + "pagetables.go", + "pagetables_amd64.go", + "pagetables_unsafe.go", + "pagetables_x86.go", + "pcids_x86.go", + ], + importpath = 
"gvisor.googlesource.com/gvisor/pkg/sentry/platform/ring0/pagetables", + visibility = [ + "//pkg/sentry/platform/kvm:__subpackages__", + "//pkg/sentry/platform/ring0:__subpackages__", + ], + deps = ["//pkg/sentry/usermem"], +) + +go_test( + name = "pagetables_test", + size = "small", + srcs = [ + "pagetables_test.go", + "pagetables_x86_test.go", + "pcids_x86_test.go", + ], + embed = [":pagetables"], + deps = ["//pkg/sentry/usermem"], +) diff --git a/pkg/sentry/platform/ring0/pagetables/pagetables.go b/pkg/sentry/platform/ring0/pagetables/pagetables.go new file mode 100644 index 000000000..3cbf0bfa5 --- /dev/null +++ b/pkg/sentry/platform/ring0/pagetables/pagetables.go @@ -0,0 +1,193 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pagetables provides a generic implementation of pagetables. +package pagetables + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// Node is a single node within a set of page tables. +type Node struct { + // unalignedData has unaligned data. Unfortunately, we can't really + // rely on the allocator to give us what we want here. So we just throw + // it at the wall and use the portion that matches. Gross. This may be + // changed in the future to use a different allocation mechanism. + // + // Access must happen via functions found in pagetables_unsafe.go. 
+ unalignedData [(2 * usermem.PageSize) - 1]byte + + // physical is the translated address of these entries. + // + // This is filled in at creation time. + physical uintptr +} + +// PageTables is a set of page tables. +type PageTables struct { + mu sync.Mutex + + // root is the pagetable root. + root *Node + + // translater is the translater passed at creation. + translater Translater + + // archPageTables includes architecture-specific features. + archPageTables + + // allNodes is a set of nodes indexed by translater address. + allNodes map[uintptr]*Node +} + +// Translater translates to guest physical addresses. +type Translater interface { + // TranslateToPhysical translates the given pointer object into a + // "physical" address. We do not require that it translates back, the + // reverse mapping is maintained internally. + TranslateToPhysical(*PTEs) uintptr +} + +// New returns new PageTables. +func New(t Translater, opts Opts) *PageTables { + p := &PageTables{ + translater: t, + allNodes: make(map[uintptr]*Node), + } + p.root = p.allocNode() + p.init(opts) + return p +} + +// New returns a new set of PageTables derived from the given one. +// +// This function should always be preferred to New if there are existing +// pagetables, as this function preserves architectural constraints relevant to +// managing multiple sets of pagetables. +func (p *PageTables) New() *PageTables { + np := &PageTables{ + translater: p.translater, + allNodes: make(map[uintptr]*Node), + } + np.root = np.allocNode() + np.initFrom(&p.archPageTables) + return np +} + +// setPageTable sets the given index as a page table. +func (p *PageTables) setPageTable(n *Node, index int, child *Node) { + phys := p.translater.TranslateToPhysical(child.PTEs()) + p.allNodes[phys] = child + pte := &n.PTEs()[index] + pte.setPageTable(phys) +} + +// clearPageTable clears the given entry. 
+func (p *PageTables) clearPageTable(n *Node, index int) { + pte := &n.PTEs()[index] + physical := pte.Address() + pte.Clear() + delete(p.allNodes, physical) +} + +// getPageTable returns the page table entry. +func (p *PageTables) getPageTable(n *Node, index int) *Node { + pte := &n.PTEs()[index] + physical := pte.Address() + child := p.allNodes[physical] + return child +} + +// Map installs a mapping with the given physical address. +// +// True is returned iff there was a previous mapping in the range. +// +// Precondition: addr & length must be aligned, their sum must not overflow. +func (p *PageTables) Map(addr usermem.Addr, length uintptr, user bool, at usermem.AccessType, physical uintptr) bool { + if at == usermem.NoAccess { + return p.Unmap(addr, length) + } + prev := false + p.mu.Lock() + end, ok := addr.AddLength(uint64(length)) + if !ok { + panic("pagetables.Map: overflow") + } + p.iterateRange(uintptr(addr), uintptr(end), true, func(s, e uintptr, pte *PTE, align uintptr) { + p := physical + (s - uintptr(addr)) + prev = prev || (pte.Valid() && (p != pte.Address() || at.Write != pte.Writeable() || at.Execute != pte.Executable())) + if p&align != 0 { + // We will install entries at a smaller granulaity if + // we don't install a valid entry here, however we must + // zap any existing entry to ensure this happens. + pte.Clear() + return + } + pte.Set(p, at.Write, at.Execute, user) + }) + p.mu.Unlock() + return prev +} + +// Unmap unmaps the given range. +// +// True is returned iff there was a previous mapping in the range. +func (p *PageTables) Unmap(addr usermem.Addr, length uintptr) bool { + p.mu.Lock() + count := 0 + p.iterateRange(uintptr(addr), uintptr(addr)+length, false, func(s, e uintptr, pte *PTE, align uintptr) { + pte.Clear() + count++ + }) + p.mu.Unlock() + return count > 0 +} + +// Release releases this address space. +// +// This must be called to release the PCID. +func (p *PageTables) Release() { + // Clear all pages. 
+ p.Unmap(0, ^uintptr(0)) + p.release() +} + +// Lookup returns the physical address for the given virtual address. +func (p *PageTables) Lookup(addr usermem.Addr) (physical uintptr, accessType usermem.AccessType) { + mask := uintptr(usermem.PageSize - 1) + off := uintptr(addr) & mask + addr = addr &^ usermem.Addr(mask) + p.iterateRange(uintptr(addr), uintptr(addr+usermem.PageSize), false, func(s, e uintptr, pte *PTE, align uintptr) { + if !pte.Valid() { + return + } + physical = pte.Address() + (s - uintptr(addr)) + off + accessType = usermem.AccessType{ + Read: true, + Write: pte.Writeable(), + Execute: pte.Executable(), + } + }) + return physical, accessType +} + +// allocNode allocates a new page. +func (p *PageTables) allocNode() *Node { + n := new(Node) + n.physical = p.translater.TranslateToPhysical(n.PTEs()) + return n +} diff --git a/pkg/sentry/platform/ring0/pagetables/pagetables_amd64.go b/pkg/sentry/platform/ring0/pagetables/pagetables_amd64.go new file mode 100644 index 000000000..b89665c96 --- /dev/null +++ b/pkg/sentry/platform/ring0/pagetables/pagetables_amd64.go @@ -0,0 +1,397 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package pagetables + +import ( + "fmt" + "sync/atomic" +) + +// Address constraints. +// +// The lowerTop and upperBottom currently apply to four-level pagetables; +// additional refactoring would be necessary to support five-level pagetables. 
+const ( + lowerTop = 0x00007fffffffffff + upperBottom = 0xffff800000000000 + + pteShift = 12 + pmdShift = 21 + pudShift = 30 + pgdShift = 39 + + pteMask = 0x1ff << pteShift + pmdMask = 0x1ff << pmdShift + pudMask = 0x1ff << pudShift + pgdMask = 0x1ff << pgdShift + + pteSize = 1 << pteShift + pmdSize = 1 << pmdShift + pudSize = 1 << pudShift + pgdSize = 1 << pgdShift +) + +// Bits in page table entries. +const ( + present = 0x001 + writable = 0x002 + user = 0x004 + writeThrough = 0x008 + cacheDisable = 0x010 + accessed = 0x020 + dirty = 0x040 + super = 0x080 + executeDisable = 1 << 63 +) + +// PTE is a page table entry. +type PTE uint64 + +// Clear clears this PTE, including super page information. +func (p *PTE) Clear() { + atomic.StoreUint64((*uint64)(p), 0) +} + +// Valid returns true iff this entry is valid. +func (p *PTE) Valid() bool { + return atomic.LoadUint64((*uint64)(p))&present != 0 +} + +// Writeable returns true iff the page is writable. +func (p *PTE) Writeable() bool { + return atomic.LoadUint64((*uint64)(p))&writable != 0 +} + +// User returns true iff the page is user-accessible. +func (p *PTE) User() bool { + return atomic.LoadUint64((*uint64)(p))&user != 0 +} + +// Executable returns true iff the page is executable. +func (p *PTE) Executable() bool { + return atomic.LoadUint64((*uint64)(p))&executeDisable == 0 +} + +// SetSuper sets this page as a super page. +// +// The page must not be valid or a panic will result. +func (p *PTE) SetSuper() { + if p.Valid() { + // This is not allowed. + panic("SetSuper called on valid page!") + } + atomic.StoreUint64((*uint64)(p), super) +} + +// IsSuper returns true iff this page is a super page. +func (p *PTE) IsSuper() bool { + return atomic.LoadUint64((*uint64)(p))&super != 0 +} + +// Set sets this PTE value. 
+func (p *PTE) Set(addr uintptr, write, execute bool, userAccessible bool) { + v := uint64(addr)&^uint64(0xfff) | present | accessed + if userAccessible { + v |= user + } + if !execute { + v |= executeDisable + } + if write { + v |= writable | dirty + } + if p.IsSuper() { + v |= super + } + atomic.StoreUint64((*uint64)(p), v) +} + +// setPageTable sets this PTE value and forces the write bit and super bit to +// be cleared. This is used explicitly for breaking super pages. +func (p *PTE) setPageTable(addr uintptr) { + v := uint64(addr)&^uint64(0xfff) | present | user | writable | accessed | dirty + atomic.StoreUint64((*uint64)(p), v) +} + +// Address extracts the address. This should only be used if Valid returns true. +func (p *PTE) Address() uintptr { + return uintptr(atomic.LoadUint64((*uint64)(p)) & ^uint64(executeDisable|0xfff)) +} + +// entriesPerPage is the number of PTEs per page. +const entriesPerPage = 512 + +// PTEs is a collection of entries. +type PTEs [entriesPerPage]PTE + +// next returns the next address quantized by the given size. +func next(start uint64, size uint64) uint64 { + start &= ^(size - 1) + start += size + return start +} + +// iterateRange iterates over all appropriate levels of page tables for the given range. +// +// If alloc is set, then Set _must_ be called on all given PTEs. The exception +// is super pages. If a valid super page cannot be installed, then the walk +// will continue to individual entries. +// +// This algorithm will attempt to maximize the use of super pages whenever +// possible. Whether a super page is provided will be clear through the range +// provided in the callback. +// +// Note that if alloc set, then no gaps will be present. However, if alloc is +// not set, then the iteration will likely be full of gaps. +// +// Note that this function should generally be avoided in favor of Map, Unmap, +// etc. when not necessary. +// +// Precondition: startAddr and endAddr must be page-aligned. 
+// Precondition: startAddr must be less than endAddr.
+ pudNode := p.getPageTable(p.root, pgdIndex) + clearPUDEntries := 0 + + for pudIndex := int((start & pudMask) >> pudShift); start < end && pudIndex < entriesPerPage; pudIndex++ { + pudEntry := &(pudNode.PTEs()[pudIndex]) + if !pudEntry.Valid() { + if !alloc { + // Skip over this entry. + clearPUDEntries++ + start = next(start, pudSize) + continue + } + + // This level has 1-GB super pages. Is this + // entire region contained in a single PUD + // entry? If so, we can skip allocating a new + // page for the pmd. + if start&(pudSize-1) == 0 && end-start >= pudSize { + pudEntry.SetSuper() + fn(uintptr(start), uintptr(start+pudSize), pudEntry, pudSize-1) + if pudEntry.Valid() { + start = next(start, pudSize) + continue + } + } + + // Allocate a new pud. + p.setPageTable(pudNode, pudIndex, p.allocNode()) + + } else if pudEntry.IsSuper() { + // Does this page need to be split? + if start&(pudSize-1) != 0 || end < next(start, pudSize) { + currentAddr := uint64(pudEntry.Address()) + writeable := pudEntry.Writeable() + executable := pudEntry.Executable() + user := pudEntry.User() + + // Install the relevant entries. + pmdNode := p.allocNode() + pmdEntries := pmdNode.PTEs() + for index := 0; index < entriesPerPage; index++ { + pmdEntry := &pmdEntries[index] + pmdEntry.SetSuper() + pmdEntry.Set(uintptr(currentAddr), writeable, executable, user) + currentAddr += pmdSize + } + + // Reset to point to the new page. + p.setPageTable(pudNode, pudIndex, pmdNode) + } else { + // A super page to be checked directly. + fn(uintptr(start), uintptr(start+pudSize), pudEntry, pudSize-1) + + // Might have been cleared. + if !pudEntry.Valid() { + clearPUDEntries++ + } + + // Note that the super page was changed. + start = next(start, pudSize) + continue + } + } + + // Map the next level, since this is valid. 
+ pmdNode := p.getPageTable(pudNode, pudIndex) + clearPMDEntries := 0 + + for pmdIndex := int((start & pmdMask) >> pmdShift); start < end && pmdIndex < entriesPerPage; pmdIndex++ { + pmdEntry := &pmdNode.PTEs()[pmdIndex] + if !pmdEntry.Valid() { + if !alloc { + // Skip over this entry. + clearPMDEntries++ + start = next(start, pmdSize) + continue + } + + // This level has 2-MB huge pages. If this + // region is contined in a single PMD entry? + // As above, we can skip allocating a new page. + if start&(pmdSize-1) == 0 && end-start >= pmdSize { + pmdEntry.SetSuper() + fn(uintptr(start), uintptr(start+pmdSize), pmdEntry, pmdSize-1) + if pmdEntry.Valid() { + start = next(start, pmdSize) + continue + } + } + + // Allocate a new pmd. + p.setPageTable(pmdNode, pmdIndex, p.allocNode()) + + } else if pmdEntry.IsSuper() { + // Does this page need to be split? + if start&(pmdSize-1) != 0 || end < next(start, pmdSize) { + currentAddr := uint64(pmdEntry.Address()) + writeable := pmdEntry.Writeable() + executable := pmdEntry.Executable() + user := pmdEntry.User() + + // Install the relevant entries. + pteNode := p.allocNode() + pteEntries := pteNode.PTEs() + for index := 0; index < entriesPerPage; index++ { + pteEntry := &pteEntries[index] + pteEntry.Set(uintptr(currentAddr), writeable, executable, user) + currentAddr += pteSize + } + + // Reset to point to the new page. + p.setPageTable(pmdNode, pmdIndex, pteNode) + } else { + // A huge page to be checked directly. + fn(uintptr(start), uintptr(start+pmdSize), pmdEntry, pmdSize-1) + + // Might have been cleared. + if !pmdEntry.Valid() { + clearPMDEntries++ + } + + // Note that the huge page was changed. + start = next(start, pmdSize) + continue + } + } + + // Map the next level, since this is valid. 
+ pteNode := p.getPageTable(pmdNode, pmdIndex) + clearPTEEntries := 0 + + for pteIndex := int((start & pteMask) >> pteShift); start < end && pteIndex < entriesPerPage; pteIndex++ { + pteEntry := &pteNode.PTEs()[pteIndex] + if !pteEntry.Valid() && !alloc { + clearPTEEntries++ + start += pteSize + continue + } + + // At this point, we are guaranteed that start%pteSize == 0. + fn(uintptr(start), uintptr(start+pteSize), pteEntry, pteSize-1) + if !pteEntry.Valid() { + if alloc { + panic("PTE not set after iteration with alloc=true!") + } + clearPTEEntries++ + } + + // Note that the pte was changed. + start += pteSize + continue + } + + // Check if we no longer need this page. + if clearPTEEntries == entriesPerPage { + p.clearPageTable(pmdNode, pmdIndex) + clearPMDEntries++ + } + } + + // Check if we no longer need this page. + if clearPMDEntries == entriesPerPage { + p.clearPageTable(pudNode, pudIndex) + clearPUDEntries++ + } + } + + // Check if we no longer need this page. + if clearPUDEntries == entriesPerPage { + p.clearPageTable(p.root, pgdIndex) + } + } +} diff --git a/pkg/sentry/platform/ring0/pagetables/pagetables_test.go b/pkg/sentry/platform/ring0/pagetables/pagetables_test.go new file mode 100644 index 000000000..9cbc0e3b0 --- /dev/null +++ b/pkg/sentry/platform/ring0/pagetables/pagetables_test.go @@ -0,0 +1,161 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pagetables + +import ( + "reflect" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +type reflectTranslater struct{} + +func (r reflectTranslater) TranslateToPhysical(ptes *PTEs) uintptr { + return reflect.ValueOf(ptes).Pointer() +} + +type mapping struct { + start uintptr + length uintptr + addr uintptr + writeable bool +} + +func checkMappings(t *testing.T, pt *PageTables, m []mapping) { + var ( + current int + found []mapping + failed string + ) + + // Iterate over all the mappings. + pt.iterateRange(0, ^uintptr(0), false, func(s, e uintptr, pte *PTE, align uintptr) { + found = append(found, mapping{ + start: s, + length: e - s, + addr: pte.Address(), + writeable: pte.Writeable(), + }) + if failed != "" { + // Don't keep looking for errors. + return + } + + if current >= len(m) { + failed = "more mappings than expected" + } else if m[current].start != s { + failed = "start didn't match expected" + } else if m[current].length != (e - s) { + failed = "end didn't match expected" + } else if m[current].addr != pte.Address() { + failed = "address didn't match expected" + } else if m[current].writeable != pte.Writeable() { + failed = "writeable didn't match" + } + current++ + }) + + // Were we expected additional mappings? + if failed == "" && current != len(m) { + failed = "insufficient mappings found" + } + + // Emit a meaningful error message on failure. + if failed != "" { + t.Errorf("%s; got %#v, wanted %#v", failed, found, m) + } +} + +func TestAllocFree(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + pt.Release() +} + +func TestUnmap(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Map and unmap one entry. + pt.Map(0x400000, pteSize, true, usermem.ReadWrite, pteSize*42) + pt.Unmap(0x400000, pteSize) + + checkMappings(t, pt, nil) + pt.Release() +} + +func TestReadOnly(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Map one entry. 
+ pt.Map(0x400000, pteSize, true, usermem.Read, pteSize*42) + + checkMappings(t, pt, []mapping{ + {0x400000, pteSize, pteSize * 42, false}, + }) + pt.Release() +} + +func TestReadWrite(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Map one entry. + pt.Map(0x400000, pteSize, true, usermem.ReadWrite, pteSize*42) + + checkMappings(t, pt, []mapping{ + {0x400000, pteSize, pteSize * 42, true}, + }) + pt.Release() +} + +func TestSerialEntries(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Map two sequential entries. + pt.Map(0x400000, pteSize, true, usermem.ReadWrite, pteSize*42) + pt.Map(0x401000, pteSize, true, usermem.ReadWrite, pteSize*47) + + checkMappings(t, pt, []mapping{ + {0x400000, pteSize, pteSize * 42, true}, + {0x401000, pteSize, pteSize * 47, true}, + }) + pt.Release() +} + +func TestSpanningEntries(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Span a pgd with two pages. + pt.Map(0x00007efffffff000, 2*pteSize, true, usermem.Read, pteSize*42) + + checkMappings(t, pt, []mapping{ + {0x00007efffffff000, pteSize, pteSize * 42, false}, + {0x00007f0000000000, pteSize, pteSize * 43, false}, + }) + pt.Release() +} + +func TestSparseEntries(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Map two entries in different pgds. + pt.Map(0x400000, pteSize, true, usermem.ReadWrite, pteSize*42) + pt.Map(0x00007f0000000000, pteSize, true, usermem.Read, pteSize*47) + + checkMappings(t, pt, []mapping{ + {0x400000, pteSize, pteSize * 42, true}, + {0x00007f0000000000, pteSize, pteSize * 47, false}, + }) + pt.Release() +} diff --git a/pkg/sentry/platform/ring0/pagetables/pagetables_unsafe.go b/pkg/sentry/platform/ring0/pagetables/pagetables_unsafe.go new file mode 100644 index 000000000..a2b44fb79 --- /dev/null +++ b/pkg/sentry/platform/ring0/pagetables/pagetables_unsafe.go @@ -0,0 +1,31 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pagetables + +import ( + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// PTEs returns aligned PTE entries. +func (n *Node) PTEs() *PTEs { + addr := uintptr(unsafe.Pointer(&n.unalignedData[0])) + offset := addr & (usermem.PageSize - 1) + if offset != 0 { + offset = usermem.PageSize - offset + } + return (*PTEs)(unsafe.Pointer(addr + offset)) +} diff --git a/pkg/sentry/platform/ring0/pagetables/pagetables_x86.go b/pkg/sentry/platform/ring0/pagetables/pagetables_x86.go new file mode 100644 index 000000000..dac66373f --- /dev/null +++ b/pkg/sentry/platform/ring0/pagetables/pagetables_x86.go @@ -0,0 +1,79 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build i386 amd64 + +package pagetables + +// Opts are pagetable options. +type Opts struct { + EnablePCID bool +} + +// archPageTables has x86-specific features. 
+type archPageTables struct {
+	// pcids is the PCID database.
+	pcids *PCIDs
+
+	// pcid is the globally unique identifier, or zero if none were
+	// available or pcids is nil.
+	pcid uint16
+}
+
+// init initializes arch-specific features.
+func (a *archPageTables) init(opts Opts) {
+	if opts.EnablePCID {
+		a.pcids = NewPCIDs()
+		a.pcid = a.pcids.allocate()
+	}
+}
+
+// initFrom initializes arch-specific features from an existing entry.
+func (a *archPageTables) initFrom(other *archPageTables) {
+	a.pcids = other.pcids // Refer to the same PCID database.
+	if a.pcids != nil {
+		a.pcid = a.pcids.allocate()
+	}
+}
+
+// release is called from Release.
+func (a *archPageTables) release() {
+	// Return the PCID.
+	if a.pcids != nil {
+		a.pcids.free(a.pcid)
+	}
+}
+
+// CR3 returns the CR3 value for these tables.
+//
+// This may be called in interrupt contexts.
+//
+//go:nosplit
+func (p *PageTables) CR3() uint64 {
+	// Bit 63 is set to avoid flushing the PCID (per SDM 4.10.4.1).
+	const noFlushBit uint64 = 0x8000000000000000
+	if p.pcid != 0 {
+		return noFlushBit | uint64(p.root.physical) | uint64(p.pcid)
+	}
+	return uint64(p.root.physical)
+}
+
+// FlushCR3 returns the CR3 value that flushes the TLB.
+//
+// This may be called in interrupt contexts.
+//
+//go:nosplit
+func (p *PageTables) FlushCR3() uint64 {
+	return uint64(p.root.physical) | uint64(p.pcid)
+}
diff --git a/pkg/sentry/platform/ring0/pagetables/pagetables_x86_test.go b/pkg/sentry/platform/ring0/pagetables/pagetables_x86_test.go
new file mode 100644
index 000000000..1fc403c48
--- /dev/null
+++ b/pkg/sentry/platform/ring0/pagetables/pagetables_x86_test.go
@@ -0,0 +1,79 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build i386 amd64 + +package pagetables + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +func Test2MAnd4K(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Map a small page and a huge page. + pt.Map(0x400000, pteSize, true, usermem.ReadWrite, pteSize*42) + pt.Map(0x00007f0000000000, 1<<21, true, usermem.Read, pmdSize*47) + + checkMappings(t, pt, []mapping{ + {0x400000, pteSize, pteSize * 42, true}, + {0x00007f0000000000, pmdSize, pmdSize * 47, false}, + }) + pt.Release() +} + +func Test1GAnd4K(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Map a small page and a super page. + pt.Map(0x400000, pteSize, true, usermem.ReadWrite, pteSize*42) + pt.Map(0x00007f0000000000, pudSize, true, usermem.Read, pudSize*47) + + checkMappings(t, pt, []mapping{ + {0x400000, pteSize, pteSize * 42, true}, + {0x00007f0000000000, pudSize, pudSize * 47, false}, + }) + pt.Release() +} + +func TestSplit1GPage(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Map a super page and knock out the middle. + pt.Map(0x00007f0000000000, pudSize, true, usermem.Read, pudSize*42) + pt.Unmap(usermem.Addr(0x00007f0000000000+pteSize), pudSize-(2*pteSize)) + + checkMappings(t, pt, []mapping{ + {0x00007f0000000000, pteSize, pudSize * 42, false}, + {0x00007f0000000000 + pudSize - pteSize, pteSize, pudSize*42 + pudSize - pteSize, false}, + }) + pt.Release() +} + +func TestSplit2MPage(t *testing.T) { + pt := New(reflectTranslater{}, Opts{}) + + // Map a huge page and knock out the middle. 
+	pt.Map(0x00007f0000000000, pmdSize, true, usermem.Read, pmdSize*42)
+	pt.Unmap(usermem.Addr(0x00007f0000000000+pteSize), pmdSize-(2*pteSize))
+
+	checkMappings(t, pt, []mapping{
+		{0x00007f0000000000, pteSize, pmdSize * 42, false},
+		{0x00007f0000000000 + pmdSize - pteSize, pteSize, pmdSize*42 + pmdSize - pteSize, false},
+	})
+	pt.Release()
+}
diff --git a/pkg/sentry/platform/ring0/pagetables/pcids_x86.go b/pkg/sentry/platform/ring0/pagetables/pcids_x86.go
new file mode 100644
index 000000000..509e8c0d9
--- /dev/null
+++ b/pkg/sentry/platform/ring0/pagetables/pcids_x86.go
@@ -0,0 +1,74 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build i386 amd64
+
+package pagetables
+
+import (
+	"sync"
+)
+
+// maxPCID is the maximum allowed PCID.
+const maxPCID = 4095
+
+// PCIDs is a simple PCID database.
+type PCIDs struct {
+	mu sync.Mutex
+
+	// last is the last fresh PCID given out (not including the available
+	// pool). If last >= maxPCID, then the only PCIDs available are in the
+	// available pool below.
+	last uint16
+
+	// available are PCIDs that have been freed.
+	available map[uint16]struct{}
+}
+
+// NewPCIDs returns a new PCID set.
+func NewPCIDs() *PCIDs {
+	return &PCIDs{
+		available: make(map[uint16]struct{}),
+	}
+}
+
+// allocate returns an unused PCID, or zero if all are taken.
+func (p *PCIDs) allocate() uint16 { + p.mu.Lock() + defer p.mu.Unlock() + if len(p.available) > 0 { + for id := range p.available { + delete(p.available, id) + return id + } + } + if id := p.last + 1; id <= maxPCID { + p.last = id + return id + } + // Nothing available. + return 0 +} + +// free returns a PCID to the pool. +// +// It is safe to call free with a zero pcid. That is, you may always call free +// with anything returned by allocate. +func (p *PCIDs) free(id uint16) { + p.mu.Lock() + defer p.mu.Unlock() + if id != 0 { + p.available[id] = struct{}{} + } +} diff --git a/pkg/sentry/platform/ring0/pagetables/pcids_x86_test.go b/pkg/sentry/platform/ring0/pagetables/pcids_x86_test.go new file mode 100644 index 000000000..0b555cd76 --- /dev/null +++ b/pkg/sentry/platform/ring0/pagetables/pcids_x86_test.go @@ -0,0 +1,65 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build i386 amd64 + +package pagetables + +import ( + "testing" +) + +func TestMaxPCID(t *testing.T) { + p := NewPCIDs() + for i := 0; i < maxPCID; i++ { + if id := p.allocate(); id != uint16(i+1) { + t.Errorf("got %d, expected %d", id, i+1) + } + } + if id := p.allocate(); id != 0 { + if id != 0 { + t.Errorf("got %d, expected 0", id) + } + } +} + +func TestFirstPCID(t *testing.T) { + p := NewPCIDs() + if id := p.allocate(); id != 1 { + t.Errorf("got %d, expected 1", id) + } +} + +func TestFreePCID(t *testing.T) { + p := NewPCIDs() + p.free(0) + if id := p.allocate(); id != 1 { + t.Errorf("got %d, expected 1 (not zero)", id) + } +} + +func TestReusePCID(t *testing.T) { + p := NewPCIDs() + id := p.allocate() + if id != 1 { + t.Errorf("got %d, expected 1", id) + } + p.free(id) + if id := p.allocate(); id != 1 { + t.Errorf("got %d, expected 1", id) + } + if id := p.allocate(); id != 2 { + t.Errorf("got %d, expected 2", id) + } +} diff --git a/pkg/sentry/platform/ring0/ring0.go b/pkg/sentry/platform/ring0/ring0.go new file mode 100644 index 000000000..4991031c5 --- /dev/null +++ b/pkg/sentry/platform/ring0/ring0.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ring0 provides basic operating system-level stubs. 
+package ring0 diff --git a/pkg/sentry/platform/ring0/x86.go b/pkg/sentry/platform/ring0/x86.go new file mode 100644 index 000000000..e16f6c599 --- /dev/null +++ b/pkg/sentry/platform/ring0/x86.go @@ -0,0 +1,242 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build i386 amd64 + +package ring0 + +import ( + "gvisor.googlesource.com/gvisor/pkg/cpuid" +) + +// Useful bits. +const ( + _CR0_PE = 1 << 0 + _CR0_ET = 1 << 4 + _CR0_PG = 1 << 31 + + _CR4_PSE = 1 << 4 + _CR4_PAE = 1 << 5 + _CR4_PGE = 1 << 7 + _CR4_OSFXSR = 1 << 9 + _CR4_OSXMMEXCPT = 1 << 10 + _CR4_FSGSBASE = 1 << 16 + _CR4_PCIDE = 1 << 17 + _CR4_OSXSAVE = 1 << 18 + _CR4_SMEP = 1 << 20 + + _RFLAGS_AC = 1 << 18 + _RFLAGS_NT = 1 << 14 + _RFLAGS_IOPL = 3 << 12 + _RFLAGS_DF = 1 << 10 + _RFLAGS_IF = 1 << 9 + _RFLAGS_STEP = 1 << 8 + _RFLAGS_RESERVED = 1 << 1 + + _EFER_SCE = 0x001 + _EFER_LME = 0x100 + _EFER_NX = 0x800 + + _MSR_STAR = 0xc0000081 + _MSR_LSTAR = 0xc0000082 + _MSR_CSTAR = 0xc0000083 + _MSR_SYSCALL_MASK = 0xc0000084 +) + +// Vector is an exception vector. +type Vector uintptr + +// Exception vectors. 
+const ( + DivideByZero Vector = iota + Debug + NMI + Breakpoint + Overflow + BoundRangeExceeded + InvalidOpcode + DeviceNotAvailable + DoubleFault + CoprocessorSegmentOverrun + InvalidTSS + SegmentNotPresent + StackSegmentFault + GeneralProtectionFault + PageFault + _ + X87FloatingPointException + AlignmentCheck + MachineCheck + SIMDFloatingPointException + VirtualizationException + SecurityException = 0x1e + SyscallInt80 = 0x80 + _NR_INTERRUPTS = SyscallInt80 + 1 +) + +// System call vectors. +const ( + Syscall Vector = _NR_INTERRUPTS +) + +// VirtualAddressBits returns the number bits available for virtual addresses. +// +// Note that sign-extension semantics apply to the highest order bit. +// +// FIXME: This should use the cpuid passed to Init. +func VirtualAddressBits() uint32 { + ax, _, _, _ := cpuid.HostID(0x80000008, 0) + return (ax >> 8) & 0xff +} + +// PhysicalAddressBits returns the number of bits available for physical addresses. +// +// FIXME: This should use the cpuid passed to Init. +func PhysicalAddressBits() uint32 { + ax, _, _, _ := cpuid.HostID(0x80000008, 0) + return ax & 0xff +} + +// Selector is a segment Selector. +type Selector uint16 + +// SegmentDescriptor is a segment descriptor. +type SegmentDescriptor struct { + bits [2]uint32 +} + +// descriptorTable is a collection of descriptors. +type descriptorTable [32]SegmentDescriptor + +// SegmentDescriptorFlags are typed flags within a descriptor. +type SegmentDescriptorFlags uint32 + +// SegmentDescriptorFlag declarations. +const ( + SegmentDescriptorAccess SegmentDescriptorFlags = 1 << 8 // Access bit (always set). + SegmentDescriptorWrite = 1 << 9 // Write permission. + SegmentDescriptorExpandDown = 1 << 10 // Grows down, not used. + SegmentDescriptorExecute = 1 << 11 // Execute permission. + SegmentDescriptorSystem = 1 << 12 // Zero => system, 1 => user code/data. + SegmentDescriptorPresent = 1 << 15 // Present. + SegmentDescriptorAVL = 1 << 20 // Available. 
+ SegmentDescriptorLong = 1 << 21 // Long mode. + SegmentDescriptorDB = 1 << 22 // 16 or 32-bit. + SegmentDescriptorG = 1 << 23 // Granularity: page or byte. +) + +// Base returns the descriptor's base linear address. +func (d *SegmentDescriptor) Base() uint32 { + return d.bits[1]&0xFF000000 | (d.bits[1]&0x000000FF)<<16 | d.bits[0]>>16 +} + +// Limit returns the descriptor size. +func (d *SegmentDescriptor) Limit() uint32 { + l := d.bits[0]&0xFFFF | d.bits[1]&0xF0000 + if d.bits[1]&uint32(SegmentDescriptorG) != 0 { + l <<= 12 + l |= 0xFFF + } + return l +} + +// Flags returns descriptor flags. +func (d *SegmentDescriptor) Flags() SegmentDescriptorFlags { + return SegmentDescriptorFlags(d.bits[1] & 0x00F09F00) +} + +// DPL returns the descriptor privilege level. +func (d *SegmentDescriptor) DPL() int { + return int((d.bits[1] >> 13) & 3) +} + +func (d *SegmentDescriptor) setNull() { + d.bits[0] = 0 + d.bits[1] = 0 +} + +func (d *SegmentDescriptor) set(base, limit uint32, dpl int, flags SegmentDescriptorFlags) { + flags |= SegmentDescriptorPresent + if limit>>12 != 0 { + limit >>= 12 + flags |= SegmentDescriptorG + } + d.bits[0] = base<<16 | limit&0xFFFF + d.bits[1] = base&0xFF000000 | (base>>16)&0xFF | limit&0x000F0000 | uint32(flags) | uint32(dpl)<<13 +} + +func (d *SegmentDescriptor) setCode32(base, limit uint32, dpl int) { + d.set(base, limit, dpl, + SegmentDescriptorDB| + SegmentDescriptorExecute| + SegmentDescriptorSystem) +} + +func (d *SegmentDescriptor) setCode64(base, limit uint32, dpl int) { + d.set(base, limit, dpl, + SegmentDescriptorG| + SegmentDescriptorLong| + SegmentDescriptorExecute| + SegmentDescriptorSystem) +} + +func (d *SegmentDescriptor) setData(base, limit uint32, dpl int) { + d.set(base, limit, dpl, + SegmentDescriptorWrite| + SegmentDescriptorSystem) +} + +// setHi is only used for the TSS segment, which is magically 64-bits. 
+func (d *SegmentDescriptor) setHi(base uint32) { + d.bits[0] = base + d.bits[1] = 0 +} + +// Gate64 is a 64-bit task, trap, or interrupt gate. +type Gate64 struct { + bits [4]uint32 +} + +// idt64 is a 64-bit interrupt descriptor table. +type idt64 [_NR_INTERRUPTS]Gate64 + +func (g *Gate64) setInterrupt(cs Selector, rip uint64, dpl int, ist int) { + g.bits[0] = uint32(cs)<<16 | uint32(rip)&0xFFFF + g.bits[1] = uint32(rip)&0xFFFF0000 | SegmentDescriptorPresent | uint32(dpl)<<13 | 14<<8 | uint32(ist)&0x7 + g.bits[2] = uint32(rip >> 32) +} + +func (g *Gate64) setTrap(cs Selector, rip uint64, dpl int, ist int) { + g.setInterrupt(cs, rip, dpl, ist) + g.bits[1] |= 1 << 8 +} + +// TaskState64 is a 64-bit task state structure. +type TaskState64 struct { + _ uint32 + rsp0Lo, rsp0Hi uint32 + rsp1Lo, rsp1Hi uint32 + rsp2Lo, rsp2Hi uint32 + _ [2]uint32 + ist1Lo, ist1Hi uint32 + ist2Lo, ist2Hi uint32 + ist3Lo, ist3Hi uint32 + ist4Lo, ist4Hi uint32 + ist5Lo, ist5Hi uint32 + ist6Lo, ist6Hi uint32 + ist7Lo, ist7Hi uint32 + _ [2]uint32 + _ uint16 + ioPerm uint16 +} diff --git a/pkg/sentry/platform/safecopy/BUILD b/pkg/sentry/platform/safecopy/BUILD new file mode 100644 index 000000000..8b9f29403 --- /dev/null +++ b/pkg/sentry/platform/safecopy/BUILD @@ -0,0 +1,28 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "safecopy", + srcs = [ + "atomic_amd64.s", + "memclr_amd64.s", + "memcpy_amd64.s", + "safecopy.go", + "safecopy_unsafe.go", + "sighandler_amd64.s", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/platform/safecopy", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/syserror", + ], +) + +go_test( + name = "safecopy_test", + srcs = [ + "safecopy_test.go", + ], + embed = [":safecopy"], +) diff --git a/pkg/sentry/platform/safecopy/atomic_amd64.s b/pkg/sentry/platform/safecopy/atomic_amd64.s new file mode 100644 index 000000000..69947dec3 --- /dev/null +++ 
b/pkg/sentry/platform/safecopy/atomic_amd64.s @@ -0,0 +1,108 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "textflag.h" + +// handleSwapUint32Fault returns the value stored in DI. Control is transferred +// to it when swapUint32 below receives SIGSEGV or SIGBUS, with the signal +// number stored in DI. +// +// It must have the same frame configuration as swapUint32 so that it can undo +// any potential call frame set up by the assembler. +TEXT handleSwapUint32Fault(SB), NOSPLIT, $0-24 + MOVL DI, sig+20(FP) + RET + +// swapUint32 atomically stores new into *addr and returns (the previous *addr +// value, 0). If a SIGSEGV or SIGBUS signal is received during the swap, the +// value of old is unspecified, and sig is the number of the signal that was +// received. +// +// Preconditions: addr must be aligned to a 4-byte boundary. +// +//func swapUint32(ptr unsafe.Pointer, new uint32) (old uint32, sig int32) +TEXT ·swapUint32(SB), NOSPLIT, $0-24 + // Store 0 as the returned signal number. If we run to completion, + // this is the value the caller will see; if a signal is received, + // handleSwapUint32Fault will store a different value in this address. + MOVL $0, sig+20(FP) + + MOVQ addr+0(FP), DI + MOVL new+8(FP), AX + XCHGL AX, 0(DI) + MOVL AX, old+16(FP) + RET + +// handleSwapUint64Fault returns the value stored in DI. 
Control is transferred +// to it when swapUint64 below receives SIGSEGV or SIGBUS, with the signal +// number stored in DI. +// +// It must have the same frame configuration as swapUint64 so that it can undo +// any potential call frame set up by the assembler. +TEXT handleSwapUint64Fault(SB), NOSPLIT, $0-28 + MOVL DI, sig+24(FP) + RET + +// swapUint64 atomically stores new into *addr and returns (the previous *addr +// value, 0). If a SIGSEGV or SIGBUS signal is received during the swap, the +// value of old is unspecified, and sig is the number of the signal that was +// received. +// +// Preconditions: addr must be aligned to a 8-byte boundary. +// +//func swapUint64(ptr unsafe.Pointer, new uint64) (old uint64, sig int32) +TEXT ·swapUint64(SB), NOSPLIT, $0-28 + // Store 0 as the returned signal number. If we run to completion, + // this is the value the caller will see; if a signal is received, + // handleSwapUint64Fault will store a different value in this address. + MOVL $0, sig+24(FP) + + MOVQ addr+0(FP), DI + MOVQ new+8(FP), AX + XCHGQ AX, 0(DI) + MOVQ AX, old+16(FP) + RET + +// handleCompareAndSwapUint32Fault returns the value stored in DI. Control is +// transferred to it when swapUint64 below receives SIGSEGV or SIGBUS, with the +// signal number stored in DI. +// +// It must have the same frame configuration as compareAndSwapUint32 so that it +// can undo any potential call frame set up by the assembler. +TEXT handleCompareAndSwapUint32Fault(SB), NOSPLIT, $0-24 + MOVL DI, sig+20(FP) + RET + +// compareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns +// (the value previously stored at addr, 0). If a SIGSEGV or SIGBUS signal is +// received during the operation, the value of prev is unspecified, and sig is +// the number of the signal that was received. +// +// Preconditions: addr must be aligned to a 4-byte boundary. 
+// +//func compareAndSwapUint32(ptr unsafe.Pointer, old, new uint32) (prev uint32, sig int32) +TEXT ·compareAndSwapUint32(SB), NOSPLIT, $0-24 + // Store 0 as the returned signal number. If we run to completion, this is + // the value the caller will see; if a signal is received, + // handleCompareAndSwapUint32Fault will store a different value in this + // address. + MOVL $0, sig+20(FP) + + MOVQ addr+0(FP), DI + MOVL old+8(FP), AX + MOVL new+12(FP), DX + LOCK + CMPXCHGL DX, 0(DI) + MOVL AX, prev+16(FP) + RET diff --git a/pkg/sentry/platform/safecopy/memclr_amd64.s b/pkg/sentry/platform/safecopy/memclr_amd64.s new file mode 100644 index 000000000..7d1019f60 --- /dev/null +++ b/pkg/sentry/platform/safecopy/memclr_amd64.s @@ -0,0 +1,157 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "textflag.h" + +// handleMemclrFault returns (the value stored in AX, the value stored in DI). +// Control is transferred to it when memclr below receives SIGSEGV or SIGBUS, +// with the faulting address stored in AX and the signal number stored in DI. +// +// It must have the same frame configuration as memclr so that it can undo any +// potential call frame set up by the assembler. +TEXT handleMemclrFault(SB), NOSPLIT, $0-28 + MOVQ AX, addr+16(FP) + MOVL DI, sig+24(FP) + RET + +// memclr sets the n bytes following ptr to zeroes. 
If a SIGSEGV or SIGBUS +// signal is received during the write, it returns the address that caused the +// fault and the number of the signal that was received. Otherwise, it returns +// an unspecified address and a signal number of 0. +// +// Data is written in order, such that if a fault happens at address p, it is +// safe to assume that all data before p-maxRegisterSize has already been +// successfully written. +// +// The code is derived from runtime.memclrNoHeapPointers. +// +// func memclr(ptr unsafe.Pointer, n uintptr) (fault unsafe.Pointer, sig int32) +TEXT ·memclr(SB), NOSPLIT, $0-28 + // Store 0 as the returned signal number. If we run to completion, + // this is the value the caller will see; if a signal is received, + // handleMemclrFault will store a different value in this address. + MOVL $0, sig+24(FP) + + MOVQ ptr+0(FP), DI + MOVQ n+8(FP), BX + XORQ AX, AX + + // MOVOU seems always faster than REP STOSQ. +tail: + TESTQ BX, BX + JEQ _0 + CMPQ BX, $2 + JBE _1or2 + CMPQ BX, $4 + JBE _3or4 + CMPQ BX, $8 + JB _5through7 + JE _8 + CMPQ BX, $16 + JBE _9through16 + PXOR X0, X0 + CMPQ BX, $32 + JBE _17through32 + CMPQ BX, $64 + JBE _33through64 + CMPQ BX, $128 + JBE _65through128 + CMPQ BX, $256 + JBE _129through256 + // TODO: use branch table and BSR to make this just a single dispatch + // TODO: for really big clears, use MOVNTDQ, even without AVX2. 
+ +loop: + MOVOU X0, 0(DI) + MOVOU X0, 16(DI) + MOVOU X0, 32(DI) + MOVOU X0, 48(DI) + MOVOU X0, 64(DI) + MOVOU X0, 80(DI) + MOVOU X0, 96(DI) + MOVOU X0, 112(DI) + MOVOU X0, 128(DI) + MOVOU X0, 144(DI) + MOVOU X0, 160(DI) + MOVOU X0, 176(DI) + MOVOU X0, 192(DI) + MOVOU X0, 208(DI) + MOVOU X0, 224(DI) + MOVOU X0, 240(DI) + SUBQ $256, BX + ADDQ $256, DI + CMPQ BX, $256 + JAE loop + JMP tail + +_1or2: + MOVB AX, (DI) + MOVB AX, -1(DI)(BX*1) + RET +_0: + RET +_3or4: + MOVW AX, (DI) + MOVW AX, -2(DI)(BX*1) + RET +_5through7: + MOVL AX, (DI) + MOVL AX, -4(DI)(BX*1) + RET +_8: + // We need a separate case for 8 to make sure we clear pointers atomically. + MOVQ AX, (DI) + RET +_9through16: + MOVQ AX, (DI) + MOVQ AX, -8(DI)(BX*1) + RET +_17through32: + MOVOU X0, (DI) + MOVOU X0, -16(DI)(BX*1) + RET +_33through64: + MOVOU X0, (DI) + MOVOU X0, 16(DI) + MOVOU X0, -32(DI)(BX*1) + MOVOU X0, -16(DI)(BX*1) + RET +_65through128: + MOVOU X0, (DI) + MOVOU X0, 16(DI) + MOVOU X0, 32(DI) + MOVOU X0, 48(DI) + MOVOU X0, -64(DI)(BX*1) + MOVOU X0, -48(DI)(BX*1) + MOVOU X0, -32(DI)(BX*1) + MOVOU X0, -16(DI)(BX*1) + RET +_129through256: + MOVOU X0, (DI) + MOVOU X0, 16(DI) + MOVOU X0, 32(DI) + MOVOU X0, 48(DI) + MOVOU X0, 64(DI) + MOVOU X0, 80(DI) + MOVOU X0, 96(DI) + MOVOU X0, 112(DI) + MOVOU X0, -128(DI)(BX*1) + MOVOU X0, -112(DI)(BX*1) + MOVOU X0, -96(DI)(BX*1) + MOVOU X0, -80(DI)(BX*1) + MOVOU X0, -64(DI)(BX*1) + MOVOU X0, -48(DI)(BX*1) + MOVOU X0, -32(DI)(BX*1) + MOVOU X0, -16(DI)(BX*1) + RET diff --git a/pkg/sentry/platform/safecopy/memcpy_amd64.s b/pkg/sentry/platform/safecopy/memcpy_amd64.s new file mode 100644 index 000000000..96ef2eefc --- /dev/null +++ b/pkg/sentry/platform/safecopy/memcpy_amd64.s @@ -0,0 +1,242 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "textflag.h"

// handleMemcpyFault returns (the value stored in AX, the value stored in DI).
// Control is transferred to it when memcpy below receives SIGSEGV or SIGBUS,
// with the faulting address stored in AX and the signal number stored in DI.
//
// It must have the same frame configuration as memcpy so that it can undo any
// potential call frame set up by the assembler.
TEXT handleMemcpyFault(SB), NOSPLIT, $0-36
	// Publish the fault address and signal number into memcpy's result
	// slots (fault at +24, sig at +32 in the shared 36-byte frame).
	MOVQ AX, addr+24(FP)
	MOVL DI, sig+32(FP)
	RET

// memcpy copies data from src to dst. If a SIGSEGV or SIGBUS signal is received
// during the copy, it returns the address that caused the fault and the number
// of the signal that was received. Otherwise, it returns an unspecified address
// and a signal number of 0.
//
// Data is copied in order, such that if a fault happens at address p, it is
// safe to assume that all data before p-maxRegisterSize has already been
// successfully copied.
//
// The code is derived from the forward copying part of runtime.memmove.
//
// func memcpy(dst, src unsafe.Pointer, n uintptr) (fault unsafe.Pointer, sig int32)
TEXT ·memcpy(SB), NOSPLIT, $0-36
	// Store 0 as the returned signal number. If we run to completion,
	// this is the value the caller will see; if a signal is received,
	// handleMemcpyFault will store a different value in this address.
	MOVL $0, sig+32(FP)

	// Register roles for the rest of the function:
	//   DI = destination pointer, SI = source pointer, BX = bytes remaining.
	MOVQ to+0(FP), DI
	MOVQ from+8(FP), SI
	MOVQ n+16(FP), BX

	// REP instructions have a high startup cost, so we handle small sizes
	// with some straightline code. The REP MOVSQ instruction is really fast
	// for large sizes. The cutover is approximately 2K.
tail:
	// move_129through256 or smaller work whether or not the source and the
	// destination memory regions overlap because they load all data into
	// registers before writing it back. move_256through2048 on the other
	// hand can be used only when the memory regions don't overlap or the copy
	// direction is forward.
	//
	// Size-dispatch chain: branch to the smallest handler that covers BX.
	TESTQ BX, BX
	JEQ move_0
	CMPQ BX, $2
	JBE move_1or2
	CMPQ BX, $4
	JBE move_3or4
	CMPQ BX, $8
	JB move_5through7
	JE move_8
	CMPQ BX, $16
	JBE move_9through16
	CMPQ BX, $32
	JBE move_17through32
	CMPQ BX, $64
	JBE move_33through64
	CMPQ BX, $128
	JBE move_65through128
	CMPQ BX, $256
	JBE move_129through256
	// TODO: use branch table and BSR to make this just a single dispatch

/*
 * forward copy loop
 */
	CMPQ BX, $2048
	JLS move_256through2048

	// Check alignment
	MOVL SI, AX
	ORL DI, AX
	TESTL $7, AX
	JEQ fwdBy8

	// Do 1 byte at a time
	MOVQ BX, CX
	REP; MOVSB
	RET

fwdBy8:
	// Do 8 bytes at a time
	MOVQ BX, CX
	SHRQ $3, CX
	ANDQ $7, BX
	REP; MOVSQ
	// BX now holds the 0-7 leftover bytes; let the dispatcher finish them.
	JMP tail

move_1or2:
	// Overlapping head/tail stores: for BX in {1,2} the two writes cover
	// the whole range (and coincide when BX == 1).
	MOVB (SI), AX
	MOVB AX, (DI)
	MOVB -1(SI)(BX*1), CX
	MOVB CX, -1(DI)(BX*1)
	RET
move_0:
	RET
move_3or4:
	MOVW (SI), AX
	MOVW AX, (DI)
	MOVW -2(SI)(BX*1), CX
	MOVW CX, -2(DI)(BX*1)
	RET
move_5through7:
	MOVL (SI), AX
	MOVL AX, (DI)
	MOVL -4(SI)(BX*1), CX
	MOVL CX, -4(DI)(BX*1)
	RET
move_8:
	// We need a separate case for 8 to make sure we write pointers atomically.
	MOVQ (SI), AX
	MOVQ AX, (DI)
	RET
move_9through16:
	MOVQ (SI), AX
	MOVQ AX, (DI)
	MOVQ -8(SI)(BX*1), CX
	MOVQ CX, -8(DI)(BX*1)
	RET
move_17through32:
	MOVOU (SI), X0
	MOVOU X0, (DI)
	MOVOU -16(SI)(BX*1), X1
	MOVOU X1, -16(DI)(BX*1)
	RET
move_33through64:
	MOVOU (SI), X0
	MOVOU X0, (DI)
	MOVOU 16(SI), X1
	MOVOU X1, 16(DI)
	MOVOU -32(SI)(BX*1), X2
	MOVOU X2, -32(DI)(BX*1)
	MOVOU -16(SI)(BX*1), X3
	MOVOU X3, -16(DI)(BX*1)
	RET
move_65through128:
	MOVOU (SI), X0
	MOVOU X0, (DI)
	MOVOU 16(SI), X1
	MOVOU X1, 16(DI)
	MOVOU 32(SI), X2
	MOVOU X2, 32(DI)
	MOVOU 48(SI), X3
	MOVOU X3, 48(DI)
	MOVOU -64(SI)(BX*1), X4
	MOVOU X4, -64(DI)(BX*1)
	MOVOU -48(SI)(BX*1), X5
	MOVOU X5, -48(DI)(BX*1)
	MOVOU -32(SI)(BX*1), X6
	MOVOU X6, -32(DI)(BX*1)
	MOVOU -16(SI)(BX*1), X7
	MOVOU X7, -16(DI)(BX*1)
	RET
move_129through256:
	// All 16 XMM registers are loaded before any store, so this case is
	// safe even for overlapping regions.
	MOVOU (SI), X0
	MOVOU X0, (DI)
	MOVOU 16(SI), X1
	MOVOU X1, 16(DI)
	MOVOU 32(SI), X2
	MOVOU X2, 32(DI)
	MOVOU 48(SI), X3
	MOVOU X3, 48(DI)
	MOVOU 64(SI), X4
	MOVOU X4, 64(DI)
	MOVOU 80(SI), X5
	MOVOU X5, 80(DI)
	MOVOU 96(SI), X6
	MOVOU X6, 96(DI)
	MOVOU 112(SI), X7
	MOVOU X7, 112(DI)
	MOVOU -128(SI)(BX*1), X8
	MOVOU X8, -128(DI)(BX*1)
	MOVOU -112(SI)(BX*1), X9
	MOVOU X9, -112(DI)(BX*1)
	MOVOU -96(SI)(BX*1), X10
	MOVOU X10, -96(DI)(BX*1)
	MOVOU -80(SI)(BX*1), X11
	MOVOU X11, -80(DI)(BX*1)
	MOVOU -64(SI)(BX*1), X12
	MOVOU X12, -64(DI)(BX*1)
	MOVOU -48(SI)(BX*1), X13
	MOVOU X13, -48(DI)(BX*1)
	MOVOU -32(SI)(BX*1), X14
	MOVOU X14, -32(DI)(BX*1)
	MOVOU -16(SI)(BX*1), X15
	MOVOU X15, -16(DI)(BX*1)
	RET
move_256through2048:
	// Each iteration copies exactly 256 bytes forward; the sub-256-byte
	// remainder is finished by jumping back to the dispatcher at tail.
	SUBQ $256, BX
	MOVOU (SI), X0
	MOVOU X0, (DI)
	MOVOU 16(SI), X1
	MOVOU X1, 16(DI)
	MOVOU 32(SI), X2
	MOVOU X2, 32(DI)
	MOVOU 48(SI), X3
	MOVOU X3, 48(DI)
	MOVOU 64(SI), X4
	MOVOU X4, 64(DI)
	MOVOU 80(SI), X5
	MOVOU X5, 80(DI)
	MOVOU 96(SI), X6
	MOVOU X6, 96(DI)
	MOVOU 112(SI), X7
	MOVOU X7, 112(DI)
	MOVOU 128(SI), X8
	MOVOU X8, 128(DI)
	MOVOU 144(SI), X9
	MOVOU X9, 144(DI)
	MOVOU 160(SI), X10
	MOVOU X10, 160(DI)
	MOVOU 176(SI), X11
	MOVOU X11, 176(DI)
	MOVOU 192(SI), X12
	MOVOU X12, 192(DI)
	MOVOU 208(SI), X13
	MOVOU X13, 208(DI)
	MOVOU 224(SI), X14
	MOVOU X14, 224(DI)
	MOVOU 240(SI), X15
	MOVOU X15, 240(DI)
	CMPQ BX, $256
	LEAQ 256(SI), SI
	LEAQ 256(DI), DI
	JGE move_256through2048
	JMP tail
diff --git a/pkg/sentry/platform/safecopy/safecopy.go b/pkg/sentry/platform/safecopy/safecopy.go
new file mode 100644
index 000000000..90a2aad7b
--- /dev/null
+++ b/pkg/sentry/platform/safecopy/safecopy.go
@@ -0,0 +1,140 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package safecopy provides an efficient implementation of functions to access
// memory that may result in SIGSEGV or SIGBUS being sent to the accessor.
package safecopy

import (
	"fmt"
	"reflect"
	"runtime"
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// SegvError is returned when a safecopy function receives SIGSEGV.
type SegvError struct {
	// Addr is the address at which the SIGSEGV occurred.
	Addr uintptr
}

// Error implements error.Error.
func (e SegvError) Error() string {
	return fmt.Sprintf("SIGSEGV at %#x", e.Addr)
}

// BusError is returned when a safecopy function receives SIGBUS.
type BusError struct {
	// Addr is the address at which the SIGBUS occurred.
	Addr uintptr
}

// Error implements error.Error.
func (e BusError) Error() string {
	return fmt.Sprintf("SIGBUS at %#x", e.Addr)
}

// AlignmentError is returned when a safecopy function is passed an address
// that does not meet alignment requirements.
type AlignmentError struct {
	// Addr is the invalid address.
	Addr uintptr

	// Alignment is the required alignment.
	Alignment uintptr
}

// Error implements error.Error.
func (e AlignmentError) Error() string {
	return fmt.Sprintf("address %#x is not aligned to a %d-byte boundary", e.Addr, e.Alignment)
}

var (
	// The begin and end addresses below are for the functions that are
	// checked by the signal handler.
	memcpyBegin               uintptr
	memcpyEnd                 uintptr
	memclrBegin               uintptr
	memclrEnd                 uintptr
	swapUint32Begin           uintptr
	swapUint32End             uintptr
	swapUint64Begin           uintptr
	swapUint64End             uintptr
	compareAndSwapUint32Begin uintptr
	compareAndSwapUint32End   uintptr

	// savedSigSegVHandler is a pointer to the SIGSEGV handler that was
	// configured before we replaced it with our own. We still call into it
	// when we get a SIGSEGV that is not interesting to us.
	savedSigSegVHandler uintptr

	// Same as above, but for SIGBUS signals.
	savedSigBusHandler uintptr
)

// signalHandler is our replacement signal handler for SIGSEGV and SIGBUS
// signals.
func signalHandler()

// FindEndAddress returns the end address (one byte beyond the last) of the
// function that contains the specified address (begin).
//
// It scans forward one byte at a time until runtime.FuncForPC resolves to a
// different function than the one containing begin. If begin is not inside
// any known function, begin itself is returned.
//
// NOTE(review): the scan assumes FuncForPC eventually changes (i.e. another
// function or unmapped text follows); otherwise the loop would not terminate.
func FindEndAddress(begin uintptr) uintptr {
	f := runtime.FuncForPC(begin)
	if f != nil {
		for p := begin; ; p++ {
			g := runtime.FuncForPC(p)
			if f != g {
				return p
			}
		}
	}
	return begin
}

// initializeAddresses initializes the addresses used by the signal handler.
func initializeAddresses() {
	// The following functions are written in assembly language, so they won't
	// be inlined by the existing compiler/linker. Tests will fail if this
	// assumption is violated.
+ memcpyBegin = reflect.ValueOf(memcpy).Pointer() + memcpyEnd = FindEndAddress(memcpyBegin) + memclrBegin = reflect.ValueOf(memclr).Pointer() + memclrEnd = FindEndAddress(memclrBegin) + swapUint32Begin = reflect.ValueOf(swapUint32).Pointer() + swapUint32End = FindEndAddress(swapUint32Begin) + swapUint64Begin = reflect.ValueOf(swapUint64).Pointer() + swapUint64End = FindEndAddress(swapUint64Begin) + compareAndSwapUint32Begin = reflect.ValueOf(compareAndSwapUint32).Pointer() + compareAndSwapUint32End = FindEndAddress(compareAndSwapUint32Begin) +} + +func init() { + initializeAddresses() + if err := ReplaceSignalHandler(syscall.SIGSEGV, reflect.ValueOf(signalHandler).Pointer(), &savedSigSegVHandler); err != nil { + panic(fmt.Sprintf("Unable to set handler for SIGSEGV: %v", err)) + } + if err := ReplaceSignalHandler(syscall.SIGBUS, reflect.ValueOf(signalHandler).Pointer(), &savedSigBusHandler); err != nil { + panic(fmt.Sprintf("Unable to set handler for SIGBUS: %v", err)) + } + syserror.AddErrorUnwrapper(func(e error) (syscall.Errno, bool) { + switch e.(type) { + case SegvError, BusError, AlignmentError: + return syscall.EFAULT, true + default: + return 0, false + } + }) +} diff --git a/pkg/sentry/platform/safecopy/safecopy_test.go b/pkg/sentry/platform/safecopy/safecopy_test.go new file mode 100644 index 000000000..67df36121 --- /dev/null +++ b/pkg/sentry/platform/safecopy/safecopy_test.go @@ -0,0 +1,617 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package safecopy + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "runtime/debug" + "syscall" + "testing" + "unsafe" +) + +// Size of a page in bytes. Cloned from usermem.PageSize to avoid a circular +// dependency. +const pageSize = 4096 + +func initRandom(b []byte) { + for i := range b { + b[i] = byte(rand.Intn(256)) + } +} + +func randBuf(size int) []byte { + b := make([]byte, size) + initRandom(b) + return b +} + +func TestCopyInSuccess(t *testing.T) { + // Test that CopyIn does not return an error when all pages are accessible. + const bufLen = 8192 + a := randBuf(bufLen) + b := make([]byte, bufLen) + + n, err := CopyIn(b, unsafe.Pointer(&a[0])) + if n != bufLen { + t.Errorf("Unexpected copy length, got %v, want %v", n, bufLen) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !bytes.Equal(a, b) { + t.Errorf("Buffers are not equal when they should be: %v %v", a, b) + } +} + +func TestCopyOutSuccess(t *testing.T) { + // Test that CopyOut does not return an error when all pages are + // accessible. + const bufLen = 8192 + a := randBuf(bufLen) + b := make([]byte, bufLen) + + n, err := CopyOut(unsafe.Pointer(&b[0]), a) + if n != bufLen { + t.Errorf("Unexpected copy length, got %v, want %v", n, bufLen) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !bytes.Equal(a, b) { + t.Errorf("Buffers are not equal when they should be: %v %v", a, b) + } +} + +func TestCopySuccess(t *testing.T) { + // Test that Copy does not return an error when all pages are accessible. 
+ const bufLen = 8192 + a := randBuf(bufLen) + b := make([]byte, bufLen) + + n, err := Copy(unsafe.Pointer(&b[0]), unsafe.Pointer(&a[0]), bufLen) + if n != bufLen { + t.Errorf("Unexpected copy length, got %v, want %v", n, bufLen) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !bytes.Equal(a, b) { + t.Errorf("Buffers are not equal when they should be: %v %v", a, b) + } +} + +func TestZeroOutSuccess(t *testing.T) { + // Test that ZeroOut does not return an error when all pages are + // accessible. + const bufLen = 8192 + a := make([]byte, bufLen) + b := randBuf(bufLen) + + n, err := ZeroOut(unsafe.Pointer(&b[0]), bufLen) + if n != bufLen { + t.Errorf("Unexpected copy length, got %v, want %v", n, bufLen) + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !bytes.Equal(a, b) { + t.Errorf("Buffers are not equal when they should be: %v %v", a, b) + } +} + +func TestSwapUint32Success(t *testing.T) { + // Test that SwapUint32 does not return an error when the page is + // accessible. + before := uint32(rand.Int31()) + after := uint32(rand.Int31()) + val := before + + old, err := SwapUint32(unsafe.Pointer(&val), after) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if old != before { + t.Errorf("Unexpected old value: got %v, want %v", old, before) + } + if val != after { + t.Errorf("Unexpected new value: got %v, want %v", val, after) + } +} + +func TestSwapUint32AlignmentError(t *testing.T) { + // Test that SwapUint32 returns an AlignmentError when passed an unaligned + // address. + data := new(struct{ val uint64 }) + addr := uintptr(unsafe.Pointer(&data.val)) + 1 + want := AlignmentError{Addr: addr, Alignment: 4} + if _, err := SwapUint32(unsafe.Pointer(addr), 1); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } +} + +func TestSwapUint64Success(t *testing.T) { + // Test that SwapUint64 does not return an error when the page is + // accessible. 
+ before := uint64(rand.Int63()) + after := uint64(rand.Int63()) + // "The first word in ... an allocated struct or slice can be relied upon + // to be 64-bit aligned." - sync/atomic docs + data := new(struct{ val uint64 }) + data.val = before + + old, err := SwapUint64(unsafe.Pointer(&data.val), after) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if old != before { + t.Errorf("Unexpected old value: got %v, want %v", old, before) + } + if data.val != after { + t.Errorf("Unexpected new value: got %v, want %v", data.val, after) + } +} + +func TestSwapUint64AlignmentError(t *testing.T) { + // Test that SwapUint64 returns an AlignmentError when passed an unaligned + // address. + data := new(struct{ val1, val2 uint64 }) + addr := uintptr(unsafe.Pointer(&data.val1)) + 1 + want := AlignmentError{Addr: addr, Alignment: 8} + if _, err := SwapUint64(unsafe.Pointer(addr), 1); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } +} + +func TestCompareAndSwapUint32Success(t *testing.T) { + // Test that CompareAndSwapUint32 does not return an error when the page is + // accessible. + before := uint32(rand.Int31()) + after := uint32(rand.Int31()) + val := before + + old, err := CompareAndSwapUint32(unsafe.Pointer(&val), before, after) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if old != before { + t.Errorf("Unexpected old value: got %v, want %v", old, before) + } + if val != after { + t.Errorf("Unexpected new value: got %v, want %v", val, after) + } +} + +func TestCompareAndSwapUint32AlignmentError(t *testing.T) { + // Test that CompareAndSwapUint32 returns an AlignmentError when passed an + // unaligned address. 
+ data := new(struct{ val uint64 }) + addr := uintptr(unsafe.Pointer(&data.val)) + 1 + want := AlignmentError{Addr: addr, Alignment: 4} + if _, err := CompareAndSwapUint32(unsafe.Pointer(addr), 0, 1); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } +} + +// withSegvErrorTestMapping calls fn with a two-page mapping. The first page +// contains random data, and the second page generates SIGSEGV when accessed. +func withSegvErrorTestMapping(t *testing.T, fn func(m []byte)) { + mapping, err := syscall.Mmap(-1, 0, 2*pageSize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE) + if err != nil { + t.Fatalf("Mmap failed: %v", err) + } + defer syscall.Munmap(mapping) + if err := syscall.Mprotect(mapping[pageSize:], syscall.PROT_NONE); err != nil { + t.Fatalf("Mprotect failed: %v", err) + } + initRandom(mapping[:pageSize]) + + fn(mapping) +} + +// withBusErrorTestMapping calls fn with a two-page mapping. The first page +// contains random data, and the second page generates SIGBUS when accessed. +func withBusErrorTestMapping(t *testing.T, fn func(m []byte)) { + f, err := ioutil.TempFile("", "sigbus_test") + if err != nil { + t.Fatalf("TempFile failed: %v", err) + } + defer f.Close() + if err := f.Truncate(pageSize); err != nil { + t.Fatalf("Truncate failed: %v", err) + } + mapping, err := syscall.Mmap(int(f.Fd()), 0, 2*pageSize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + t.Fatalf("Mmap failed: %v", err) + } + defer syscall.Munmap(mapping) + initRandom(mapping[:pageSize]) + + fn(mapping) +} + +func TestCopyInSegvError(t *testing.T) { + // Test that CopyIn returns a SegvError when reaching a page that signals + // SIGSEGV. 
+ for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) { + withSegvErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + src := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + dst := randBuf(pageSize) + n, err := CopyIn(dst, src) + if n != bytesBeforeFault { + t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault) + } + if want := (SegvError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := dst[:bytesBeforeFault], mapping[pageSize-bytesBeforeFault:pageSize]; !bytes.Equal(got, want) { + t.Errorf("Buffers are not equal when they should be: %v %v", got, want) + } + }) + }) + } +} + +func TestCopyInBusError(t *testing.T) { + // Test that CopyIn returns a BusError when reaching a page that signals + // SIGBUS. + for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting copy %d bytes before SIGBUS", bytesBeforeFault), func(t *testing.T) { + withBusErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + src := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + dst := randBuf(pageSize) + n, err := CopyIn(dst, src) + if n != bytesBeforeFault { + t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault) + } + if want := (BusError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := dst[:bytesBeforeFault], mapping[pageSize-bytesBeforeFault:pageSize]; !bytes.Equal(got, want) { + t.Errorf("Buffers are not equal when they should be: %v %v", got, want) + } + }) + }) + } +} + +func TestCopyOutSegvError(t *testing.T) { + // Test that CopyOut returns a SegvError when reaching a page that signals + // SIGSEGV. 
+ for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) { + withSegvErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + dst := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + src := randBuf(pageSize) + n, err := CopyOut(dst, src) + if n != bytesBeforeFault { + t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault) + } + if want := (SegvError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := mapping[pageSize-bytesBeforeFault:pageSize], src[:bytesBeforeFault]; !bytes.Equal(got, want) { + t.Errorf("Buffers are not equal when they should be: %v %v", got, want) + } + }) + }) + } +} + +func TestCopyOutBusError(t *testing.T) { + // Test that CopyOut returns a BusError when reaching a page that signals + // SIGBUS. + for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) { + withBusErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + dst := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + src := randBuf(pageSize) + n, err := CopyOut(dst, src) + if n != bytesBeforeFault { + t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault) + } + if want := (BusError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := mapping[pageSize-bytesBeforeFault:pageSize], src[:bytesBeforeFault]; !bytes.Equal(got, want) { + t.Errorf("Buffers are not equal when they should be: %v %v", got, want) + } + }) + }) + } +} + +func TestCopySourceSegvError(t *testing.T) { + // Test that Copy returns a SegvError when copying from a page that signals + // SIGSEGV. 
+ for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) { + withSegvErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + src := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + dst := randBuf(pageSize) + n, err := Copy(unsafe.Pointer(&dst[0]), src, pageSize) + if n != uintptr(bytesBeforeFault) { + t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault) + } + if want := (SegvError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := dst[:bytesBeforeFault], mapping[pageSize-bytesBeforeFault:pageSize]; !bytes.Equal(got, want) { + t.Errorf("Buffers are not equal when they should be: %v %v", got, want) + } + }) + }) + } +} + +func TestCopySourceBusError(t *testing.T) { + // Test that Copy returns a BusError when copying from a page that signals + // SIGBUS. 
+ for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting copy %d bytes before SIGBUS", bytesBeforeFault), func(t *testing.T) { + withBusErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + src := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + dst := randBuf(pageSize) + n, err := Copy(unsafe.Pointer(&dst[0]), src, pageSize) + if n != uintptr(bytesBeforeFault) { + t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault) + } + if want := (BusError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := dst[:bytesBeforeFault], mapping[pageSize-bytesBeforeFault:pageSize]; !bytes.Equal(got, want) { + t.Errorf("Buffers are not equal when they should be: %v %v", got, want) + } + }) + }) + } +} + +func TestCopyDestinationSegvError(t *testing.T) { + // Test that Copy returns a SegvError when copying to a page that signals + // SIGSEGV. 
+ for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting copy %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) { + withSegvErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + dst := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + src := randBuf(pageSize) + n, err := Copy(dst, unsafe.Pointer(&src[0]), pageSize) + if n != uintptr(bytesBeforeFault) { + t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault) + } + if want := (SegvError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := mapping[pageSize-bytesBeforeFault:pageSize], src[:bytesBeforeFault]; !bytes.Equal(got, want) { + t.Errorf("Buffers are not equal when they should be: %v %v", got, want) + } + }) + }) + } +} + +func TestCopyDestinationBusError(t *testing.T) { + // Test that Copy returns a BusError when copying to a page that signals + // SIGBUS. 
+ for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting copy %d bytes before SIGBUS", bytesBeforeFault), func(t *testing.T) { + withBusErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + dst := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + src := randBuf(pageSize) + n, err := Copy(dst, unsafe.Pointer(&src[0]), pageSize) + if n != uintptr(bytesBeforeFault) { + t.Errorf("Unexpected copy length: got %v, want %v", n, bytesBeforeFault) + } + if want := (BusError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := mapping[pageSize-bytesBeforeFault:pageSize], src[:bytesBeforeFault]; !bytes.Equal(got, want) { + t.Errorf("Buffers are not equal when they should be: %v %v", got, want) + } + }) + }) + } +} + +func TestZeroOutSegvError(t *testing.T) { + // Test that ZeroOut returns a SegvError when reaching a page that signals + // SIGSEGV. 
+ for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting write %d bytes before SIGSEGV", bytesBeforeFault), func(t *testing.T) { + withSegvErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + dst := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + n, err := ZeroOut(dst, pageSize) + if n != uintptr(bytesBeforeFault) { + t.Errorf("Unexpected write length: got %v, want %v", n, bytesBeforeFault) + } + if want := (SegvError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := mapping[pageSize-bytesBeforeFault:pageSize], make([]byte, bytesBeforeFault); !bytes.Equal(got, want) { + t.Errorf("Non-zero bytes in written part of mapping: %v", got) + } + }) + }) + } +} + +func TestZeroOutBusError(t *testing.T) { + // Test that ZeroOut returns a BusError when reaching a page that signals + // SIGBUS. + for bytesBeforeFault := 0; bytesBeforeFault <= 2*maxRegisterSize; bytesBeforeFault++ { + t.Run(fmt.Sprintf("starting write %d bytes before SIGBUS", bytesBeforeFault), func(t *testing.T) { + withBusErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + dst := unsafe.Pointer(secondPage - uintptr(bytesBeforeFault)) + n, err := ZeroOut(dst, pageSize) + if n != uintptr(bytesBeforeFault) { + t.Errorf("Unexpected write length: got %v, want %v", n, bytesBeforeFault) + } + if want := (BusError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + if got, want := mapping[pageSize-bytesBeforeFault:pageSize], make([]byte, bytesBeforeFault); !bytes.Equal(got, want) { + t.Errorf("Non-zero bytes in written part of mapping: %v", got) + } + }) + }) + } +} + +func TestSwapUint32SegvError(t *testing.T) { + // Test that SwapUint32 returns a SegvError when reaching a page that + // signals SIGSEGV. 
+ withSegvErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + _, err := SwapUint32(unsafe.Pointer(secondPage), 1) + if want := (SegvError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + }) +} + +func TestSwapUint32BusError(t *testing.T) { + // Test that SwapUint32 returns a BusError when reaching a page that + // signals SIGBUS. + withBusErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + _, err := SwapUint32(unsafe.Pointer(secondPage), 1) + if want := (BusError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + }) +} + +func TestSwapUint64SegvError(t *testing.T) { + // Test that SwapUint64 returns a SegvError when reaching a page that + // signals SIGSEGV. + withSegvErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + _, err := SwapUint64(unsafe.Pointer(secondPage), 1) + if want := (SegvError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + }) +} + +func TestSwapUint64BusError(t *testing.T) { + // Test that SwapUint64 returns a BusError when reaching a page that + // signals SIGBUS. + withBusErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + _, err := SwapUint64(unsafe.Pointer(secondPage), 1) + if want := (BusError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + }) +} + +func TestCompareAndSwapUint32SegvError(t *testing.T) { + // Test that CompareAndSwapUint32 returns a SegvError when reaching a page + // that signals SIGSEGV. 
+ withSegvErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + _, err := CompareAndSwapUint32(unsafe.Pointer(secondPage), 0, 1) + if want := (SegvError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + }) +} + +func TestCompareAndSwapUint32BusError(t *testing.T) { + // Test that CompareAndSwapUint32 returns a BusError when reaching a page + // that signals SIGBUS. + withBusErrorTestMapping(t, func(mapping []byte) { + secondPage := uintptr(unsafe.Pointer(&mapping[0])) + pageSize + _, err := CompareAndSwapUint32(unsafe.Pointer(secondPage), 0, 1) + if want := (BusError{secondPage}); err != want { + t.Errorf("Unexpected error: got %v, want %v", err, want) + } + }) +} + +func testCopy(dst, src []byte) (panicked bool) { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + debug.SetPanicOnFault(true) + copy(dst, src) + return +} + +func TestSegVOnMemmove(t *testing.T) { + // Test that SIGSEGVs received by runtime.memmove when *not* doing + // CopyIn or CopyOut work gets propagated to the runtime. + const bufLen = pageSize + a, err := syscall.Mmap(-1, 0, bufLen, syscall.PROT_NONE, syscall.MAP_ANON|syscall.MAP_PRIVATE) + if err != nil { + t.Fatalf("Mmap failed: %v", err) + + } + defer syscall.Munmap(a) + b := randBuf(bufLen) + + if !testCopy(b, a) { + t.Fatalf("testCopy didn't panic when it should have") + } + + if !testCopy(a, b) { + t.Fatalf("testCopy didn't panic when it should have") + } +} + +func TestSigbusOnMemmove(t *testing.T) { + // Test that SIGBUS received by runtime.memmove when *not* doing + // CopyIn or CopyOut work gets propagated to the runtime. 
+ const bufLen = pageSize + f, err := ioutil.TempFile("", "sigbus_test") + if err != nil { + t.Fatalf("TempFile failed: %v", err) + } + os.Remove(f.Name()) + defer f.Close() + + a, err := syscall.Mmap(int(f.Fd()), 0, bufLen, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + t.Fatalf("Mmap failed: %v", err) + + } + defer syscall.Munmap(a) + b := randBuf(bufLen) + + if !testCopy(b, a) { + t.Fatalf("testCopy didn't panic when it should have") + } + + if !testCopy(a, b) { + t.Fatalf("testCopy didn't panic when it should have") + } +} diff --git a/pkg/sentry/platform/safecopy/safecopy_unsafe.go b/pkg/sentry/platform/safecopy/safecopy_unsafe.go new file mode 100644 index 000000000..72f243f8d --- /dev/null +++ b/pkg/sentry/platform/safecopy/safecopy_unsafe.go @@ -0,0 +1,315 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package safecopy + +import ( + "fmt" + "syscall" + "unsafe" +) + +// maxRegisterSize is the maximum register size used in memcpy and memclr. It +// is used to decide by how much to rewind the copy (for memcpy) or zeroing +// (for memclr) before proceeding. +const maxRegisterSize = 16 + +// memcpy copies data from src to dst. If a SIGSEGV or SIGBUS signal is received +// during the copy, it returns the address that caused the fault and the number +// of the signal that was received. Otherwise, it returns an unspecified address +// and a signal number of 0. 
+// +// Data is copied in order, such that if a fault happens at address p, it is +// safe to assume that all data before p-maxRegisterSize has already been +// successfully copied. +// +//go:noescape +func memcpy(dst, src unsafe.Pointer, n uintptr) (fault unsafe.Pointer, sig int32) + +// memclr sets the n bytes following ptr to zeroes. If a SIGSEGV or SIGBUS +// signal is received during the write, it returns the address that caused the +// fault and the number of the signal that was received. Otherwise, it returns +// an unspecified address and a signal number of 0. +// +// Data is written in order, such that if a fault happens at address p, it is +// safe to assume that all data before p-maxRegisterSize has already been +// successfully written. +// +//go:noescape +func memclr(ptr unsafe.Pointer, n uintptr) (fault unsafe.Pointer, sig int32) + +// swapUint32 atomically stores new into *ptr and returns (the previous *ptr +// value, 0). If a SIGSEGV or SIGBUS signal is received during the swap, the +// value of old is unspecified, and sig is the number of the signal that was +// received. +// +// Preconditions: ptr must be aligned to a 4-byte boundary. +// +//go:noescape +func swapUint32(ptr unsafe.Pointer, new uint32) (old uint32, sig int32) + +// swapUint64 atomically stores new into *ptr and returns (the previous *ptr +// value, 0). If a SIGSEGV or SIGBUS signal is received during the swap, the +// value of old is unspecified, and sig is the number of the signal that was +// received. +// +// Preconditions: ptr must be aligned to a 8-byte boundary. +// +//go:noescape +func swapUint64(ptr unsafe.Pointer, new uint64) (old uint64, sig int32) + +// compareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns +// (the value previously stored at ptr, 0). If a SIGSEGV or SIGBUS signal is +// received during the operation, the value of prev is unspecified, and sig is +// the number of the signal that was received. 
+// +// Preconditions: ptr must be aligned to a 4-byte boundary. +// +//go:noescape +func compareAndSwapUint32(ptr unsafe.Pointer, old, new uint32) (prev uint32, sig int32) + +// CopyIn copies len(dst) bytes from src to dst. It returns the number of bytes +// copied and an error if SIGSEGV or SIGBUS is received while reading from src. +func CopyIn(dst []byte, src unsafe.Pointer) (int, error) { + toCopy := uintptr(len(dst)) + if len(dst) == 0 { + return 0, nil + } + + fault, sig := memcpy(unsafe.Pointer(&dst[0]), src, toCopy) + if sig == 0 { + return len(dst), nil + } + + if faultN, srcN := uintptr(fault), uintptr(src); faultN < srcN && faultN >= srcN+toCopy { + panic(fmt.Sprintf("CopyIn faulted at %#x, which is outside source [%#x, %#x)", faultN, srcN, srcN+toCopy)) + } + + // memcpy might have ended the copy up to maxRegisterSize bytes before + // fault, if an instruction caused a memory access that straddled two + // pages, and the second one faulted. Try to copy up to the fault. + faultN, srcN := uintptr(fault), uintptr(src) + var done int + if faultN-srcN > maxRegisterSize { + done = int(faultN - srcN - maxRegisterSize) + } + n, err := CopyIn(dst[done:int(faultN-srcN)], unsafe.Pointer(srcN+uintptr(done))) + done += n + if err != nil { + return done, err + } + return done, errorFromFaultSignal(fault, sig) +} + +// CopyOut copies len(src) bytes from src to dst. If returns the number of +// bytes done and an error if SIGSEGV or SIGBUS is received while writing to +// dst. 
+func CopyOut(dst unsafe.Pointer, src []byte) (int, error) { + toCopy := uintptr(len(src)) + if toCopy == 0 { + return 0, nil + } + + fault, sig := memcpy(dst, unsafe.Pointer(&src[0]), toCopy) + if sig == 0 { + return len(src), nil + } + + if faultN, dstN := uintptr(fault), uintptr(dst); faultN < dstN && faultN >= dstN+toCopy { + panic(fmt.Sprintf("CopyOut faulted at %#x, which is outside destination [%#x, %#x)", faultN, dstN, dstN+toCopy)) + } + + // memcpy might have ended the copy up to maxRegisterSize bytes before + // fault, if an instruction caused a memory access that straddled two + // pages, and the second one faulted. Try to copy up to the fault. + faultN, dstN := uintptr(fault), uintptr(dst) + var done int + if faultN-dstN > maxRegisterSize { + done = int(faultN - dstN - maxRegisterSize) + } + n, err := CopyOut(unsafe.Pointer(dstN+uintptr(done)), src[done:int(faultN-dstN)]) + done += n + if err != nil { + return done, err + } + return done, errorFromFaultSignal(fault, sig) +} + +// Copy copies toCopy bytes from src to dst. It returns the number of bytes +// copied and an error if SIGSEGV or SIGBUS is received while reading from src +// or writing to dst. +// +// Data is copied in order; if [src, src+toCopy) and [dst, dst+toCopy) overlap, +// the resulting contents of dst are unspecified. +func Copy(dst, src unsafe.Pointer, toCopy uintptr) (uintptr, error) { + if toCopy == 0 { + return 0, nil + } + + fault, sig := memcpy(dst, src, toCopy) + if sig == 0 { + return toCopy, nil + } + + // Did the fault occur while reading from src or writing to dst? 
+	faultN, srcN, dstN := uintptr(fault), uintptr(src), uintptr(dst)
+	faultAfterSrc := ^uintptr(0)
+	if faultN >= srcN {
+		faultAfterSrc = faultN - srcN
+	}
+	faultAfterDst := ^uintptr(0)
+	if faultN >= dstN {
+		faultAfterDst = faultN - dstN
+	}
+	// Here the conjunction is correct: the fault is only a bug if it lies
+	// outside BOTH the source and the destination ranges.
+	if faultAfterSrc >= toCopy && faultAfterDst >= toCopy {
+		panic(fmt.Sprintf("Copy faulted at %#x, which is outside source [%#x, %#x) and destination [%#x, %#x)", faultN, srcN, srcN+toCopy, dstN, dstN+toCopy))
+	}
+	faultedAfter := faultAfterSrc
+	if faultedAfter > faultAfterDst {
+		faultedAfter = faultAfterDst
+	}
+
+	// memcpy might have ended the copy up to maxRegisterSize bytes before
+	// fault, if an instruction caused a memory access that straddled two
+	// pages, and the second one faulted. Try to copy up to the fault.
+	var done uintptr
+	if faultedAfter > maxRegisterSize {
+		done = faultedAfter - maxRegisterSize
+	}
+	n, err := Copy(unsafe.Pointer(dstN+done), unsafe.Pointer(srcN+done), faultedAfter-done)
+	done += n
+	if err != nil {
+		return done, err
+	}
+	return done, errorFromFaultSignal(fault, sig)
+}
+
+// ZeroOut writes toZero zero bytes to dst. It returns the number of bytes
+// written and an error if SIGSEGV or SIGBUS is received while writing to dst.
+func ZeroOut(dst unsafe.Pointer, toZero uintptr) (uintptr, error) {
+	if toZero == 0 {
+		return 0, nil
+	}
+
+	fault, sig := memclr(dst, toZero)
+	if sig == 0 {
+		return toZero, nil
+	}
+
+	// A fault outside [dst, dst+toZero) indicates a bug. The bounds test
+	// must be a disjunction; a conjunction of these two conditions can
+	// never be true and would disable the check entirely.
+	if faultN, dstN := uintptr(fault), uintptr(dst); faultN < dstN || faultN >= dstN+toZero {
+		panic(fmt.Sprintf("ZeroOut faulted at %#x, which is outside destination [%#x, %#x)", faultN, dstN, dstN+toZero))
+	}
+
+	// memclr might have ended the write up to maxRegisterSize bytes before
+	// fault, if an instruction caused a memory access that straddled two
+	// pages, and the second one faulted. Try to write up to the fault.
+	faultN, dstN := uintptr(fault), uintptr(dst)
+	var done uintptr
+	if faultN-dstN > maxRegisterSize {
+		done = faultN - dstN - maxRegisterSize
+	}
+	n, err := ZeroOut(unsafe.Pointer(dstN+done), faultN-dstN-done)
+	done += n
+	if err != nil {
+		return done, err
+	}
+	return done, errorFromFaultSignal(fault, sig)
+}
+
+// SwapUint32 is equivalent to sync/atomic.SwapUint32, except that it returns
+// an error if SIGSEGV or SIGBUS is received while accessing ptr, or if ptr is
+// not aligned to a 4-byte boundary.
+func SwapUint32(ptr unsafe.Pointer, new uint32) (uint32, error) {
+	if addr := uintptr(ptr); addr&3 != 0 {
+		return 0, AlignmentError{addr, 4}
+	}
+	old, sig := swapUint32(ptr, new)
+	return old, errorFromFaultSignal(ptr, sig)
+}
+
+// SwapUint64 is equivalent to sync/atomic.SwapUint64, except that it returns
+// an error if SIGSEGV or SIGBUS is received while accessing ptr, or if ptr is
+// not aligned to an 8-byte boundary.
+func SwapUint64(ptr unsafe.Pointer, new uint64) (uint64, error) {
+	if addr := uintptr(ptr); addr&7 != 0 {
+		return 0, AlignmentError{addr, 8}
+	}
+	old, sig := swapUint64(ptr, new)
+	return old, errorFromFaultSignal(ptr, sig)
+}
+
+// CompareAndSwapUint32 is equivalent to atomicbitops.CompareAndSwapUint32,
+// except that it returns an error if SIGSEGV or SIGBUS is received while
+// accessing ptr, or if ptr is not aligned to a 4-byte boundary.
+func CompareAndSwapUint32(ptr unsafe.Pointer, old, new uint32) (uint32, error) { + if addr := uintptr(ptr); addr&3 != 0 { + return 0, AlignmentError{addr, 4} + } + prev, sig := compareAndSwapUint32(ptr, old, new) + return prev, errorFromFaultSignal(ptr, sig) +} + +func errorFromFaultSignal(addr unsafe.Pointer, sig int32) error { + switch sig { + case 0: + return nil + case int32(syscall.SIGSEGV): + return SegvError{uintptr(addr)} + case int32(syscall.SIGBUS): + return BusError{uintptr(addr)} + default: + panic(fmt.Sprintf("safecopy got unexpected signal %d at address %#x", sig, addr)) + } +} + +// ReplaceSignalHandler replaces the existing signal handler for the provided +// signal with the one that handles faults in safecopy-protected functions. +// +// It stores the value of the previously set handler in previous. +// +// This function will be called on initialization in order to install safecopy +// handlers for appropriate signals. These handlers will call the previous +// handler however, and if this is function is being used externally then the +// same courtesy is expected. +func ReplaceSignalHandler(sig syscall.Signal, handler uintptr, previous *uintptr) error { + var sa struct { + handler uintptr + flags uint64 + restorer uintptr + mask uint64 + } + const maskLen = 8 + + // Get the existing signal handler information, and save the current + // handler. Once we replace it, we will use this pointer to fall back to + // it when we receive other signals. + if _, _, e := syscall.RawSyscall6(syscall.SYS_RT_SIGACTION, uintptr(sig), 0, uintptr(unsafe.Pointer(&sa)), maskLen, 0, 0); e != 0 { + return e + } + + // Fail if there isn't a previous handler. + if sa.handler == 0 { + return fmt.Errorf("previous handler for signal %x isn't set", sig) + } + + *previous = sa.handler + + // Install our own handler. 
+ sa.handler = handler + if _, _, e := syscall.RawSyscall6(syscall.SYS_RT_SIGACTION, uintptr(sig), uintptr(unsafe.Pointer(&sa)), 0, maskLen, 0, 0); e != 0 { + return e + } + + return nil +} diff --git a/pkg/sentry/platform/safecopy/sighandler_amd64.s b/pkg/sentry/platform/safecopy/sighandler_amd64.s new file mode 100644 index 000000000..a65cb0c26 --- /dev/null +++ b/pkg/sentry/platform/safecopy/sighandler_amd64.s @@ -0,0 +1,124 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "textflag.h" + +// The signals handled by sigHandler. +#define SIGBUS 7 +#define SIGSEGV 11 + +// Offsets to the registers in context->uc_mcontext.gregs[]. +#define REG_RDI 0x68 +#define REG_RAX 0x90 +#define REG_IP 0xa8 + +// Offset to the si_addr field of siginfo. +#define SI_CODE 0x08 +#define SI_ADDR 0x10 + +// signalHandler is the signal handler for SIGSEGV and SIGBUS signals. It must +// not be set up as a handler to any other signals. +// +// If the instruction causing the signal is within a safecopy-protected +// function, the signal is handled such that execution resumes in the +// appropriate fault handling stub with AX containing the faulting address and +// DI containing the signal number. Otherwise control is transferred to the +// previously configured signal handler (savedSigSegvHandler or +// savedSigBusHandler). 
+// +// This function cannot be written in go because it runs whenever a signal is +// received by the thread (preempting whatever was running), which includes when +// garbage collector has stopped or isn't expecting any interactions (like +// barriers). +// +// The arguments are the following: +// DI - The signal number. +// SI - Pointer to siginfo_t structure. +// DX - Pointer to ucontext structure. +TEXT ·signalHandler(SB),NOSPLIT,$0 + // Check if the signal is from the kernel. + MOVQ $0x0, CX + CMPL CX, SI_CODE(SI) + JGE original_handler + + // Check if RIP is within the area we care about. + MOVQ REG_IP(DX), CX + CMPQ CX, ·memcpyBegin(SB) + JB not_memcpy + CMPQ CX, ·memcpyEnd(SB) + JAE not_memcpy + + // Modify the context such that execution will resume in the fault + // handler. + LEAQ handleMemcpyFault(SB), CX + JMP handle_fault + +not_memcpy: + CMPQ CX, ·memclrBegin(SB) + JB not_memclr + CMPQ CX, ·memclrEnd(SB) + JAE not_memclr + + LEAQ handleMemclrFault(SB), CX + JMP handle_fault + +not_memclr: + CMPQ CX, ·swapUint32Begin(SB) + JB not_swapuint32 + CMPQ CX, ·swapUint32End(SB) + JAE not_swapuint32 + + LEAQ handleSwapUint32Fault(SB), CX + JMP handle_fault + +not_swapuint32: + CMPQ CX, ·swapUint64Begin(SB) + JB not_swapuint64 + CMPQ CX, ·swapUint64End(SB) + JAE not_swapuint64 + + LEAQ handleSwapUint64Fault(SB), CX + JMP handle_fault + +not_swapuint64: + CMPQ CX, ·compareAndSwapUint32Begin(SB) + JB not_casuint32 + CMPQ CX, ·compareAndSwapUint32End(SB) + JAE not_casuint32 + + LEAQ handleCompareAndSwapUint32Fault(SB), CX + JMP handle_fault + +not_casuint32: +original_handler: + // Jump to the previous signal handler, which is likely the golang one. + XORQ CX, CX + MOVQ ·savedSigBusHandler(SB), AX + CMPL DI, $SIGSEGV + CMOVQEQ ·savedSigSegVHandler(SB), AX + JMP AX + +handle_fault: + // Entered with the address of the fault handler in RCX; store it in + // RIP. + MOVQ CX, REG_IP(DX) + + // Store the faulting address in RAX. 
+ MOVQ SI_ADDR(SI), CX + MOVQ CX, REG_RAX(DX) + + // Store the signal number in EDI. + MOVL DI, REG_RDI(DX) + + RET diff --git a/pkg/sentry/safemem/BUILD b/pkg/sentry/safemem/BUILD new file mode 100644 index 000000000..dc4cfce41 --- /dev/null +++ b/pkg/sentry/safemem/BUILD @@ -0,0 +1,28 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "safemem", + srcs = [ + "block_unsafe.go", + "io.go", + "safemem.go", + "seq_unsafe.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/safemem", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/sentry/platform/safecopy", + ], +) + +go_test( + name = "safemem_test", + size = "small", + srcs = [ + "io_test.go", + "seq_test.go", + ], + embed = [":safemem"], +) diff --git a/pkg/sentry/safemem/block_unsafe.go b/pkg/sentry/safemem/block_unsafe.go new file mode 100644 index 000000000..0b58f6497 --- /dev/null +++ b/pkg/sentry/safemem/block_unsafe.go @@ -0,0 +1,269 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package safemem + +import ( + "fmt" + "reflect" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/sentry/platform/safecopy" +) + +// A Block is a range of contiguous bytes, similar to []byte but with the +// following differences: +// +// - The memory represented by a Block may require the use of safecopy to +// access. 
+// +// - Block does not carry a capacity and cannot be expanded. +// +// Blocks are immutable and may be copied by value. The zero value of Block +// represents an empty range, analogous to a nil []byte. +type Block struct { + // [start, start+length) is the represented memory. + // + // start is an unsafe.Pointer to ensure that Block prevents the represented + // memory from being garbage-collected. + start unsafe.Pointer + length int + + // needSafecopy is true if accessing the represented memory requires the + // use of safecopy. + needSafecopy bool +} + +// BlockFromSafeSlice returns a Block equivalent to slice, which is safe to +// access without safecopy. +func BlockFromSafeSlice(slice []byte) Block { + return blockFromSlice(slice, false) +} + +// BlockFromUnsafeSlice returns a Block equivalent to bs, which is not safe to +// access without safecopy. +func BlockFromUnsafeSlice(slice []byte) Block { + return blockFromSlice(slice, true) +} + +func blockFromSlice(slice []byte, needSafecopy bool) Block { + if len(slice) == 0 { + return Block{} + } + return Block{ + start: unsafe.Pointer(&slice[0]), + length: len(slice), + needSafecopy: needSafecopy, + } +} + +// BlockFromSafePointer returns a Block equivalent to [ptr, ptr+len), which is +// safe to access without safecopy. +// +// Preconditions: ptr+len does not overflow. +func BlockFromSafePointer(ptr unsafe.Pointer, len int) Block { + return blockFromPointer(ptr, len, false) +} + +// BlockFromUnsafePointer returns a Block equivalent to [ptr, ptr+len), which +// is not safe to access without safecopy. +// +// Preconditions: ptr+len does not overflow. 
+func BlockFromUnsafePointer(ptr unsafe.Pointer, len int) Block { + return blockFromPointer(ptr, len, true) +} + +func blockFromPointer(ptr unsafe.Pointer, len int, needSafecopy bool) Block { + if uptr := uintptr(ptr); uptr+uintptr(len) < uptr { + panic(fmt.Sprintf("ptr %#x + len %#x overflows", ptr, len)) + } + return Block{ + start: ptr, + length: len, + needSafecopy: needSafecopy, + } +} + +// DropFirst returns a Block equivalent to b, but with the first n bytes +// omitted. It is analogous to the [n:] operation on a slice, except that if n +// > b.Len(), DropFirst returns an empty Block instead of panicking. +// +// Preconditions: n >= 0. +func (b Block) DropFirst(n int) Block { + if n < 0 { + panic(fmt.Sprintf("invalid n: %d", n)) + } + return b.DropFirst64(uint64(n)) +} + +// DropFirst64 is equivalent to DropFirst but takes a uint64. +func (b Block) DropFirst64(n uint64) Block { + if n >= uint64(b.length) { + return Block{} + } + return Block{ + start: unsafe.Pointer(uintptr(b.start) + uintptr(n)), + length: b.length - int(n), + needSafecopy: b.needSafecopy, + } +} + +// TakeFirst returns a Block equivalent to the first n bytes of b. It is +// analogous to the [:n] operation on a slice, except that if n > b.Len(), +// TakeFirst returns a copy of b instead of panicking. +// +// Preconditions: n >= 0. +func (b Block) TakeFirst(n int) Block { + if n < 0 { + panic(fmt.Sprintf("invalid n: %d", n)) + } + return b.TakeFirst64(uint64(n)) +} + +// TakeFirst64 is equivalent to TakeFirst but takes a uint64. +func (b Block) TakeFirst64(n uint64) Block { + if n == 0 { + return Block{} + } + if n >= uint64(b.length) { + return b + } + return Block{ + start: b.start, + length: int(n), + needSafecopy: b.needSafecopy, + } +} + +// ToSlice returns a []byte equivalent to b. 
+func (b Block) ToSlice() []byte { + var bs []byte + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&bs)) + hdr.Data = uintptr(b.start) + hdr.Len = b.length + hdr.Cap = b.length + return bs +} + +// Addr returns b's start address as a uintptr. It returns uintptr instead of +// unsafe.Pointer so that code using safemem cannot obtain unsafe.Pointers +// without importing the unsafe package explicitly. +// +// Note that a uintptr is not recognized as a pointer by the garbage collector, +// such that if there are no uses of b after a call to b.Addr() and the address +// is to Go-managed memory, the returned uintptr does not prevent garbage +// collection of the pointee. +func (b Block) Addr() uintptr { + return uintptr(b.start) +} + +// Len returns b's length in bytes. +func (b Block) Len() int { + return b.length +} + +// NeedSafecopy returns true if accessing b.ToSlice() requires the use of safecopy. +func (b Block) NeedSafecopy() bool { + return b.needSafecopy +} + +// String implements fmt.Stringer.String. +func (b Block) String() string { + if uintptr(b.start) == 0 && b.length == 0 { + return "<nil>" + } + var suffix string + if b.needSafecopy { + suffix = "*" + } + return fmt.Sprintf("[%#x-%#x)%s", uintptr(b.start), uintptr(b.start)+uintptr(b.length), suffix) +} + +// Copy copies src.Len() or dst.Len() bytes, whichever is less, from src +// to dst and returns the number of bytes copied. +// +// If src and dst overlap, the data stored in dst is unspecified. 
+func Copy(dst, src Block) (int, error) { + if !dst.needSafecopy && !src.needSafecopy { + return copy(dst.ToSlice(), src.ToSlice()), nil + } + + n := dst.length + if n > src.length { + n = src.length + } + if n == 0 { + return 0, nil + } + + switch { + case dst.needSafecopy && !src.needSafecopy: + return safecopy.CopyOut(dst.start, src.TakeFirst(n).ToSlice()) + case !dst.needSafecopy && src.needSafecopy: + return safecopy.CopyIn(dst.TakeFirst(n).ToSlice(), src.start) + case dst.needSafecopy && src.needSafecopy: + n64, err := safecopy.Copy(dst.start, src.start, uintptr(n)) + return int(n64), err + default: + panic("unreachable") + } +} + +// Zero sets all bytes in dst to 0 and returns the number of bytes zeroed. +func Zero(dst Block) (int, error) { + if !dst.needSafecopy { + bs := dst.ToSlice() + for i := range bs { + bs[i] = 0 + } + return len(bs), nil + } + + n64, err := safecopy.ZeroOut(dst.start, uintptr(dst.length)) + return int(n64), err +} + +// Safecopy atomics are no slower than non-safecopy atomics, so use the former +// even when !b.needSafecopy to get consistent alignment checking. + +// SwapUint32 invokes safecopy.SwapUint32 on the first 4 bytes of b. +// +// Preconditions: b.Len() >= 4. +func SwapUint32(b Block, new uint32) (uint32, error) { + if b.length < 4 { + panic(fmt.Sprintf("insufficient length: %d", b.length)) + } + return safecopy.SwapUint32(b.start, new) +} + +// SwapUint64 invokes safecopy.SwapUint64 on the first 8 bytes of b. +// +// Preconditions: b.Len() >= 8. +func SwapUint64(b Block, new uint64) (uint64, error) { + if b.length < 8 { + panic(fmt.Sprintf("insufficient length: %d", b.length)) + } + return safecopy.SwapUint64(b.start, new) +} + +// CompareAndSwapUint32 invokes safecopy.CompareAndSwapUint32 on the first 4 +// bytes of b. +// +// Preconditions: b.Len() >= 4. 
+func CompareAndSwapUint32(b Block, old, new uint32) (uint32, error) { + if b.length < 4 { + panic(fmt.Sprintf("insufficient length: %d", b.length)) + } + return safecopy.CompareAndSwapUint32(b.start, old, new) +} diff --git a/pkg/sentry/safemem/io.go b/pkg/sentry/safemem/io.go new file mode 100644 index 000000000..fd917648b --- /dev/null +++ b/pkg/sentry/safemem/io.go @@ -0,0 +1,339 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package safemem + +import ( + "errors" + "io" + "math" +) + +// ErrEndOfBlockSeq is returned by BlockSeqWriter when attempting to write +// beyond the end of the BlockSeq. +var ErrEndOfBlockSeq = errors.New("write beyond end of BlockSeq") + +// Reader represents a streaming byte source like io.Reader. +type Reader interface { + // ReadToBlocks reads up to dsts.NumBytes() bytes into dsts and returns the + // number of bytes read. It may return a partial read without an error + // (i.e. (n, nil) where 0 < n < dsts.NumBytes()). It should not return a + // full read with an error (i.e. (dsts.NumBytes(), err) where err != nil); + // note that this differs from io.Reader.Read (in particular, io.EOF should + // not be returned if ReadToBlocks successfully reads dsts.NumBytes() + // bytes.) + ReadToBlocks(dsts BlockSeq) (uint64, error) +} + +// Writer represents a streaming byte sink like io.Writer. 
+type Writer interface { + // WriteFromBlocks writes up to srcs.NumBytes() bytes from srcs and returns + // the number of bytes written. It may return a partial write without an + // error (i.e. (n, nil) where 0 < n < srcs.NumBytes()). It should not + // return a full write with an error (i.e. srcs.NumBytes(), err) where err + // != nil). + WriteFromBlocks(srcs BlockSeq) (uint64, error) +} + +// ReadFullToBlocks repeatedly invokes r.ReadToBlocks until dsts.NumBytes() +// bytes have been read or ReadToBlocks returns an error. +func ReadFullToBlocks(r Reader, dsts BlockSeq) (uint64, error) { + var done uint64 + for !dsts.IsEmpty() { + n, err := r.ReadToBlocks(dsts) + done += n + if err != nil { + return done, err + } + dsts = dsts.DropFirst64(n) + } + return done, nil +} + +// WriteFullFromBlocks repeatedly invokes w.WriteFromBlocks until +// srcs.NumBytes() bytes have been written or WriteFromBlocks returns an error. +func WriteFullFromBlocks(w Writer, srcs BlockSeq) (uint64, error) { + var done uint64 + for !srcs.IsEmpty() { + n, err := w.WriteFromBlocks(srcs) + done += n + if err != nil { + return done, err + } + srcs = srcs.DropFirst64(n) + } + return done, nil +} + +// BlockSeqReader implements Reader by reading from a BlockSeq. +type BlockSeqReader struct { + Blocks BlockSeq +} + +// ReadToBlocks implements Reader.ReadToBlocks. +func (r *BlockSeqReader) ReadToBlocks(dsts BlockSeq) (uint64, error) { + n, err := CopySeq(dsts, r.Blocks) + r.Blocks = r.Blocks.DropFirst64(n) + if err != nil { + return n, err + } + if n < dsts.NumBytes() { + return n, io.EOF + } + return n, nil +} + +// BlockSeqWriter implements Writer by writing to a BlockSeq. +type BlockSeqWriter struct { + Blocks BlockSeq +} + +// WriteFromBlocks implements Writer.WriteFromBlocks. 
+func (w *BlockSeqWriter) WriteFromBlocks(srcs BlockSeq) (uint64, error) { + n, err := CopySeq(w.Blocks, srcs) + w.Blocks = w.Blocks.DropFirst64(n) + if err != nil { + return n, err + } + if n < srcs.NumBytes() { + return n, ErrEndOfBlockSeq + } + return n, nil +} + +// ReaderFunc implements Reader for a function with the semantics of +// Reader.ReadToBlocks. +type ReaderFunc func(dsts BlockSeq) (uint64, error) + +// ReadToBlocks implements Reader.ReadToBlocks. +func (f ReaderFunc) ReadToBlocks(dsts BlockSeq) (uint64, error) { + return f(dsts) +} + +// WriterFunc implements Writer for a function with the semantics of +// Writer.WriteFromBlocks. +type WriterFunc func(srcs BlockSeq) (uint64, error) + +// WriteFromBlocks implements Writer.WriteFromBlocks. +func (f WriterFunc) WriteFromBlocks(srcs BlockSeq) (uint64, error) { + return f(srcs) +} + +// ToIOReader implements io.Reader for a (safemem.)Reader. +// +// ToIOReader will return a successful partial read iff Reader.ReadToBlocks does +// so. +type ToIOReader struct { + Reader Reader +} + +// Read implements io.Reader.Read. +func (r ToIOReader) Read(dst []byte) (int, error) { + n, err := r.Reader.ReadToBlocks(BlockSeqOf(BlockFromSafeSlice(dst))) + return int(n), err +} + +// ToIOWriter implements io.Writer for a (safemem.)Writer. +type ToIOWriter struct { + Writer Writer +} + +// Write implements io.Writer.Write. +func (w ToIOWriter) Write(src []byte) (int, error) { + // io.Writer does not permit partial writes. + n, err := WriteFullFromBlocks(w.Writer, BlockSeqOf(BlockFromSafeSlice(src))) + return int(n), err +} + +// FromIOReader implements Reader for an io.Reader by repeatedly invoking +// io.Reader.Read until it returns an error or partial read. +// +// FromIOReader will return a successful partial read iff Reader.Read does so. +type FromIOReader struct { + Reader io.Reader +} + +// ReadToBlocks implements Reader.ReadToBlocks. 
+func (r FromIOReader) ReadToBlocks(dsts BlockSeq) (uint64, error) { + var buf []byte + var done uint64 + for !dsts.IsEmpty() { + dst := dsts.Head() + var n int + var err error + n, buf, err = r.readToBlock(dst, buf) + done += uint64(n) + if n != dst.Len() { + return done, err + } + dsts = dsts.Tail() + if err != nil { + if dsts.IsEmpty() && err == io.EOF { + return done, nil + } + return done, err + } + } + return done, nil +} + +func (r FromIOReader) readToBlock(dst Block, buf []byte) (int, []byte, error) { + // io.Reader isn't safecopy-aware, so we have to buffer Blocks that require + // safecopy. + if !dst.NeedSafecopy() { + n, err := r.Reader.Read(dst.ToSlice()) + return n, buf, err + } + if len(buf) < dst.Len() { + buf = make([]byte, dst.Len()) + } + rn, rerr := r.Reader.Read(buf[:dst.Len()]) + wbn, wberr := Copy(dst, BlockFromSafeSlice(buf[:rn])) + if wberr != nil { + return wbn, buf, wberr + } + return wbn, buf, rerr +} + +// FromIOWriter implements Writer for an io.Writer by repeatedly invoking +// io.Writer.Write until it returns an error or partial write. +// +// FromIOWriter will tolerate implementations of io.Writer.Write that return +// partial writes with a nil error in contravention of io.Writer's +// requirements, since Writer is permitted to do so. FromIOWriter will return a +// successful partial write iff Writer.Write does so. +type FromIOWriter struct { + Writer io.Writer +} + +// WriteFromBlocks implements Writer.WriteFromBlocks. 
+func (w FromIOWriter) WriteFromBlocks(srcs BlockSeq) (uint64, error) { + var buf []byte + var done uint64 + for !srcs.IsEmpty() { + src := srcs.Head() + var n int + var err error + n, buf, err = w.writeFromBlock(src, buf) + done += uint64(n) + if n != src.Len() || err != nil { + return done, err + } + srcs = srcs.Tail() + } + return done, nil +} + +func (w FromIOWriter) writeFromBlock(src Block, buf []byte) (int, []byte, error) { + // io.Writer isn't safecopy-aware, so we have to buffer Blocks that require + // safecopy. + if !src.NeedSafecopy() { + n, err := w.Writer.Write(src.ToSlice()) + return n, buf, err + } + if len(buf) < src.Len() { + buf = make([]byte, src.Len()) + } + bufn, buferr := Copy(BlockFromSafeSlice(buf[:src.Len()]), src) + wn, werr := w.Writer.Write(buf[:bufn]) + if werr != nil { + return wn, buf, werr + } + return wn, buf, buferr +} + +// FromVecReaderFunc implements Reader for a function that reads data into a +// [][]byte and returns the number of bytes read as an int64. +type FromVecReaderFunc struct { + ReadVec func(dsts [][]byte) (int64, error) +} + +// ReadToBlocks implements Reader.ReadToBlocks. +// +// ReadToBlocks calls r.ReadVec at most once. +func (r FromVecReaderFunc) ReadToBlocks(dsts BlockSeq) (uint64, error) { + if dsts.IsEmpty() { + return 0, nil + } + // Ensure that we don't pass a [][]byte with a total length > MaxInt64. + dsts = dsts.TakeFirst64(uint64(math.MaxInt64)) + dstSlices := make([][]byte, 0, dsts.NumBlocks()) + // Buffer Blocks that require safecopy. 
+ for tmp := dsts; !tmp.IsEmpty(); tmp = tmp.Tail() { + dst := tmp.Head() + if dst.NeedSafecopy() { + dstSlices = append(dstSlices, make([]byte, dst.Len())) + } else { + dstSlices = append(dstSlices, dst.ToSlice()) + } + } + rn, rerr := r.ReadVec(dstSlices) + dsts = dsts.TakeFirst64(uint64(rn)) + var done uint64 + var i int + for !dsts.IsEmpty() { + dst := dsts.Head() + if dst.NeedSafecopy() { + n, err := Copy(dst, BlockFromSafeSlice(dstSlices[i])) + done += uint64(n) + if err != nil { + return done, err + } + } else { + done += uint64(dst.Len()) + } + dsts = dsts.Tail() + i++ + } + return done, rerr +} + +// FromVecWriterFunc implements Writer for a function that writes data from a +// [][]byte and returns the number of bytes written. +type FromVecWriterFunc struct { + WriteVec func(srcs [][]byte) (int64, error) +} + +// WriteFromBlocks implements Writer.WriteFromBlocks. +// +// WriteFromBlocks calls w.WriteVec at most once. +func (w FromVecWriterFunc) WriteFromBlocks(srcs BlockSeq) (uint64, error) { + if srcs.IsEmpty() { + return 0, nil + } + // Ensure that we don't pass a [][]byte with a total length > MaxInt64. + srcs = srcs.TakeFirst64(uint64(math.MaxInt64)) + srcSlices := make([][]byte, 0, srcs.NumBlocks()) + // Buffer Blocks that require safecopy. + var buferr error + for tmp := srcs; !tmp.IsEmpty(); tmp = tmp.Tail() { + src := tmp.Head() + if src.NeedSafecopy() { + slice := make([]byte, src.Len()) + n, err := Copy(BlockFromSafeSlice(slice), src) + srcSlices = append(srcSlices, slice[:n]) + if err != nil { + buferr = err + break + } + } else { + srcSlices = append(srcSlices, src.ToSlice()) + } + } + n, err := w.WriteVec(srcSlices) + if err != nil { + return uint64(n), err + } + return uint64(n), buferr +} diff --git a/pkg/sentry/safemem/io_test.go b/pkg/sentry/safemem/io_test.go new file mode 100644 index 000000000..edac4c1d7 --- /dev/null +++ b/pkg/sentry/safemem/io_test.go @@ -0,0 +1,199 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package safemem + +import ( + "bytes" + "io" + "testing" +) + +func makeBlocks(slices ...[]byte) []Block { + blocks := make([]Block, 0, len(slices)) + for _, s := range slices { + blocks = append(blocks, BlockFromSafeSlice(s)) + } + return blocks +} + +func TestFromIOReaderFullRead(t *testing.T) { + r := FromIOReader{bytes.NewBufferString("foobar")} + dsts := makeBlocks(make([]byte, 3), make([]byte, 3)) + n, err := r.ReadToBlocks(BlockSeqFromSlice(dsts)) + if wantN := uint64(6); n != wantN || err != nil { + t.Errorf("ReadToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + for i, want := range [][]byte{[]byte("foo"), []byte("bar")} { + if got := dsts[i].ToSlice(); !bytes.Equal(got, want) { + t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want) + } + } +} + +type eofHidingReader struct { + Reader io.Reader +} + +func (r eofHidingReader) Read(dst []byte) (int, error) { + n, err := r.Reader.Read(dst) + if err == io.EOF { + return n, nil + } + return n, err +} + +func TestFromIOReaderPartialRead(t *testing.T) { + r := FromIOReader{eofHidingReader{bytes.NewBufferString("foob")}} + dsts := makeBlocks(make([]byte, 3), make([]byte, 3)) + n, err := r.ReadToBlocks(BlockSeqFromSlice(dsts)) + // FromIOReader should stop after the eofHidingReader returns (1, nil) + // for a 3-byte read. 
+ if wantN := uint64(4); n != wantN || err != nil { + t.Errorf("ReadToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + for i, want := range [][]byte{[]byte("foo"), []byte("b\x00\x00")} { + if got := dsts[i].ToSlice(); !bytes.Equal(got, want) { + t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want) + } + } +} + +type singleByteReader struct { + Reader io.Reader +} + +func (r singleByteReader) Read(dst []byte) (int, error) { + if len(dst) == 0 { + return r.Reader.Read(dst) + } + return r.Reader.Read(dst[:1]) +} + +func TestSingleByteReader(t *testing.T) { + r := FromIOReader{singleByteReader{bytes.NewBufferString("foobar")}} + dsts := makeBlocks(make([]byte, 3), make([]byte, 3)) + n, err := r.ReadToBlocks(BlockSeqFromSlice(dsts)) + // FromIOReader should stop after the singleByteReader returns (1, nil) + // for a 3-byte read. + if wantN := uint64(1); n != wantN || err != nil { + t.Errorf("ReadToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + for i, want := range [][]byte{[]byte("f\x00\x00"), []byte("\x00\x00\x00")} { + if got := dsts[i].ToSlice(); !bytes.Equal(got, want) { + t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want) + } + } +} + +func TestReadFullToBlocks(t *testing.T) { + r := FromIOReader{singleByteReader{bytes.NewBufferString("foobar")}} + dsts := makeBlocks(make([]byte, 3), make([]byte, 3)) + n, err := ReadFullToBlocks(r, BlockSeqFromSlice(dsts)) + // ReadFullToBlocks should call into FromIOReader => singleByteReader + // repeatedly until dsts is exhausted. 
+ if wantN := uint64(6); n != wantN || err != nil { + t.Errorf("ReadFullToBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + for i, want := range [][]byte{[]byte("foo"), []byte("bar")} { + if got := dsts[i].ToSlice(); !bytes.Equal(got, want) { + t.Errorf("dsts[%d]: got %q, wanted %q", i, got, want) + } + } +} + +func TestFromIOWriterFullWrite(t *testing.T) { + srcs := makeBlocks([]byte("foo"), []byte("bar")) + var dst bytes.Buffer + w := FromIOWriter{&dst} + n, err := w.WriteFromBlocks(BlockSeqFromSlice(srcs)) + if wantN := uint64(6); n != wantN || err != nil { + t.Errorf("WriteFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if got, want := dst.Bytes(), []byte("foobar"); !bytes.Equal(got, want) { + t.Errorf("dst: got %q, wanted %q", got, want) + } +} + +type limitedWriter struct { + Writer io.Writer + Done int + Limit int +} + +func (w *limitedWriter) Write(src []byte) (int, error) { + count := len(src) + if count > (w.Limit - w.Done) { + count = w.Limit - w.Done + } + n, err := w.Writer.Write(src[:count]) + w.Done += n + return n, err +} + +func TestFromIOWriterPartialWrite(t *testing.T) { + srcs := makeBlocks([]byte("foo"), []byte("bar")) + var dst bytes.Buffer + w := FromIOWriter{&limitedWriter{&dst, 0, 4}} + n, err := w.WriteFromBlocks(BlockSeqFromSlice(srcs)) + // FromIOWriter should stop after the limitedWriter returns (1, nil) for a + // 3-byte write. 
+ if wantN := uint64(4); n != wantN || err != nil { + t.Errorf("WriteFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if got, want := dst.Bytes(), []byte("foob"); !bytes.Equal(got, want) { + t.Errorf("dst: got %q, wanted %q", got, want) + } +} + +type singleByteWriter struct { + Writer io.Writer +} + +func (w singleByteWriter) Write(src []byte) (int, error) { + if len(src) == 0 { + return w.Writer.Write(src) + } + return w.Writer.Write(src[:1]) +} + +func TestSingleByteWriter(t *testing.T) { + srcs := makeBlocks([]byte("foo"), []byte("bar")) + var dst bytes.Buffer + w := FromIOWriter{singleByteWriter{&dst}} + n, err := w.WriteFromBlocks(BlockSeqFromSlice(srcs)) + // FromIOWriter should stop after the singleByteWriter returns (1, nil) + // for a 3-byte write. + if wantN := uint64(1); n != wantN || err != nil { + t.Errorf("WriteFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if got, want := dst.Bytes(), []byte("f"); !bytes.Equal(got, want) { + t.Errorf("dst: got %q, wanted %q", got, want) + } +} + +func TestWriteFullToBlocks(t *testing.T) { + srcs := makeBlocks([]byte("foo"), []byte("bar")) + var dst bytes.Buffer + w := FromIOWriter{singleByteWriter{&dst}} + n, err := WriteFullFromBlocks(w, BlockSeqFromSlice(srcs)) + // WriteFullToBlocks should call into FromIOWriter => singleByteWriter + // repeatedly until srcs is exhausted. + if wantN := uint64(6); n != wantN || err != nil { + t.Errorf("WriteFullFromBlocks: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if got, want := dst.Bytes(), []byte("foobar"); !bytes.Equal(got, want) { + t.Errorf("dst: got %q, wanted %q", got, want) + } +} diff --git a/pkg/sentry/safemem/safemem.go b/pkg/sentry/safemem/safemem.go new file mode 100644 index 000000000..2f8002004 --- /dev/null +++ b/pkg/sentry/safemem/safemem.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package safemem provides the Block and BlockSeq types. +package safemem diff --git a/pkg/sentry/safemem/seq_test.go b/pkg/sentry/safemem/seq_test.go new file mode 100644 index 000000000..3e83b3851 --- /dev/null +++ b/pkg/sentry/safemem/seq_test.go @@ -0,0 +1,196 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package safemem + +import ( + "bytes" + "reflect" + "testing" +) + +type blockSeqTest struct { + desc string + + pieces []string + haveOffset bool + offset uint64 + haveLimit bool + limit uint64 + + want string +} + +func (t blockSeqTest) NonEmptyByteSlices() [][]byte { + // t is a value, so we can mutate it freely. 
+ slices := make([][]byte, 0, len(t.pieces)) + for _, str := range t.pieces { + if t.haveOffset { + strOff := t.offset + if strOff > uint64(len(str)) { + strOff = uint64(len(str)) + } + str = str[strOff:] + t.offset -= strOff + } + if t.haveLimit { + strLim := t.limit + if strLim > uint64(len(str)) { + strLim = uint64(len(str)) + } + str = str[:strLim] + t.limit -= strLim + } + if len(str) != 0 { + slices = append(slices, []byte(str)) + } + } + return slices +} + +func (t blockSeqTest) BlockSeq() BlockSeq { + blocks := make([]Block, 0, len(t.pieces)) + for _, str := range t.pieces { + blocks = append(blocks, BlockFromSafeSlice([]byte(str))) + } + bs := BlockSeqFromSlice(blocks) + if t.haveOffset { + bs = bs.DropFirst64(t.offset) + } + if t.haveLimit { + bs = bs.TakeFirst64(t.limit) + } + return bs +} + +var blockSeqTests = []blockSeqTest{ + { + desc: "Empty sequence", + }, + { + desc: "Sequence of length 1", + pieces: []string{"foobar"}, + want: "foobar", + }, + { + desc: "Sequence of length 2", + pieces: []string{"foo", "bar"}, + want: "foobar", + }, + { + desc: "Empty Blocks", + pieces: []string{"", "foo", "", "", "bar", ""}, + want: "foobar", + }, + { + desc: "Sequence with non-zero offset", + pieces: []string{"foo", "bar"}, + haveOffset: true, + offset: 2, + want: "obar", + }, + { + desc: "Sequence with non-maximal limit", + pieces: []string{"foo", "bar"}, + haveLimit: true, + limit: 5, + want: "fooba", + }, + { + desc: "Sequence with offset and limit", + pieces: []string{"foo", "bar"}, + haveOffset: true, + offset: 2, + haveLimit: true, + limit: 3, + want: "oba", + }, +} + +func TestBlockSeqNumBytes(t *testing.T) { + for _, test := range blockSeqTests { + t.Run(test.desc, func(t *testing.T) { + if got, want := test.BlockSeq().NumBytes(), uint64(len(test.want)); got != want { + t.Errorf("NumBytes: got %d, wanted %d", got, want) + } + }) + } +} + +func TestBlockSeqIterBlocks(t *testing.T) { + // Tests BlockSeq iteration using Head/Tail. 
+ for _, test := range blockSeqTests { + t.Run(test.desc, func(t *testing.T) { + srcs := test.BlockSeq() + // "Note that a non-nil empty slice and a nil slice ... are not + // deeply equal." - reflect + slices := make([][]byte, 0, 0) + for !srcs.IsEmpty() { + src := srcs.Head() + slices = append(slices, src.ToSlice()) + nextSrcs := srcs.Tail() + if got, want := nextSrcs.NumBytes(), srcs.NumBytes()-uint64(src.Len()); got != want { + t.Fatalf("%v.Tail(): got %v (%d bytes), wanted %d bytes", srcs, nextSrcs, got, want) + } + srcs = nextSrcs + } + if wantSlices := test.NonEmptyByteSlices(); !reflect.DeepEqual(slices, wantSlices) { + t.Errorf("Accumulated slices: got %v, wanted %v", slices, wantSlices) + } + }) + } +} + +func TestBlockSeqIterBytes(t *testing.T) { + // Tests BlockSeq iteration using Head/DropFirst. + for _, test := range blockSeqTests { + t.Run(test.desc, func(t *testing.T) { + srcs := test.BlockSeq() + var dst bytes.Buffer + for !srcs.IsEmpty() { + src := srcs.Head() + var b [1]byte + n, err := Copy(BlockFromSafeSlice(b[:]), src) + if n != 1 || err != nil { + t.Fatalf("Copy: got (%v, %v), wanted (1, nil)", n, err) + } + dst.WriteByte(b[0]) + nextSrcs := srcs.DropFirst(1) + if got, want := nextSrcs.NumBytes(), srcs.NumBytes()-1; got != want { + t.Fatalf("%v.DropFirst(1): got %v (%d bytes), wanted %d bytes", srcs, nextSrcs, got, want) + } + srcs = nextSrcs + } + if got := string(dst.Bytes()); got != test.want { + t.Errorf("Copied string: got %q, wanted %q", got, test.want) + } + }) + } +} + +func TestBlockSeqDropBeyondLimit(t *testing.T) { + blocks := []Block{BlockFromSafeSlice([]byte("123")), BlockFromSafeSlice([]byte("4"))} + bs := BlockSeqFromSlice(blocks) + if got, want := bs.NumBytes(), uint64(4); got != want { + t.Errorf("%v.NumBytes(): got %d, wanted %d", bs, got, want) + } + bs = bs.TakeFirst(1) + if got, want := bs.NumBytes(), uint64(1); got != want { + t.Errorf("%v.NumBytes(): got %d, wanted %d", bs, got, want) + } + bs = bs.DropFirst(2) + if 
got, want := bs.NumBytes(), uint64(0); got != want { + t.Errorf("%v.NumBytes(): got %d, wanted %d", bs, got, want) + } +} diff --git a/pkg/sentry/safemem/seq_unsafe.go b/pkg/sentry/safemem/seq_unsafe.go new file mode 100644 index 000000000..e0d29a0b3 --- /dev/null +++ b/pkg/sentry/safemem/seq_unsafe.go @@ -0,0 +1,299 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package safemem + +import ( + "bytes" + "fmt" + "reflect" + "unsafe" +) + +// A BlockSeq represents a sequence of Blocks, each of which has non-zero +// length. +// +// BlockSeqs are immutable and may be copied by value. The zero value of +// BlockSeq represents an empty sequence. +type BlockSeq struct { + // If length is 0, then the BlockSeq is empty. Invariants: data == 0; + // offset == 0; limit == 0. + // + // If length is -1, then the BlockSeq represents the single Block{data, + // limit, false}. Invariants: offset == 0; limit > 0; limit does not + // overflow the range of an int. + // + // If length is -2, then the BlockSeq represents the single Block{data, + // limit, true}. Invariants: offset == 0; limit > 0; limit does not + // overflow the range of an int. + // + // Otherwise, length >= 2, and the BlockSeq represents the `length` Blocks + // in the array of Blocks starting at address `data`, starting at `offset` + // bytes into the first Block and limited to the following `limit` bytes. 
+ // Invariants: data != 0; offset < len(data[0]); limit > 0; offset+limit <= + // the combined length of all Blocks in the array; the first Block in the + // array has non-zero length. + // + // length is never 1; sequences consisting of a single Block are always + // stored inline (with length < 0). + data unsafe.Pointer + length int + offset int + limit uint64 +} + +// BlockSeqOf returns a BlockSeq representing the single Block b. +func BlockSeqOf(b Block) BlockSeq { + bs := BlockSeq{ + data: b.start, + length: -1, + limit: uint64(b.length), + } + if b.needSafecopy { + bs.length = -2 + } + return bs +} + +// BlockSeqFromSlice returns a BlockSeq representing all Blocks in slice. +// If slice contains Blocks with zero length, BlockSeq will skip them during +// iteration. +// +// Whether the returned BlockSeq shares memory with slice is unspecified; +// clients should avoid mutating slices passed to BlockSeqFromSlice. +// +// Preconditions: The combined length of all Blocks in slice <= math.MaxUint64. +func BlockSeqFromSlice(slice []Block) BlockSeq { + slice = skipEmpty(slice) + var limit uint64 + for _, b := range slice { + sum := limit + uint64(b.Len()) + if sum < limit { + panic("BlockSeq length overflows uint64") + } + limit = sum + } + return blockSeqFromSliceLimited(slice, limit) +} + +// Preconditions: The combined length of all Blocks in slice <= limit. If +// len(slice) != 0, the first Block in slice has non-zero length, and limit > +// 0. +func blockSeqFromSliceLimited(slice []Block, limit uint64) BlockSeq { + switch len(slice) { + case 0: + return BlockSeq{} + case 1: + return BlockSeqOf(slice[0].TakeFirst64(limit)) + default: + return BlockSeq{ + data: unsafe.Pointer(&slice[0]), + length: len(slice), + limit: limit, + } + } +} + +func skipEmpty(slice []Block) []Block { + for i, b := range slice { + if b.Len() != 0 { + return slice[i:] + } + } + return nil +} + +// IsEmpty returns true if bs contains no Blocks. 
+// +// Invariants: bs.IsEmpty() == (bs.NumBlocks() == 0) == (bs.NumBytes() == 0). +// (Of these, prefer to use bs.IsEmpty().) +func (bs BlockSeq) IsEmpty() bool { + return bs.length == 0 +} + +// NumBlocks returns the number of Blocks in bs. +func (bs BlockSeq) NumBlocks() int { + // In general, we have to count: if bs represents a windowed slice then the + // slice may contain Blocks with zero length, and bs.length may be larger + // than the actual number of Blocks due to bs.limit. + var n int + for !bs.IsEmpty() { + n++ + bs = bs.Tail() + } + return n +} + +// NumBytes returns the sum of Block.Len() for all Blocks in bs. +func (bs BlockSeq) NumBytes() uint64 { + return bs.limit +} + +// Head returns the first Block in bs. +// +// Preconditions: !bs.IsEmpty(). +func (bs BlockSeq) Head() Block { + if bs.length == 0 { + panic("empty BlockSeq") + } + if bs.length < 0 { + return bs.internalBlock() + } + return (*Block)(bs.data).DropFirst(bs.offset).TakeFirst64(bs.limit) +} + +// Preconditions: bs.length < 0. +func (bs BlockSeq) internalBlock() Block { + return Block{ + start: bs.data, + length: int(bs.limit), + needSafecopy: bs.length == -2, + } +} + +// Tail returns a BlockSeq consisting of all Blocks in bs after the first. +// +// Preconditions: !bs.IsEmpty(). +func (bs BlockSeq) Tail() BlockSeq { + if bs.length == 0 { + panic("empty BlockSeq") + } + if bs.length < 0 { + return BlockSeq{} + } + head := (*Block)(bs.data).DropFirst(bs.offset) + headLen := uint64(head.Len()) + if headLen >= bs.limit { + // The head Block exhausts the limit, so the tail is empty. 
+ return BlockSeq{} + } + var extSlice []Block + extSliceHdr := (*reflect.SliceHeader)(unsafe.Pointer(&extSlice)) + extSliceHdr.Data = uintptr(bs.data) + extSliceHdr.Len = bs.length + extSliceHdr.Cap = bs.length + tailSlice := skipEmpty(extSlice[1:]) + tailLimit := bs.limit - headLen + return blockSeqFromSliceLimited(tailSlice, tailLimit) +} + +// DropFirst returns a BlockSeq equivalent to bs, but with the first n bytes +// omitted. If n > bs.NumBytes(), DropFirst returns an empty BlockSeq. +// +// Preconditions: n >= 0. +func (bs BlockSeq) DropFirst(n int) BlockSeq { + if n < 0 { + panic(fmt.Sprintf("invalid n: %d", n)) + } + return bs.DropFirst64(uint64(n)) +} + +// DropFirst64 is equivalent to DropFirst but takes an uint64. +func (bs BlockSeq) DropFirst64(n uint64) BlockSeq { + if n >= bs.limit { + return BlockSeq{} + } + for { + // Calling bs.Head() here is surprisingly expensive, so inline getting + // the head's length. + var headLen uint64 + if bs.length < 0 { + headLen = bs.limit + } else { + headLen = uint64((*Block)(bs.data).Len() - bs.offset) + } + if n < headLen { + // Dropping ends partway through the head Block. + if bs.length < 0 { + return BlockSeqOf(bs.internalBlock().DropFirst64(n)) + } + bs.offset += int(n) + bs.limit -= n + return bs + } + n -= headLen + bs = bs.Tail() + } +} + +// TakeFirst returns a BlockSeq equivalent to the first n bytes of bs. If n > +// bs.NumBytes(), TakeFirst returns a BlockSeq equivalent to bs. +// +// Preconditions: n >= 0. +func (bs BlockSeq) TakeFirst(n int) BlockSeq { + if n < 0 { + panic(fmt.Sprintf("invalid n: %d", n)) + } + return bs.TakeFirst64(uint64(n)) +} + +// TakeFirst64 is equivalent to TakeFirst but takes a uint64. +func (bs BlockSeq) TakeFirst64(n uint64) BlockSeq { + if n == 0 { + return BlockSeq{} + } + if bs.limit > n { + bs.limit = n + } + return bs +} + +// String implements fmt.Stringer.String. 
+func (bs BlockSeq) String() string { + var buf bytes.Buffer + buf.WriteByte('[') + var sep string + for !bs.IsEmpty() { + buf.WriteString(sep) + sep = " " + buf.WriteString(bs.Head().String()) + bs = bs.Tail() + } + buf.WriteByte(']') + return buf.String() +} + +// CopySeq copies srcs.NumBytes() or dsts.NumBytes() bytes, whichever is less, +// from srcs to dsts and returns the number of bytes copied. +// +// If srcs and dsts overlap, the data stored in dsts is unspecified. +func CopySeq(dsts, srcs BlockSeq) (uint64, error) { + var done uint64 + for !dsts.IsEmpty() && !srcs.IsEmpty() { + dst := dsts.Head() + src := srcs.Head() + n, err := Copy(dst, src) + done += uint64(n) + if err != nil { + return done, err + } + dsts = dsts.DropFirst(n) + srcs = srcs.DropFirst(n) + } + return done, nil +} + +// ZeroSeq sets all bytes in dsts to 0 and returns the number of bytes zeroed. +func ZeroSeq(dsts BlockSeq) (uint64, error) { + var done uint64 + for !dsts.IsEmpty() { + n, err := Zero(dsts.Head()) + done += uint64(n) + if err != nil { + return done, err + } + dsts = dsts.DropFirst(n) + } + return done, nil +} diff --git a/pkg/sentry/sighandling/BUILD b/pkg/sentry/sighandling/BUILD new file mode 100644 index 000000000..daaad7c90 --- /dev/null +++ b/pkg/sentry/sighandling/BUILD @@ -0,0 +1,18 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sighandling", + srcs = [ + "sighandling.go", + "sighandling_unsafe.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/sighandling", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/arch", + "//pkg/sentry/kernel", + ], +) diff --git a/pkg/sentry/sighandling/sighandling.go b/pkg/sentry/sighandling/sighandling.go new file mode 100644 index 000000000..1a94b535b --- /dev/null +++ b/pkg/sentry/sighandling/sighandling.go @@ -0,0 +1,116 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sighandling contains helpers for handling signals to applications. +package sighandling + +import ( + "os" + "os/signal" + "reflect" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" +) + +// numSignals is the number of normal (non-realtime) signals on Linux. +const numSignals = 32 + +// forwardSignals listens for incoming signals and delivers them to k. It stops +// when the stop channel is closed. +func forwardSignals(k *kernel.Kernel, sigchans []chan os.Signal, stop chan struct{}) { + // Build a select case. + sc := []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(stop)}} + for _, sigchan := range sigchans { + sc = append(sc, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(sigchan)}) + } + + for { + // Wait for a notification. + index, _, ok := reflect.Select(sc) + + // Was it the stop channel? + if index == 0 { + if !ok { + break + } + continue + } + + // How about a different close? + if !ok { + panic("signal channel closed unexpectedly") + } + + // Otherwise, it was a signal on channel N. Index 0 represents the stop + // channel, so index N represents the channel for signal N. + if !k.SendExternalSignal(&arch.SignalInfo{Signo: int32(index)}, "sentry") { + // Kernel is not ready to receive signals. 
+ // + // Kill ourselves if this signal would have killed the + // process before StartForwarding was called. i.e., all + // _SigKill signals; see Go + // src/runtime/sigtab_linux_generic.go. + // + // Otherwise ignore the signal. + // + // TODO: Convert Go's runtime.raise from + // tkill to tgkill so StartForwarding doesn't need to + // be called until after filter installation. + switch linux.Signal(index) { + case linux.SIGHUP, linux.SIGINT, linux.SIGTERM: + dieFromSignal(linux.Signal(index)) + } + } + } + + // Close all individual channels. + for _, sigchan := range sigchans { + signal.Stop(sigchan) + close(sigchan) + } +} + +// StartForwarding ensures that synchronous signals are forwarded to k and +// returns a callback that stops signal forwarding. +func StartForwarding(k *kernel.Kernel) func() { + stop := make(chan struct{}) + + // Register individual channels. One channel per standard signal is + // required as os.Notify() is non-blocking and may drop signals. To avoid + // this, standard signals have to be queued separately. Channel size 1 is + // enough for standard signals as their semantics allow de-duplication. + // + // External real-time signals are not supported. We rely on the go-runtime + // for their handling. + var sigchans []chan os.Signal + for sig := 1; sig <= numSignals+1; sig++ { + sigchan := make(chan os.Signal, 1) + sigchans = append(sigchans, sigchan) + + // SignalPanic is handled by Run. + if linux.Signal(sig) == kernel.SignalPanic { + continue + } + + signal.Notify(sigchan, syscall.Signal(sig)) + } + // Start up our listener. + go forwardSignals(k, sigchans, stop) // S/R-SAFE: synchronized by Kernel.extMu + + // ... shouldn't this wait until the forwardSignals goroutine returns? 
+ return func() { close(stop) } +} diff --git a/pkg/sentry/sighandling/sighandling_unsafe.go b/pkg/sentry/sighandling/sighandling_unsafe.go new file mode 100644 index 000000000..a455b919f --- /dev/null +++ b/pkg/sentry/sighandling/sighandling_unsafe.go @@ -0,0 +1,74 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sighandling + +import ( + "fmt" + "runtime" + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +// TODO: Move to pkg/abi/linux along with definitions in +// pkg/sentry/arch. +type sigaction struct { + handler uintptr + flags uint64 + restorer uintptr + mask uint64 +} + +// IgnoreChildStop sets the SA_NOCLDSTOP flag, causing child processes to not +// generate SIGCHLD when they stop. +func IgnoreChildStop() error { + var sa sigaction + + // Get the existing signal handler information, and set the flag. + if _, _, e := syscall.RawSyscall6(syscall.SYS_RT_SIGACTION, uintptr(syscall.SIGCHLD), 0, uintptr(unsafe.Pointer(&sa)), linux.SignalSetSize, 0, 0); e != 0 { + return e + } + sa.flags |= linux.SA_NOCLDSTOP + if _, _, e := syscall.RawSyscall6(syscall.SYS_RT_SIGACTION, uintptr(syscall.SIGCHLD), uintptr(unsafe.Pointer(&sa)), 0, linux.SignalSetSize, 0, 0); e != 0 { + return e + } + + return nil +} + +// dieFromSignal kills the current process with sig. +// +// Preconditions: The default action of sig is termination. 
+func dieFromSignal(sig linux.Signal) { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + sa := sigaction{handler: linux.SIG_DFL} + if _, _, e := syscall.RawSyscall6(syscall.SYS_RT_SIGACTION, uintptr(sig), uintptr(unsafe.Pointer(&sa)), 0, linux.SignalSetSize, 0, 0); e != 0 { + panic(fmt.Sprintf("rt_sigaction failed: %v", e)) + } + + set := linux.MakeSignalSet(sig) + if _, _, e := syscall.RawSyscall6(syscall.SYS_RT_SIGPROCMASK, linux.SIG_UNBLOCK, uintptr(unsafe.Pointer(&set)), 0, linux.SignalSetSize, 0, 0); e != 0 { + panic(fmt.Sprintf("rt_sigprocmask failed: %v", e)) + } + + if err := syscall.Tgkill(syscall.Getpid(), syscall.Gettid(), syscall.Signal(sig)); err != nil { + panic(fmt.Sprintf("tgkill failed: %v", err)) + } + + panic("failed to die") +} diff --git a/pkg/sentry/socket/BUILD b/pkg/sentry/socket/BUILD new file mode 100644 index 000000000..87e32df37 --- /dev/null +++ b/pkg/sentry/socket/BUILD @@ -0,0 +1,37 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "socket_state", + srcs = [ + "socket.go", + ], + out = "socket_state_autogen.go", + package = "socket", +) + +go_library( + name = "socket", + srcs = [ + "socket.go", + "socket_state_autogen.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserr", + "//pkg/tcpip/transport/unix", + ], +) diff --git a/pkg/sentry/socket/control/BUILD b/pkg/sentry/socket/control/BUILD new file mode 100644 index 000000000..25de2f655 --- /dev/null +++ b/pkg/sentry/socket/control/BUILD @@ -0,0 +1,39 @@ +package(licenses = ["notice"]) # Apache 2.0 + 
+load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "control_state", + srcs = [ + "control.go", + ], + out = "control_state.go", + imports = [ + "gvisor.googlesource.com/gvisor/pkg/sentry/fs", + ], + package = "control", +) + +go_library( + name = "control", + srcs = [ + "control.go", + "control_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/control", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/binary", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserror", + "//pkg/tcpip/transport/unix", + ], +) diff --git a/pkg/sentry/socket/control/control.go b/pkg/sentry/socket/control/control.go new file mode 100644 index 000000000..cb34cbc85 --- /dev/null +++ b/pkg/sentry/socket/control/control.go @@ -0,0 +1,370 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package control provides internal representations of socket control +// messages. 
+package control + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +const maxInt = int(^uint(0) >> 1) + +// SCMCredentials represents a SCM_CREDENTIALS socket control message. +type SCMCredentials interface { + unix.CredentialsControlMessage + + // Credentials returns properly namespaced values for the sender's pid, uid + // and gid. + Credentials(t *kernel.Task) (kernel.ThreadID, auth.UID, auth.GID) +} + +// SCMRights represents a SCM_RIGHTS socket control message. +type SCMRights interface { + unix.RightsControlMessage + + // Files returns up to max RightsFiles. + Files(ctx context.Context, max int) RightsFiles +} + +// RightsFiles represents a SCM_RIGHTS socket control message. A reference is +// maintained for each fs.File and is release either when an FD is created or +// when the Release method is called. +type RightsFiles []*fs.File + +// NewSCMRights creates a new SCM_RIGHTS socket control message representation +// using local sentry FDs. +func NewSCMRights(t *kernel.Task, fds []int32) (SCMRights, error) { + files := make(RightsFiles, 0, len(fds)) + for _, fd := range fds { + file, _ := t.FDMap().GetDescriptor(kdefs.FD(fd)) + if file == nil { + files.Release() + return nil, syserror.EBADF + } + files = append(files, file) + } + return &files, nil +} + +// Files implements SCMRights.Files. 
+func (fs *RightsFiles) Files(ctx context.Context, max int) RightsFiles { + n := max + if l := len(*fs); n > l { + n = l + } + rf := (*fs)[:n] + *fs = (*fs)[n:] + return rf +} + +// Clone implements unix.RightsControlMessage.Clone. +func (fs *RightsFiles) Clone() unix.RightsControlMessage { + nfs := append(RightsFiles(nil), *fs...) + for _, nf := range nfs { + nf.IncRef() + } + return &nfs +} + +// Release implements unix.RightsControlMessage.Release. +func (fs *RightsFiles) Release() { + for _, f := range *fs { + f.DecRef() + } + *fs = nil +} + +// rightsFDs gets up to the specified maximum number of FDs. +func rightsFDs(t *kernel.Task, rights SCMRights, cloexec bool, max int) []int32 { + files := rights.Files(t, max) + fds := make([]int32, 0, len(files)) + for i := 0; i < max && len(files) > 0; i++ { + fd, err := t.FDMap().NewFDFrom(0, files[0], kernel.FDFlags{cloexec}, t.ThreadGroup().Limits()) + files[0].DecRef() + files = files[1:] + if err != nil { + t.Warningf("Error inserting FD: %v", err) + // This is what Linux does. + break + } + + fds = append(fds, int32(fd)) + } + return fds +} + +// PackRights packs as many FDs as will fit into the unused capacity of buf. +func PackRights(t *kernel.Task, rights SCMRights, cloexec bool, buf []byte) []byte { + maxFDs := (cap(buf) - len(buf) - linux.SizeOfControlMessageHeader) / 4 + // Linux does not return any FDs if none fit. + if maxFDs <= 0 { + return buf + } + fds := rightsFDs(t, rights, cloexec, maxFDs) + align := t.Arch().Width() + return putCmsg(buf, linux.SCM_RIGHTS, align, fds) +} + +// scmCredentials represents an SCM_CREDENTIALS socket control message. +type scmCredentials struct { + t *kernel.Task + kuid auth.KUID + kgid auth.KGID +} + +// NewSCMCredentials creates a new SCM_CREDENTIALS socket control message +// representation. 
+func NewSCMCredentials(t *kernel.Task, cred linux.ControlMessageCredentials) (SCMCredentials, error) { + tcred := t.Credentials() + kuid, err := tcred.UseUID(auth.UID(cred.UID)) + if err != nil { + return nil, err + } + kgid, err := tcred.UseGID(auth.GID(cred.GID)) + if err != nil { + return nil, err + } + if kernel.ThreadID(cred.PID) != t.ThreadGroup().ID() && !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, t.PIDNamespace().UserNamespace()) { + return nil, syserror.EPERM + } + return &scmCredentials{t, kuid, kgid}, nil +} + +// Equals implements unix.CredentialsControlMessage.Equals. +func (c *scmCredentials) Equals(oc unix.CredentialsControlMessage) bool { + if oc, _ := oc.(*scmCredentials); oc != nil && *c == *oc { + return true + } + return false +} + +func putUint64(buf []byte, n uint64) []byte { + usermem.ByteOrder.PutUint64(buf[len(buf):len(buf)+8], n) + return buf[:len(buf)+8] +} + +func putUint32(buf []byte, n uint32) []byte { + usermem.ByteOrder.PutUint32(buf[len(buf):len(buf)+4], n) + return buf[:len(buf)+4] +} + +// putCmsg writes a control message header and as much data as will fit into +// the unused capacity of a buffer. +func putCmsg(buf []byte, msgType uint32, align uint, data []int32) []byte { + space := AlignDown(cap(buf)-len(buf), 4) + + // We can't write to space that doesn't exist, so if we are going to align + // the available space, we must align down. + // + // align must be >= 4 and each data int32 is 4 bytes. The length of the + // header is already aligned, so if we align to the with of the data there + // are two cases: + // 1. The aligned length is less than the length of the header. The + // unaligned length was also less than the length of the header, so we + // can't write anything. + // 2. The aligned length is greater than or equal to the length of the + // header. We can write the header plus zero or more datas. We can't write + // a partial int32, so the length of the message will be + // min(aligned length, header + datas). 
+ if space < linux.SizeOfControlMessageHeader { + return buf + } + + length := 4*len(data) + linux.SizeOfControlMessageHeader + if length > space { + length = space + } + buf = putUint64(buf, uint64(length)) + buf = putUint32(buf, linux.SOL_SOCKET) + buf = putUint32(buf, msgType) + for _, d := range data { + if len(buf)+4 > cap(buf) { + break + } + buf = putUint32(buf, uint32(d)) + } + return alignSlice(buf, align) +} + +// Credentials implements SCMCredentials.Credentials. +func (c *scmCredentials) Credentials(t *kernel.Task) (kernel.ThreadID, auth.UID, auth.GID) { + // "When a process's user and group IDs are passed over a UNIX domain + // socket to a process in a different user namespace (see the description + // of SCM_CREDENTIALS in unix(7)), they are translated into the + // corresponding values as per the receiving process's user and group ID + // mappings." - user_namespaces(7) + pid := t.PIDNamespace().IDOfTask(c.t) + uid := c.kuid.In(t.UserNamespace()).OrOverflow() + gid := c.kgid.In(t.UserNamespace()).OrOverflow() + + return pid, uid, gid +} + +// PackCredentials packs the credentials in the control message (or default +// credentials if none) into a buffer. +func PackCredentials(t *kernel.Task, creds SCMCredentials, buf []byte) []byte { + align := t.Arch().Width() + + // Default credentials if none are available. + pid := kernel.ThreadID(0) + uid := auth.UID(auth.NobodyKUID) + gid := auth.GID(auth.NobodyKGID) + + if creds != nil { + pid, uid, gid = creds.Credentials(t) + } + c := []int32{int32(pid), int32(uid), int32(gid)} + return putCmsg(buf, linux.SCM_CREDENTIALS, align, c) +} + +// AlignUp rounds a length up to an alignment. align must be a power of 2. +func AlignUp(length int, align uint) int { + return (length + int(align) - 1) & ^(int(align) - 1) +} + +// AlignDown rounds a down to an alignment. align must be a power of 2. 
+func AlignDown(length int, align uint) int { + return length & ^(int(align) - 1) +} + +// alignSlice extends a slice's length (up to the capacity) to align it. +func alignSlice(buf []byte, align uint) []byte { + aligned := AlignUp(len(buf), align) + if aligned > cap(buf) { + // Linux allows unaligned data if there isn't room for alignment. + // Since there isn't room for alignment, there isn't room for any + // additional messages either. + return buf + } + return buf[:aligned] +} + +// Parse parses a raw socket control message into portable objects. +func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (unix.ControlMessages, error) { + var ( + fds linux.ControlMessageRights + + haveCreds bool + creds linux.ControlMessageCredentials + ) + + for i := 0; i < len(buf); { + if i+linux.SizeOfControlMessageHeader > len(buf) { + return unix.ControlMessages{}, syserror.EINVAL + } + + var h linux.ControlMessageHeader + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], usermem.ByteOrder, &h) + + if h.Length < uint64(linux.SizeOfControlMessageHeader) { + return unix.ControlMessages{}, syserror.EINVAL + } + if h.Length > uint64(len(buf)-i) { + return unix.ControlMessages{}, syserror.EINVAL + } + if h.Level != linux.SOL_SOCKET { + return unix.ControlMessages{}, syserror.EINVAL + } + + i += linux.SizeOfControlMessageHeader + length := int(h.Length) - linux.SizeOfControlMessageHeader + + // The use of t.Arch().Width() is analogous to Linux's use of + // sizeof(long) in CMSG_ALIGN. 
+ width := t.Arch().Width() + + switch h.Type { + case linux.SCM_RIGHTS: + rightsSize := AlignDown(length, linux.SizeOfControlMessageRight) + numRights := rightsSize / linux.SizeOfControlMessageRight + + if len(fds)+numRights > linux.SCM_MAX_FD { + return unix.ControlMessages{}, syserror.EINVAL + } + + for j := i; j < i+rightsSize; j += linux.SizeOfControlMessageRight { + fds = append(fds, int32(usermem.ByteOrder.Uint32(buf[j:j+linux.SizeOfControlMessageRight]))) + } + + i += AlignUp(length, width) + + case linux.SCM_CREDENTIALS: + if length < linux.SizeOfControlMessageCredentials { + return unix.ControlMessages{}, syserror.EINVAL + } + + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], usermem.ByteOrder, &creds) + haveCreds = true + i += AlignUp(length, width) + + default: + // Unknown message type. + return unix.ControlMessages{}, syserror.EINVAL + } + } + + var credentials SCMCredentials + if haveCreds { + var err error + if credentials, err = NewSCMCredentials(t, creds); err != nil { + return unix.ControlMessages{}, err + } + } else { + credentials = makeCreds(t, socketOrEndpoint) + } + + var rights SCMRights + if len(fds) > 0 { + var err error + if rights, err = NewSCMRights(t, fds); err != nil { + return unix.ControlMessages{}, err + } + } + + if credentials == nil && rights == nil { + return unix.ControlMessages{}, nil + } + + return unix.ControlMessages{Credentials: credentials, Rights: rights}, nil +} + +func makeCreds(t *kernel.Task, socketOrEndpoint interface{}) SCMCredentials { + if t == nil || socketOrEndpoint == nil { + return nil + } + if cr, ok := socketOrEndpoint.(unix.Credentialer); ok && (cr.Passcred() || cr.ConnectedPasscred()) { + tcred := t.Credentials() + return &scmCredentials{t, tcred.EffectiveKUID, tcred.EffectiveKGID} + } + return nil +} + +// New creates default control messages if needed. 
+func New(t *kernel.Task, socketOrEndpoint interface{}, rights SCMRights) unix.ControlMessages { + return unix.ControlMessages{ + Credentials: makeCreds(t, socketOrEndpoint), + Rights: rights, + } +} diff --git a/pkg/sentry/socket/epsocket/BUILD b/pkg/sentry/socket/epsocket/BUILD new file mode 100644 index 000000000..0e463a92a --- /dev/null +++ b/pkg/sentry/socket/epsocket/BUILD @@ -0,0 +1,61 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "epsocket_state", + srcs = [ + "epsocket.go", + "save_restore.go", + "stack.go", + ], + out = "epsocket_state.go", + package = "epsocket", +) + +go_library( + name = "epsocket", + srcs = [ + "device.go", + "epsocket.go", + "epsocket_state.go", + "provider.go", + "save_restore.go", + "stack.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/epsocket", + visibility = [ + "//pkg/sentry:internal", + ], + deps = [ + "//pkg/abi/linux", + "//pkg/binary", + "//pkg/log", + "//pkg/refs", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/inet", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/safemem", + "//pkg/sentry/socket", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserr", + "//pkg/syserror", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/network/ipv6", + "//pkg/tcpip/stack", + "//pkg/tcpip/transport/tcp", + "//pkg/tcpip/transport/udp", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/socket/epsocket/device.go b/pkg/sentry/socket/epsocket/device.go new file mode 100644 index 000000000..17f2c9559 --- /dev/null +++ b/pkg/sentry/socket/epsocket/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package epsocket + +import "gvisor.googlesource.com/gvisor/pkg/sentry/device" + +// epsocketDevice is the endpoint socket virtual device. +var epsocketDevice = device.NewAnonDevice() diff --git a/pkg/sentry/socket/epsocket/epsocket.go b/pkg/sentry/socket/epsocket/epsocket.go new file mode 100644 index 000000000..3fc3ea58f --- /dev/null +++ b/pkg/sentry/socket/epsocket/epsocket.go @@ -0,0 +1,1230 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package epsocket provides an implementation of the socket.Socket interface +// that is backed by a tcpip.Endpoint. +// +// It does not depend on any particular endpoint implementation, and thus can +// be used to expose certain endpoints to the sentry while leaving others out, +// for example, TCP endpoints and Unix-domain endpoints. 
+// +// Lock ordering: netstack => mm: ioSequencePayload copies user memory inside +// tcpip.Endpoint.Write(). Netstack is allowed to (and does) hold locks during +// this operation. +package epsocket + +import ( + "bytes" + "math" + "strings" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const sizeOfInt32 int = 4 + +// ntohs converts a 16-bit number from network byte order to host byte order. It +// assumes that the host is little endian. +func ntohs(v uint16) uint16 { + return v<<8 | v>>8 +} + +// htons converts a 16-bit number from host byte order to network byte order. It +// assumes that the host is little endian. +func htons(v uint16) uint16 { + return ntohs(v) +} + +// commonEndpoint represents the intersection of a tcpip.Endpoint and a +// unix.Endpoint. +type commonEndpoint interface { + // GetLocalAddress implements tcpip.Endpoint.GetLocalAddress and + // unix.Endpoint.GetLocalAddress. 
+ GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) + + // GetRemoteAddress implements tcpip.Endpoint.GetRemoteAddress and + // unix.Endpoint.GetRemoteAddress. + GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) + + // Readiness implements tcpip.Endpoint.Readiness and + // unix.Endpoint.Readiness. + Readiness(mask waiter.EventMask) waiter.EventMask + + // SetSockOpt implements tcpip.Endpoint.SetSockOpt and + // unix.Endpoint.SetSockOpt. + SetSockOpt(interface{}) *tcpip.Error + + // GetSockOpt implements tcpip.Endpoint.GetSockOpt and + // unix.Endpoint.GetSockOpt. + GetSockOpt(interface{}) *tcpip.Error +} + +// SocketOperations encapsulates all the state needed to represent a network stack +// endpoint in the kernel context. +type SocketOperations struct { + socket.ReceiveTimeout + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + *waiter.Queue + + family int + stack inet.Stack + Endpoint tcpip.Endpoint + skType unix.SockType + + // readMu protects access to readView, control, and sender. + readMu sync.Mutex `state:"nosave"` + readView buffer.View + sender tcpip.FullAddress +} + +// New creates a new endpoint socket. +func New(t *kernel.Task, family int, skType unix.SockType, queue *waiter.Queue, endpoint tcpip.Endpoint) *fs.File { + dirent := socket.NewDirent(t, epsocketDevice) + return fs.NewFile(t, dirent, fs.FileFlags{Read: true, Write: true}, &SocketOperations{ + Queue: queue, + family: family, + stack: t.NetworkContext(), + Endpoint: endpoint, + skType: skType, + }) +} + +var sockAddrInetSize = int(binary.Size(linux.SockAddrInet{})) +var sockAddrInet6Size = int(binary.Size(linux.SockAddrInet6{})) + +// GetAddress reads an sockaddr struct from the given address and converts it +// to the FullAddress format. It supports AF_UNIX, AF_INET and AF_INET6 +// addresses. 
+func GetAddress(sfamily int, addr []byte) (tcpip.FullAddress, *syserr.Error) { + // Make sure we have at least 2 bytes for the address family. + if len(addr) < 2 { + return tcpip.FullAddress{}, syserr.ErrInvalidArgument + } + + family := usermem.ByteOrder.Uint16(addr) + if family != uint16(sfamily) { + return tcpip.FullAddress{}, syserr.ErrAddressFamilyNotSupported + } + + // Get the rest of the fields based on the address family. + switch family { + case linux.AF_UNIX: + path := addr[2:] + // Drop the terminating NUL (if one exists) and everything after it. + // Skip the first byte, which is NUL for abstract paths. + if len(path) > 1 { + if n := bytes.IndexByte(path[1:], 0); n >= 0 { + path = path[:n+1] + } + } + return tcpip.FullAddress{ + Addr: tcpip.Address(path), + }, nil + + case linux.AF_INET: + var a linux.SockAddrInet + if len(addr) < sockAddrInetSize { + return tcpip.FullAddress{}, syserr.ErrBadAddress + } + binary.Unmarshal(addr[:sockAddrInetSize], usermem.ByteOrder, &a) + + out := tcpip.FullAddress{ + Addr: tcpip.Address(a.Addr[:]), + Port: ntohs(a.Port), + } + if out.Addr == "\x00\x00\x00\x00" { + out.Addr = "" + } + return out, nil + + case linux.AF_INET6: + var a linux.SockAddrInet6 + if len(addr) < sockAddrInet6Size { + return tcpip.FullAddress{}, syserr.ErrBadAddress + } + binary.Unmarshal(addr[:sockAddrInet6Size], usermem.ByteOrder, &a) + + out := tcpip.FullAddress{ + Addr: tcpip.Address(a.Addr[:]), + Port: ntohs(a.Port), + } + if isLinkLocal(out.Addr) { + out.NIC = tcpip.NICID(a.Scope_id) + } + if out.Addr == tcpip.Address(strings.Repeat("\x00", 16)) { + out.Addr = "" + } + return out, nil + + default: + return tcpip.FullAddress{}, syserr.ErrAddressFamilyNotSupported + } +} + +func (s *SocketOperations) isPacketBased() bool { + return s.skType == linux.SOCK_DGRAM || s.skType == linux.SOCK_SEQPACKET || s.skType == linux.SOCK_RDM +} + +// fetchReadView updates the readView field of the socket if it's currently +// empty. 
It assumes that the socket is locked. +func (s *SocketOperations) fetchReadView() *syserr.Error { + if len(s.readView) > 0 { + return nil + } + + s.readView = nil + s.sender = tcpip.FullAddress{} + + v, err := s.Endpoint.Read(&s.sender) + if err != nil { + return syserr.TranslateNetstackError(err) + } + + s.readView = v + + return nil +} + +// Release implements fs.FileOperations.Release. +func (s *SocketOperations) Release() { + s.Endpoint.Close() +} + +// Read implements fs.FileOperations.Read. +func (s *SocketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + if dst.NumBytes() == 0 { + return 0, nil + } + n, _, _, err := s.nonBlockingRead(ctx, dst, false, false, false) + if err == syserr.ErrWouldBlock { + return int64(n), syserror.ErrWouldBlock + } + if err != nil { + return 0, err.ToError() + } + return int64(n), nil +} + +// ioSequencePayload implements tcpip.Payload. It copies user memory bytes on demand +// based on the requested size. +type ioSequencePayload struct { + ctx context.Context + src usermem.IOSequence +} + +// Get implements tcpip.Payload. +func (i *ioSequencePayload) Get(size int) ([]byte, *tcpip.Error) { + if size > i.Size() { + size = i.Size() + } + v := buffer.NewView(size) + if _, err := i.src.CopyIn(i.ctx, v); err != nil { + return nil, tcpip.ErrBadAddress + } + return v, nil +} + +// Size implements tcpip.Payload. +func (i *ioSequencePayload) Size() int { + return int(i.src.NumBytes()) +} + +// Write implements fs.FileOperations.Write. +func (s *SocketOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + f := &ioSequencePayload{ctx: ctx, src: src} + n, err := s.Endpoint.Write(f, tcpip.WriteOptions{}) + if err == tcpip.ErrWouldBlock { + return int64(n), syserror.ErrWouldBlock + } + return int64(n), syserr.TranslateNetstackError(err).ToError() +} + +// Readiness returns a mask of ready events for socket s. 
+func (s *SocketOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + r := s.Endpoint.Readiness(mask) + + // Check our cached value iff the caller asked for readability and the + // endpoint itself is currently not readable. + if (mask & ^r & waiter.EventIn) != 0 { + s.readMu.Lock() + if len(s.readView) > 0 { + r |= waiter.EventIn + } + s.readMu.Unlock() + } + + return r +} + +// Connect implements the linux syscall connect(2) for sockets backed by +// tpcip.Endpoint. +func (s *SocketOperations) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error { + addr, err := GetAddress(s.family, sockaddr) + if err != nil { + return err + } + + // Always return right away in the non-blocking case. + if !blocking { + return syserr.TranslateNetstackError(s.Endpoint.Connect(addr)) + } + + // Register for notification when the endpoint becomes writable, then + // initiate the connection. + e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventOut) + defer s.EventUnregister(&e) + + if err := s.Endpoint.Connect(addr); err != tcpip.ErrConnectStarted && err != tcpip.ErrAlreadyConnecting { + return syserr.TranslateNetstackError(err) + } + + // It's pending, so we have to wait for a notification, and fetch the + // result once the wait completes. + if err := t.Block(ch); err != nil { + return syserr.FromError(err) + } + + // Call Connect() again after blocking to find connect's result. + return syserr.TranslateNetstackError(s.Endpoint.Connect(addr)) +} + +// Bind implements the linux syscall bind(2) for sockets backed by +// tcpip.Endpoint. +func (s *SocketOperations) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error { + addr, err := GetAddress(s.family, sockaddr) + if err != nil { + return err + } + + // Issue the bind request to the endpoint. + return syserr.TranslateNetstackError(s.Endpoint.Bind(addr, nil)) +} + +// Listen implements the linux syscall listen(2) for sockets backed by +// tcpip.Endpoint. 
+func (s *SocketOperations) Listen(t *kernel.Task, backlog int) *syserr.Error { + return syserr.TranslateNetstackError(s.Endpoint.Listen(backlog)) +} + +// blockingAccept implements a blocking version of accept(2), that is, if no +// connections are ready to be accept, it will block until one becomes ready. +func (s *SocketOperations) blockingAccept(t *kernel.Task) (tcpip.Endpoint, *waiter.Queue, *syserr.Error) { + // Register for notifications. + e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventIn) + defer s.EventUnregister(&e) + + // Try to accept the connection again; if it fails, then wait until we + // get a notification. + for { + if ep, wq, err := s.Endpoint.Accept(); err != tcpip.ErrWouldBlock { + return ep, wq, syserr.TranslateNetstackError(err) + } + + if err := t.Block(ch); err != nil { + return nil, nil, syserr.FromError(err) + } + } +} + +// Accept implements the linux syscall accept(2) for sockets backed by +// tcpip.Endpoint. +func (s *SocketOperations) Accept(t *kernel.Task, peerRequested bool, flags int, blocking bool) (kdefs.FD, interface{}, uint32, *syserr.Error) { + // Issue the accept request to get the new endpoint. + ep, wq, err := s.Endpoint.Accept() + if err != nil { + if err != tcpip.ErrWouldBlock || !blocking { + return 0, nil, 0, syserr.TranslateNetstackError(err) + } + + var err *syserr.Error + ep, wq, err = s.blockingAccept(t) + if err != nil { + return 0, nil, 0, err + } + } + + ns := New(t, s.family, s.skType, wq, ep) + defer ns.DecRef() + + if flags&linux.SOCK_NONBLOCK != 0 { + flags := ns.Flags() + flags.NonBlocking = true + ns.SetFlags(flags.Settable()) + } + + var addr interface{} + var addrLen uint32 + if peerRequested { + // Get address of the peer and write it to peer slice. 
+ var err *syserr.Error + addr, addrLen, err = ns.FileOperations.(*SocketOperations).GetPeerName(t) + if err != nil { + return 0, nil, 0, err + } + } + + fdFlags := kernel.FDFlags{ + CloseOnExec: flags&linux.SOCK_CLOEXEC != 0, + } + fd, e := t.FDMap().NewFDFrom(0, ns, fdFlags, t.ThreadGroup().Limits()) + + return fd, addr, addrLen, syserr.FromError(e) +} + +// ConvertShutdown converts Linux shutdown flags into tcpip shutdown flags. +func ConvertShutdown(how int) (tcpip.ShutdownFlags, *syserr.Error) { + var f tcpip.ShutdownFlags + switch how { + case linux.SHUT_RD: + f = tcpip.ShutdownRead + case linux.SHUT_WR: + f = tcpip.ShutdownWrite + case linux.SHUT_RDWR: + f = tcpip.ShutdownRead | tcpip.ShutdownWrite + default: + return 0, syserr.ErrInvalidArgument + } + return f, nil +} + +// Shutdown implements the linux syscall shutdown(2) for sockets backed by +// tcpip.Endpoint. +func (s *SocketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error { + f, err := ConvertShutdown(how) + if err != nil { + return err + } + + // Issue shutdown request. + return syserr.TranslateNetstackError(s.Endpoint.Shutdown(f)) +} + +// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by +// tcpip.Endpoint. +func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name, outLen int) (interface{}, *syserr.Error) { + return GetSockOpt(t, s, s.Endpoint, s.family, s.skType, level, name, outLen) +} + +// GetSockOpt can be used to implement the linux syscall getsockopt(2) for +// sockets backed by a commonEndpoint. 
+func GetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, family int, skType unix.SockType, level, name, outLen int) (interface{}, *syserr.Error) { + switch level { + case syscall.SOL_SOCKET: + switch name { + case linux.SO_TYPE: + if outLen < sizeOfInt32 { + return nil, syserr.ErrInvalidArgument + } + return int32(skType), nil + + case linux.SO_ERROR: + if outLen < sizeOfInt32 { + return nil, syserr.ErrInvalidArgument + } + + // Get the last error and convert it. + err := ep.GetSockOpt(tcpip.ErrorOption{}) + if err == nil { + return int32(0), nil + } + return int32(syserr.ToLinux(syserr.TranslateNetstackError(err)).Number()), nil + + case linux.SO_PEERCRED: + if family != linux.AF_UNIX || outLen < syscall.SizeofUcred { + return nil, syserr.ErrInvalidArgument + } + + tcred := t.Credentials() + return syscall.Ucred{ + Pid: int32(t.ThreadGroup().ID()), + Uid: uint32(tcred.EffectiveKUID.In(tcred.UserNamespace).OrOverflow()), + Gid: uint32(tcred.EffectiveKGID.In(tcred.UserNamespace).OrOverflow()), + }, nil + + case linux.SO_PASSCRED: + if outLen < sizeOfInt32 { + return nil, syserr.ErrInvalidArgument + } + + var v tcpip.PasscredOption + if err := ep.GetSockOpt(&v); err != nil { + return nil, syserr.TranslateNetstackError(err) + } + + return int32(v), nil + + case linux.SO_SNDBUF: + if outLen < sizeOfInt32 { + return nil, syserr.ErrInvalidArgument + } + + var size tcpip.SendBufferSizeOption + if err := ep.GetSockOpt(&size); err != nil { + return nil, syserr.TranslateNetstackError(err) + } + + if size > math.MaxInt32 { + size = math.MaxInt32 + } + + return int32(size), nil + + case linux.SO_RCVBUF: + if outLen < sizeOfInt32 { + return nil, syserr.ErrInvalidArgument + } + + var size tcpip.ReceiveBufferSizeOption + if err := ep.GetSockOpt(&size); err != nil { + return nil, syserr.TranslateNetstackError(err) + } + + if size > math.MaxInt32 { + size = math.MaxInt32 + } + + return int32(size), nil + + case linux.SO_REUSEADDR: + if outLen < sizeOfInt32 { + return 
nil, syserr.ErrInvalidArgument + } + + var v tcpip.ReuseAddressOption + if err := ep.GetSockOpt(&v); err != nil { + return nil, syserr.TranslateNetstackError(err) + } + + return int32(v), nil + + case linux.SO_KEEPALIVE: + if outLen < sizeOfInt32 { + return nil, syserr.ErrInvalidArgument + } + return int32(0), nil + + case linux.SO_LINGER: + if outLen < syscall.SizeofLinger { + return nil, syserr.ErrInvalidArgument + } + return syscall.Linger{}, nil + + case linux.SO_RCVTIMEO: + if outLen < linux.SizeOfTimeval { + return nil, syserr.ErrInvalidArgument + } + + return linux.NsecToTimeval(s.RecvTimeout()), nil + } + + case syscall.SOL_TCP: + switch name { + case syscall.TCP_NODELAY: + if outLen < sizeOfInt32 { + return nil, syserr.ErrInvalidArgument + } + + var v tcpip.NoDelayOption + if err := ep.GetSockOpt(&v); err != nil { + return nil, syserr.TranslateNetstackError(err) + } + + return int32(v), nil + + case syscall.TCP_INFO: + var v tcpip.TCPInfoOption + if err := ep.GetSockOpt(&v); err != nil { + return nil, syserr.TranslateNetstackError(err) + } + + // TODO: Translate fields once they are added to + // tcpip.TCPInfoOption. + info := linux.TCPInfo{} + + // Linux truncates the output binary to outLen. + ib := binary.Marshal(nil, usermem.ByteOrder, &info) + if len(ib) > outLen { + ib = ib[:outLen] + } + + return ib, nil + } + + case syscall.SOL_IPV6: + switch name { + case syscall.IPV6_V6ONLY: + if outLen < sizeOfInt32 { + return nil, syserr.ErrInvalidArgument + } + + var v tcpip.V6OnlyOption + if err := ep.GetSockOpt(&v); err != nil { + return nil, syserr.TranslateNetstackError(err) + } + + return int32(v), nil + } + } + + return nil, syserr.ErrProtocolNotAvailable +} + +// SetSockOpt implements the linux syscall setsockopt(2) for sockets backed by +// tcpip.Endpoint. 
+func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVal []byte) *syserr.Error { + return SetSockOpt(t, s, s.Endpoint, level, name, optVal) +} + +// SetSockOpt can be used to implement the linux syscall setsockopt(2) for +// sockets backed by a commonEndpoint. +func SetSockOpt(t *kernel.Task, s socket.Socket, ep commonEndpoint, level int, name int, optVal []byte) *syserr.Error { + switch level { + case syscall.SOL_SOCKET: + switch name { + case linux.SO_SNDBUF: + if len(optVal) < sizeOfInt32 { + return syserr.ErrInvalidArgument + } + + v := usermem.ByteOrder.Uint32(optVal) + return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.SendBufferSizeOption(v))) + + case linux.SO_RCVBUF: + if len(optVal) < sizeOfInt32 { + return syserr.ErrInvalidArgument + } + + v := usermem.ByteOrder.Uint32(optVal) + return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.ReceiveBufferSizeOption(v))) + + case linux.SO_REUSEADDR: + if len(optVal) < sizeOfInt32 { + return syserr.ErrInvalidArgument + } + + v := usermem.ByteOrder.Uint32(optVal) + return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.ReuseAddressOption(v))) + + case linux.SO_PASSCRED: + if len(optVal) < sizeOfInt32 { + return syserr.ErrInvalidArgument + } + + v := usermem.ByteOrder.Uint32(optVal) + return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.PasscredOption(v))) + + case linux.SO_RCVTIMEO: + if len(optVal) < linux.SizeOfTimeval { + return syserr.ErrInvalidArgument + } + + var v linux.Timeval + binary.Unmarshal(optVal[:linux.SizeOfTimeval], usermem.ByteOrder, &v) + s.SetRecvTimeout(v.ToNsecCapped()) + return nil + } + + case syscall.SOL_TCP: + switch name { + case syscall.TCP_NODELAY: + if len(optVal) < sizeOfInt32 { + return syserr.ErrInvalidArgument + } + + v := usermem.ByteOrder.Uint32(optVal) + return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.NoDelayOption(v))) + } + case syscall.SOL_IPV6: + switch name { + case syscall.IPV6_V6ONLY: + if len(optVal) < sizeOfInt32 { + 
return syserr.ErrInvalidArgument + } + + v := usermem.ByteOrder.Uint32(optVal) + return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.V6OnlyOption(v))) + } + } + + // FIXME: Disallow IP-level multicast group options by + // default. These will need to be supported by appropriately plumbing + // the level through to the network stack (if at all). However, we + // still allow setting TTL, and multicast-enable/disable type options. + if level == 0 { + const ( + _IP_ADD_MEMBERSHIP = 35 + _MCAST_JOIN_GROUP = 42 + ) + if name == _IP_ADD_MEMBERSHIP || name == _MCAST_JOIN_GROUP { + return syserr.ErrInvalidArgument + } + } + + // Default to the old behavior; hand off to network stack. + return syserr.TranslateNetstackError(ep.SetSockOpt(struct{}{})) +} + +// isLinkLocal determines if the given IPv6 address is link-local. This is the +// case when it has the fe80::/10 prefix. This check is used to determine when +// the NICID is relevant for a given IPv6 address. +func isLinkLocal(addr tcpip.Address) bool { + return len(addr) >= 2 && addr[0] == 0xfe && addr[1]&0xc0 == 0x80 +} + +// ConvertAddress converts the given address to a native format. +func ConvertAddress(family int, addr tcpip.FullAddress) (interface{}, uint32) { + switch family { + case linux.AF_UNIX: + var out linux.SockAddrUnix + out.Family = linux.AF_UNIX + for i := 0; i < len([]byte(addr.Addr)); i++ { + out.Path[i] = int8(addr.Addr[i]) + } + // Linux just returns the header for empty addresses. + if len(addr.Addr) == 0 { + return out, 2 + } + // Linux returns the used length of the address struct (including the + // null terminator) for filesystem paths. The Family field is 2 bytes. + // It is sometimes allowed to exclude the null terminator if the + // address length is the max. Abstract paths always return the full + // length. 
+ if out.Path[0] == 0 || len([]byte(addr.Addr)) == len(out.Path) { + return out, uint32(binary.Size(out)) + } + return out, uint32(3 + len(addr.Addr)) + case linux.AF_INET: + var out linux.SockAddrInet + copy(out.Addr[:], addr.Addr) + out.Family = linux.AF_INET + out.Port = htons(addr.Port) + return out, uint32(binary.Size(out)) + case linux.AF_INET6: + var out linux.SockAddrInet6 + if len(addr.Addr) == 4 { + // Copy address is v4-mapped format. + copy(out.Addr[12:], addr.Addr) + out.Addr[10] = 0xff + out.Addr[11] = 0xff + } else { + copy(out.Addr[:], addr.Addr) + } + out.Family = linux.AF_INET6 + out.Port = htons(addr.Port) + if isLinkLocal(addr.Addr) { + out.Scope_id = uint32(addr.NIC) + } + return out, uint32(binary.Size(out)) + default: + return nil, 0 + } +} + +// GetSockName implements the linux syscall getsockname(2) for sockets backed by +// tcpip.Endpoint. +func (s *SocketOperations) GetSockName(t *kernel.Task) (interface{}, uint32, *syserr.Error) { + addr, err := s.Endpoint.GetLocalAddress() + if err != nil { + return nil, 0, syserr.TranslateNetstackError(err) + } + + a, l := ConvertAddress(s.family, addr) + return a, l, nil +} + +// GetPeerName implements the linux syscall getpeername(2) for sockets backed by +// tcpip.Endpoint. +func (s *SocketOperations) GetPeerName(t *kernel.Task) (interface{}, uint32, *syserr.Error) { + addr, err := s.Endpoint.GetRemoteAddress() + if err != nil { + return nil, 0, syserr.TranslateNetstackError(err) + } + + a, l := ConvertAddress(s.family, addr) + return a, l, nil +} + +// coalescingRead is the fast path for non-blocking, non-peek, stream-based +// case. It coalesces as many packets as possible before returning to the +// caller. +func (s *SocketOperations) coalescingRead(ctx context.Context, dst usermem.IOSequence, discard bool) (int, *syserr.Error) { + var err *syserr.Error + var copied int + + // Copy as many views as possible into the user-provided buffer. 
+ for dst.NumBytes() != 0 { + err = s.fetchReadView() + if err != nil { + break + } + + var n int + var e error + if discard { + n = len(s.readView) + if int64(n) > dst.NumBytes() { + n = int(dst.NumBytes()) + } + } else { + n, e = dst.CopyOut(ctx, s.readView) + } + copied += n + s.readView.TrimFront(n) + dst = dst.DropFirst(n) + if e != nil { + err = syserr.FromError(e) + break + } + } + + // If we managed to copy something, we must deliver it. + if copied > 0 { + return copied, nil + } + + return 0, err +} + +// nonBlockingRead issues a non-blocking read. +func (s *SocketOperations) nonBlockingRead(ctx context.Context, dst usermem.IOSequence, peek, trunc, senderRequested bool) (int, interface{}, uint32, *syserr.Error) { + isPacket := s.isPacketBased() + + // Fast path for regular reads from stream (e.g., TCP) endpoints. Note + // that senderRequested is ignored for stream sockets. + if !peek && !isPacket { + // TCP sockets discard the data if MSG_TRUNC is set. + // + // This behavior is documented in man 7 tcp: + // Since version 2.4, Linux supports the use of MSG_TRUNC in the flags + // argument of recv(2) (and recvmsg(2)). This flag causes the received + // bytes of data to be discarded, rather than passed back in a + // caller-supplied buffer. + s.readMu.Lock() + n, err := s.coalescingRead(ctx, dst, trunc) + s.readMu.Unlock() + return n, nil, 0, err + } + + s.readMu.Lock() + defer s.readMu.Unlock() + + if err := s.fetchReadView(); err != nil { + return 0, nil, 0, err + } + + if !isPacket && peek && trunc { + // MSG_TRUNC with MSG_PEEK on a TCP socket returns the + // amount that could be read. 
+ var rql tcpip.ReceiveQueueSizeOption + if err := s.Endpoint.GetSockOpt(&rql); err != nil { + return 0, nil, 0, syserr.TranslateNetstackError(err) + } + available := len(s.readView) + int(rql) + bufLen := int(dst.NumBytes()) + if available < bufLen { + return available, nil, 0, nil + } + return bufLen, nil, 0, nil + } + + n, err := dst.CopyOut(ctx, s.readView) + var addr interface{} + var addrLen uint32 + if isPacket && senderRequested { + addr, addrLen = ConvertAddress(s.family, s.sender) + } + + if peek { + if l := len(s.readView); trunc && l > n { + // isPacket must be true. + return l, addr, addrLen, syserr.FromError(err) + } + + if isPacket || err != nil { + return int(n), addr, addrLen, syserr.FromError(err) + } + + // We need to peek beyond the first message. + dst = dst.DropFirst(n) + num, err := dst.CopyOutFrom(ctx, safemem.FromVecReaderFunc{func(dsts [][]byte) (int64, error) { + n, err := s.Endpoint.Peek(dsts) + if err != nil { + return int64(n), syserr.TranslateNetstackError(err).ToError() + } + return int64(n), nil + }}) + n += int(num) + if err == syserror.ErrWouldBlock && n > 0 { + // We got some data, so no need to return an error. + err = nil + } + return int(n), nil, 0, syserr.FromError(err) + } + + var msgLen int + if isPacket { + msgLen = len(s.readView) + s.readView = nil + } else { + msgLen = int(n) + s.readView.TrimFront(int(n)) + } + + if trunc { + return msgLen, addr, addrLen, syserr.FromError(err) + } + + return int(n), addr, addrLen, syserr.FromError(err) +} + +// RecvMsg implements the linux syscall recvmsg(2) for sockets backed by +// tcpip.Endpoint. 
func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages unix.ControlMessages, err *syserr.Error) {
	// NOTE(review): controlDataLen is never read in this body; control
	// messages are always returned as the zero value.
	trunc := flags&linux.MSG_TRUNC != 0

	peek := flags&linux.MSG_PEEK != 0
	if senderRequested && !s.isPacketBased() {
		// Stream sockets ignore the sender address.
		senderRequested = false
	}
	// Fast path: try once without blocking. Anything other than
	// ErrWouldBlock (including success) is final, as is MSG_DONTWAIT.
	n, senderAddr, senderAddrLen, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)
	if err != syserr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {
		return
	}

	// We'll have to block. Register for notifications and keep trying to
	// receive the data.
	e, ch := waiter.NewChannelEntry(nil)
	s.EventRegister(&e, waiter.EventIn)
	defer s.EventUnregister(&e)

	for {
		n, senderAddr, senderAddrLen, err = s.nonBlockingRead(t, dst, peek, trunc, senderRequested)
		if err != syserr.ErrWouldBlock {
			return
		}

		if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil {
			// An expired receive timeout surfaces as EAGAIN.
			if err == syserror.ETIMEDOUT {
				return 0, nil, 0, unix.ControlMessages{}, syserr.ErrTryAgain
			}
			return 0, nil, 0, unix.ControlMessages{}, syserr.FromError(err)
		}
	}
}

// SendMsg implements the linux syscall sendmsg(2) for sockets backed by
// tcpip.Endpoint.
func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages unix.ControlMessages) (int, *syserr.Error) {
	// Reject control messages: sending SCM data is not supported on these
	// endpoints, so any non-empty set is EINVAL.
	if !controlMessages.Empty() {
		return 0, syserr.ErrInvalidArgument
	}

	// Optional destination address (nil for connected sends).
	var addr *tcpip.FullAddress
	if len(to) > 0 {
		addrBuf, err := GetAddress(s.family, to)
		if err != nil {
			return 0, err
		}

		addr = &addrBuf
	}

	v := buffer.NewView(int(src.NumBytes()))

	// Copy all the data into the buffer.
	if _, err := src.CopyIn(t, v); err != nil {
		return 0, syserr.FromError(err)
	}

	opts := tcpip.WriteOptions{
		To:          addr,
		More:        flags&linux.MSG_MORE != 0,
		EndOfRecord: flags&linux.MSG_EOR != 0,
	}

	// Fast path: a single non-blocking write attempt.
	n, err := s.Endpoint.Write(tcpip.SlicePayload(v), opts)
	if err != tcpip.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 {
		return int(n), syserr.TranslateNetstackError(err)
	}

	// We'll have to block. Register for notification and keep trying to
	// send all the data.
	e, ch := waiter.NewChannelEntry(nil)
	s.EventRegister(&e, waiter.EventOut)
	defer s.EventUnregister(&e)

	// Trim what the first attempt already wrote, then loop until the
	// remainder is fully written or a hard error occurs.
	v.TrimFront(int(n))
	total := n
	for {
		n, err = s.Endpoint.Write(tcpip.SlicePayload(v), opts)
		v.TrimFront(int(n))
		total += n
		if err != tcpip.ErrWouldBlock {
			return int(total), syserr.TranslateNetstackError(err)
		}

		if err := t.Block(ch); err != nil {
			// Interrupted/killed: report what was sent so far.
			return int(total), syserr.FromError(err)
		}
	}
}

// interfaceIoctl implements the SIOCGIF* interface-query ioctls against the
// sentry's view of the network stack, writing results into ifr.Data.
func (s *SocketOperations) interfaceIoctl(ctx context.Context, io usermem.IO, arg int, ifr *linux.IFReq) *syserr.Error {
	var (
		iface inet.Interface
		index int32
		found bool
	)

	// Find the relevant device by name; index is the NIC id (map key).
	for index, iface = range s.stack.Interfaces() {
		if iface.Name == ifr.Name() {
			found = true
			break
		}
	}
	if !found {
		return syserr.ErrNoDevice
	}

	switch arg {
	case syscall.SIOCGIFINDEX:
		// Copy out the index to the data.
		usermem.ByteOrder.PutUint32(ifr.Data[:], uint32(index))

	case syscall.SIOCGIFHWADDR:
		// Copy the hardware address out.
		ifr.Data[0] = 6 // IEEE802.2 arp type.
		ifr.Data[1] = 0
		n := copy(ifr.Data[2:], iface.Addr)
		for i := 2 + n; i < len(ifr.Data); i++ {
			ifr.Data[i] = 0 // Clear padding.
		}
		// NOTE(review): this overwrites the arp-type bytes just written at
		// Data[0:2] with the copied address length — confirm intended;
		// Linux puts the ARPHRD type in sa_family here.
		usermem.ByteOrder.PutUint16(ifr.Data[:2], uint16(n))

	case syscall.SIOCGIFFLAGS:
		// TODO: Implement. For now, return only that the
		// device is up so that ifconfig prints it.
		usermem.ByteOrder.PutUint16(ifr.Data[:2], linux.IFF_UP)

	case syscall.SIOCGIFADDR:
		// Copy the first IPv4 address out, skipping other families; the
		// address bytes land at the sockaddr_in sin_addr offset (Data[4:8]).
		for _, addr := range s.stack.InterfaceAddrs()[index] {
			// This ioctl is only compatible with AF_INET addresses.
			if addr.Family != linux.AF_INET {
				continue
			}
			copy(ifr.Data[4:8], addr.Addr)
			break
		}

	case syscall.SIOCGIFMETRIC:
		// Gets the metric of the device. As per netdevice(7), this
		// always just sets ifr_metric to 0.
		usermem.ByteOrder.PutUint32(ifr.Data[:4], 0)
	case syscall.SIOCGIFMTU:
		// Gets the MTU of the device.
		// TODO: Implement.

	case syscall.SIOCGIFMAP:
		// Gets the hardware parameters of the device.
		// TODO: Implement.

	case syscall.SIOCGIFTXQLEN:
		// Gets the transmit queue length of the device.
		// TODO: Implement.

	case syscall.SIOCGIFDSTADDR:
		// Gets the destination address of a point-to-point device.
		// TODO: Implement.

	case syscall.SIOCGIFBRDADDR:
		// Gets the broadcast address of a device.
		// TODO: Implement.

	case syscall.SIOCGIFNETMASK:
		// Gets the network mask of a device.
		// TODO: Implement.

	default:
		// Not a valid call.
		return syserr.ErrInvalidArgument
	}

	return nil
}

// Ioctl implements fs.FileOperations.Ioctl.
+func (s *SocketOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + switch arg := int(args[1].Int()); arg { + case syscall.SIOCGIFFLAGS, + syscall.SIOCGIFADDR, + syscall.SIOCGIFBRDADDR, + syscall.SIOCGIFDSTADDR, + syscall.SIOCGIFHWADDR, + syscall.SIOCGIFINDEX, + syscall.SIOCGIFMAP, + syscall.SIOCGIFMETRIC, + syscall.SIOCGIFMTU, + syscall.SIOCGIFNETMASK, + syscall.SIOCGIFTXQLEN: + + var ifr linux.IFReq + if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &ifr, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return 0, err + } + if err := s.interfaceIoctl(ctx, io, arg, &ifr); err != nil { + return 0, err.ToError() + } + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), &ifr, usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + case syscall.SIOCGIFCONF: + // Return a list of interface addresses or the buffer size + // necessary to hold the list. + var ifc linux.IFConf + if _, err := usermem.CopyObjectIn(ctx, io, args[2].Pointer(), &ifc, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return 0, err + } + + if err := s.ifconfIoctl(ctx, io, &ifc); err != nil { + return 0, err + } + + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), ifc, usermem.IOOpts{ + AddressSpaceActive: true, + }) + + return 0, err + } + + return Ioctl(ctx, s.Endpoint, io, args) +} + +// ifconfIoctl populates a struct ifconf for the SIOCGIFCONF ioctl. +func (s *SocketOperations) ifconfIoctl(ctx context.Context, io usermem.IO, ifc *linux.IFConf) error { + // If Ptr is NULL, return the necessary buffer size via Len. + // Otherwise, write up to Len bytes starting at Ptr containing ifreq + // structs. 
+ if ifc.Ptr == 0 { + ifc.Len = int32(len(s.stack.Interfaces())) * int32(linux.SizeOfIFReq) + return nil + } + + max := ifc.Len + ifc.Len = 0 + for key, ifaceAddrs := range s.stack.InterfaceAddrs() { + iface := s.stack.Interfaces()[key] + for _, ifaceAddr := range ifaceAddrs { + // Don't write past the end of the buffer. + if ifc.Len+int32(linux.SizeOfIFReq) > max { + break + } + if ifaceAddr.Family != linux.AF_INET { + continue + } + + // Populate ifr.ifr_addr. + ifr := linux.IFReq{} + ifr.SetName(iface.Name) + usermem.ByteOrder.PutUint16(ifr.Data[0:2], uint16(ifaceAddr.Family)) + usermem.ByteOrder.PutUint16(ifr.Data[2:4], 0) + copy(ifr.Data[4:8], ifaceAddr.Addr[:4]) + + // Copy the ifr to userspace. + dst := uintptr(ifc.Ptr) + uintptr(ifc.Len) + ifc.Len += int32(linux.SizeOfIFReq) + if _, err := usermem.CopyObjectOut(ctx, io, usermem.Addr(dst), ifr, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return err + } + } + } + return nil +} + +// Ioctl implements fs.FileOperations.Ioctl for sockets backed by a +// commonEndpoint. +func Ioctl(ctx context.Context, ep commonEndpoint, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + // Switch on ioctl request. + switch int(args[1].Int()) { + case linux.TIOCINQ: + var v tcpip.ReceiveQueueSizeOption + if err := ep.GetSockOpt(&v); err != nil { + return 0, syserr.TranslateNetstackError(err).ToError() + } + + if v > math.MaxInt32 { + v = math.MaxInt32 + } + // Copy result to user-space. + _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(v), usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + + case linux.TIOCOUTQ: + var v tcpip.SendQueueSizeOption + if err := ep.GetSockOpt(&v); err != nil { + return 0, syserr.TranslateNetstackError(err).ToError() + } + + if v > math.MaxInt32 { + v = math.MaxInt32 + } + + // Copy result to user-space. 
+ _, err := usermem.CopyObjectOut(ctx, io, args[2].Pointer(), int32(v), usermem.IOOpts{ + AddressSpaceActive: true, + }) + return 0, err + } + + return 0, syserror.ENOTTY +} diff --git a/pkg/sentry/socket/epsocket/provider.go b/pkg/sentry/socket/epsocket/provider.go new file mode 100644 index 000000000..5616435b3 --- /dev/null +++ b/pkg/sentry/socket/epsocket/provider.go @@ -0,0 +1,113 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package epsocket + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv6" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/udp" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// provider is an inet socket provider. +type provider struct { + family int + netProto tcpip.NetworkProtocolNumber +} + +// GetTransportProtocol figures out transport protocol. Currently only TCP and +// UDP are supported. 
+func GetTransportProtocol(stype unix.SockType, protocol int) (tcpip.TransportProtocolNumber, *syserr.Error) { + switch stype { + case linux.SOCK_STREAM: + if protocol != 0 && protocol != syscall.IPPROTO_TCP { + return 0, syserr.ErrInvalidArgument + } + return tcp.ProtocolNumber, nil + + case linux.SOCK_DGRAM: + if protocol != 0 && protocol != syscall.IPPROTO_UDP { + return 0, syserr.ErrInvalidArgument + } + return udp.ProtocolNumber, nil + + default: + return 0, syserr.ErrInvalidArgument + } +} + +// Socket creates a new socket object for the AF_INET or AF_INET6 family. +func (p *provider) Socket(t *kernel.Task, stype unix.SockType, protocol int) (*fs.File, *syserr.Error) { + // Fail right away if we don't have a stack. + stack := t.NetworkContext() + if stack == nil { + // Don't propagate an error here. Instead, allow the socket + // code to continue searching for another provider. + return nil, nil + } + eps, ok := stack.(*Stack) + if !ok { + return nil, nil + } + + // Figure out the transport protocol. + transProto, err := GetTransportProtocol(stype, protocol) + if err != nil { + return nil, err + } + + // Create the endpoint. + wq := &waiter.Queue{} + ep, e := eps.Stack.NewEndpoint(transProto, p.netProto, wq) + if e != nil { + return nil, syserr.TranslateNetstackError(e) + } + + return New(t, p.family, stype, wq, ep), nil +} + +// Pair just returns nil sockets (not supported). +func (*provider) Pair(*kernel.Task, unix.SockType, int) (*fs.File, *fs.File, *syserr.Error) { + return nil, nil, nil +} + +// init registers socket providers for AF_INET and AF_INET6. +func init() { + // Providers backed by netstack. 
+ p := []provider{ + { + family: linux.AF_INET, + netProto: ipv4.ProtocolNumber, + }, + { + family: linux.AF_INET6, + netProto: ipv6.ProtocolNumber, + }, + } + + for i := range p { + socket.RegisterProvider(p[i].family, &p[i]) + } +} diff --git a/pkg/sentry/socket/epsocket/save_restore.go b/pkg/sentry/socket/epsocket/save_restore.go new file mode 100644 index 000000000..2613f90de --- /dev/null +++ b/pkg/sentry/socket/epsocket/save_restore.go @@ -0,0 +1,27 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package epsocket + +import ( + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +// afterLoad is invoked by stateify. +func (s *Stack) afterLoad() { + s.Stack = stack.StackFromEnv // FIXME + if s.Stack == nil { + panic("can't restore without netstack/tcpip/stack.Stack") + } +} diff --git a/pkg/sentry/socket/epsocket/stack.go b/pkg/sentry/socket/epsocket/stack.go new file mode 100644 index 000000000..ec1d96ccb --- /dev/null +++ b/pkg/sentry/socket/epsocket/stack.go @@ -0,0 +1,132 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package epsocket + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv6" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" +) + +// Stack implements inet.Stack for netstack/tcpip/stack.Stack. +type Stack struct { + Stack *stack.Stack `state:"manual"` +} + +// SupportsIPv6 implements Stack.SupportsIPv6. +func (s *Stack) SupportsIPv6() bool { + return s.Stack.CheckNetworkProtocol(ipv6.ProtocolNumber) +} + +// Interfaces implements inet.Stack.Interfaces. +func (s *Stack) Interfaces() map[int32]inet.Interface { + is := make(map[int32]inet.Interface) + for id, ni := range s.Stack.NICInfo() { + is[int32(id)] = inet.Interface{ + Name: ni.Name, + Addr: []byte(ni.LinkAddress), + // TODO: Other fields. + } + } + return is +} + +// InterfaceAddrs implements inet.Stack.InterfaceAddrs. 
+func (s *Stack) InterfaceAddrs() map[int32][]inet.InterfaceAddr { + nicAddrs := make(map[int32][]inet.InterfaceAddr) + for id, ni := range s.Stack.NICInfo() { + var addrs []inet.InterfaceAddr + for _, a := range ni.ProtocolAddresses { + var family uint8 + switch a.Protocol { + case ipv4.ProtocolNumber: + family = linux.AF_INET + case ipv6.ProtocolNumber: + family = linux.AF_INET6 + default: + log.Warningf("Unknown network protocol in %+v", a) + continue + } + + addrs = append(addrs, inet.InterfaceAddr{ + Family: family, + PrefixLen: uint8(len(a.Address) * 8), + Addr: []byte(a.Address), + // TODO: Other fields. + }) + } + nicAddrs[int32(id)] = addrs + } + return nicAddrs +} + +// TCPReceiveBufferSize implements inet.Stack.TCPReceiveBufferSize. +func (s *Stack) TCPReceiveBufferSize() (inet.TCPBufferSize, error) { + var rs tcp.ReceiveBufferSizeOption + err := s.Stack.TransportProtocolOption(tcp.ProtocolNumber, &rs) + return inet.TCPBufferSize{ + Min: rs.Min, + Default: rs.Default, + Max: rs.Max, + }, syserr.TranslateNetstackError(err).ToError() +} + +// SetTCPReceiveBufferSize implements inet.Stack.SetTCPReceiveBufferSize. +func (s *Stack) SetTCPReceiveBufferSize(size inet.TCPBufferSize) error { + rs := tcp.ReceiveBufferSizeOption{ + Min: size.Min, + Default: size.Default, + Max: size.Max, + } + return syserr.TranslateNetstackError(s.Stack.SetTransportProtocolOption(tcp.ProtocolNumber, rs)).ToError() +} + +// TCPSendBufferSize implements inet.Stack.TCPSendBufferSize. +func (s *Stack) TCPSendBufferSize() (inet.TCPBufferSize, error) { + var ss tcp.SendBufferSizeOption + err := s.Stack.TransportProtocolOption(tcp.ProtocolNumber, &ss) + return inet.TCPBufferSize{ + Min: ss.Min, + Default: ss.Default, + Max: ss.Max, + }, syserr.TranslateNetstackError(err).ToError() +} + +// SetTCPSendBufferSize implements inet.Stack.SetTCPSendBufferSize. 
+func (s *Stack) SetTCPSendBufferSize(size inet.TCPBufferSize) error { + ss := tcp.SendBufferSizeOption{ + Min: size.Min, + Default: size.Default, + Max: size.Max, + } + return syserr.TranslateNetstackError(s.Stack.SetTransportProtocolOption(tcp.ProtocolNumber, ss)).ToError() +} + +// TCPSACKEnabled implements inet.Stack.TCPSACKEnabled. +func (s *Stack) TCPSACKEnabled() (bool, error) { + var sack tcp.SACKEnabled + err := s.Stack.TransportProtocolOption(tcp.ProtocolNumber, &sack) + return bool(sack), syserr.TranslateNetstackError(err).ToError() +} + +// SetTCPSACKEnabled implements inet.Stack.SetTCPSACKEnabled. +func (s *Stack) SetTCPSACKEnabled(enabled bool) error { + return syserr.TranslateNetstackError(s.Stack.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(enabled))).ToError() +} diff --git a/pkg/sentry/socket/hostinet/BUILD b/pkg/sentry/socket/hostinet/BUILD new file mode 100644 index 000000000..60ec265ba --- /dev/null +++ b/pkg/sentry/socket/hostinet/BUILD @@ -0,0 +1,53 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "hostinet_state", + srcs = [ + "save_restore.go", + "socket.go", + "stack.go", + ], + out = "hostinet_autogen_state.go", + package = "hostinet", +) + +go_library( + name = "hostinet", + srcs = [ + "device.go", + "hostinet.go", + "hostinet_autogen_state.go", + "save_restore.go", + "socket.go", + "socket_unsafe.go", + "stack.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/hostinet", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/binary", + "//pkg/log", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/inet", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/safemem", + "//pkg/sentry/socket", + 
"//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserr", + "//pkg/syserror", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + "//pkg/waiter/fdnotifier", + ], +) diff --git a/pkg/sentry/socket/hostinet/device.go b/pkg/sentry/socket/hostinet/device.go new file mode 100644 index 000000000..a9a673316 --- /dev/null +++ b/pkg/sentry/socket/hostinet/device.go @@ -0,0 +1,19 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hostinet + +import "gvisor.googlesource.com/gvisor/pkg/sentry/device" + +var socketDevice = device.NewAnonDevice() diff --git a/pkg/sentry/socket/hostinet/hostinet.go b/pkg/sentry/socket/hostinet/hostinet.go new file mode 100644 index 000000000..67c6c8066 --- /dev/null +++ b/pkg/sentry/socket/hostinet/hostinet.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package hostinet implements AF_INET and AF_INET6 sockets using the host's +// network stack. +package hostinet diff --git a/pkg/sentry/socket/hostinet/save_restore.go b/pkg/sentry/socket/hostinet/save_restore.go new file mode 100644 index 000000000..0821a794a --- /dev/null +++ b/pkg/sentry/socket/hostinet/save_restore.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hostinet + +// beforeSave is invoked by stateify. +func (*socketOperations) beforeSave() { + panic("host.socketOperations is not savable") +} diff --git a/pkg/sentry/socket/hostinet/socket.go b/pkg/sentry/socket/hostinet/socket.go new file mode 100644 index 000000000..defa3db2c --- /dev/null +++ b/pkg/sentry/socket/hostinet/socket.go @@ -0,0 +1,562 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package hostinet + +import ( + "fmt" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" + "gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier" +) + +const ( + sizeofInt32 = 4 + + // sizeofSockaddr is the size in bytes of the largest sockaddr type + // supported by this package. + sizeofSockaddr = syscall.SizeofSockaddrInet6 // sizeof(sockaddr_in6) > sizeof(sockaddr_in) +) + +// socketOperations implements fs.FileOperations and socket.Socket for a socket +// implemented using a host socket. +type socketOperations struct { + socket.ReceiveTimeout + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + + fd int // must be O_NONBLOCK + queue waiter.Queue +} + +func newSocketFile(ctx context.Context, fd int, nonblock bool) (*fs.File, *syserr.Error) { + s := &socketOperations{fd: fd} + if err := fdnotifier.AddFD(int32(fd), &s.queue); err != nil { + return nil, syserr.FromError(err) + } + dirent := socket.NewDirent(ctx, socketDevice) + return fs.NewFile(ctx, dirent, fs.FileFlags{NonBlocking: nonblock, Read: true, Write: true}, s), nil +} + +// Release implements fs.FileOperations.Release. 
+func (s *socketOperations) Release() { + fdnotifier.RemoveFD(int32(s.fd)) + syscall.Close(s.fd) +} + +// Readiness implements waiter.Waitable.Readiness. +func (s *socketOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + return fdnotifier.NonBlockingPoll(int32(s.fd), mask) +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (s *socketOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + s.queue.EventRegister(e, mask) + fdnotifier.UpdateFD(int32(s.fd)) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (s *socketOperations) EventUnregister(e *waiter.Entry) { + s.queue.EventUnregister(e) + fdnotifier.UpdateFD(int32(s.fd)) +} + +// Read implements fs.FileOperations.Read. +func (s *socketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + n, err := dst.CopyOutFrom(ctx, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) { + // Refuse to do anything if any part of dst.Addrs was unusable. + if uint64(dst.NumBytes()) != dsts.NumBytes() { + return 0, nil + } + if dsts.IsEmpty() { + return 0, nil + } + if dsts.NumBlocks() == 1 { + // Skip allocating []syscall.Iovec. + n, err := syscall.Read(s.fd, dsts.Head().ToSlice()) + if err != nil { + return 0, translateIOSyscallError(err) + } + return uint64(n), nil + } + return readv(s.fd, iovecsFromBlockSeq(dsts)) + })) + return int64(n), err +} + +// Write implements fs.FileOperations.Write. +func (s *socketOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + n, err := src.CopyInTo(ctx, safemem.WriterFunc(func(srcs safemem.BlockSeq) (uint64, error) { + // Refuse to do anything if any part of src.Addrs was unusable. + if uint64(src.NumBytes()) != srcs.NumBytes() { + return 0, nil + } + if srcs.IsEmpty() { + return 0, nil + } + if srcs.NumBlocks() == 1 { + // Skip allocating []syscall.Iovec. 
+ n, err := syscall.Write(s.fd, srcs.Head().ToSlice()) + if err != nil { + return 0, translateIOSyscallError(err) + } + return uint64(n), nil + } + return writev(s.fd, iovecsFromBlockSeq(srcs)) + })) + return int64(n), err +} + +// Connect implements socket.Socket.Connect. +func (s *socketOperations) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error { + if len(sockaddr) > sizeofSockaddr { + sockaddr = sockaddr[:sizeofSockaddr] + } + + _, _, errno := syscall.Syscall(syscall.SYS_CONNECT, uintptr(s.fd), uintptr(firstBytePtr(sockaddr)), uintptr(len(sockaddr))) + + if errno == 0 { + return nil + } + if errno != syscall.EINPROGRESS || !blocking { + return syserr.FromError(translateIOSyscallError(errno)) + } + + // "EINPROGRESS: The socket is nonblocking and the connection cannot be + // completed immediately. It is possible to select(2) or poll(2) for + // completion by selecting the socket for writing. After select(2) + // indicates writability, use getsockopt(2) to read the SO_ERROR option at + // level SOL-SOCKET to determine whether connect() completed successfully + // (SO_ERROR is zero) or unsuccessfully (SO_ERROR is one of the usual error + // codes listed here, explaining the reason for the failure)." - connect(2) + e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventOut) + defer s.EventUnregister(&e) + if s.Readiness(waiter.EventOut)&waiter.EventOut == 0 { + if err := t.Block(ch); err != nil { + return syserr.FromError(err) + } + } + val, err := syscall.GetsockoptInt(s.fd, syscall.SOL_SOCKET, syscall.SO_ERROR) + if err != nil { + return syserr.FromError(err) + } + if val != 0 { + return syserr.FromError(syscall.Errno(uintptr(val))) + } + return nil +} + +// Accept implements socket.Socket.Accept. 
+// Accept implements socket.Socket.Accept.
+//
+// It delegates to the host accept4(2). When blocking is requested, it
+// retries on ErrWouldBlock using the task's blocking primitives rather
+// than a blocking host syscall, so the sentry is never stuck in the host
+// kernel.
+func (s *socketOperations) Accept(t *kernel.Task, peerRequested bool, flags int, blocking bool) (kdefs.FD, interface{}, uint32, *syserr.Error) {
+	var peerAddr []byte
+	var peerAddrlen uint32
+	var peerAddrPtr *byte
+	var peerAddrlenPtr *uint32
+	if peerRequested {
+		// Buffer sized for the largest supported sockaddr (sockaddr_in6).
+		peerAddr = make([]byte, sizeofSockaddr)
+		peerAddrlen = uint32(len(peerAddr))
+		peerAddrPtr = &peerAddr[0]
+		peerAddrlenPtr = &peerAddrlen
+	}
+
+	// Conservatively ignore all flags specified by the application and add
+	// SOCK_NONBLOCK since socketOperations requires it.
+	fd, syscallErr := accept4(s.fd, peerAddrPtr, peerAddrlenPtr, syscall.SOCK_NONBLOCK)
+	if blocking {
+		var ch chan struct{}
+		for syscallErr == syserror.ErrWouldBlock {
+			if ch != nil {
+				if syscallErr = t.Block(ch); syscallErr != nil {
+					break
+				}
+			} else {
+				// First wait: register for readability exactly once.
+				// Because this branch runs at most once per call, the
+				// defer below fires a single time at function return,
+				// not per loop iteration.
+				var e waiter.Entry
+				e, ch = waiter.NewChannelEntry(nil)
+				s.EventRegister(&e, waiter.EventIn)
+				defer s.EventUnregister(&e)
+			}
+			fd, syscallErr = accept4(s.fd, peerAddrPtr, peerAddrlenPtr, syscall.SOCK_NONBLOCK)
+		}
+	}
+
+	if peerRequested {
+		// Trim to the length the host kernel actually wrote.
+		peerAddr = peerAddr[:peerAddrlen]
+	}
+	if syscallErr != nil {
+		return 0, peerAddr, peerAddrlen, syserr.FromError(syscallErr)
+	}
+
+	f, err := newSocketFile(t, fd, flags&syscall.SOCK_NONBLOCK != 0)
+	if err != nil {
+		// newSocketFile did not take ownership of fd on failure.
+		syscall.Close(fd)
+		return 0, nil, 0, err
+	}
+	defer f.DecRef()
+
+	fdFlags := kernel.FDFlags{
+		CloseOnExec: flags&syscall.SOCK_CLOEXEC != 0,
+	}
+	kfd, kerr := t.FDMap().NewFDFrom(0, f, fdFlags, t.ThreadGroup().Limits())
+	return kfd, peerAddr, peerAddrlen, syserr.FromError(kerr)
+}
+
+// Bind implements socket.Socket.Bind.
+func (s *socketOperations) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error { + if len(sockaddr) > sizeofSockaddr { + sockaddr = sockaddr[:sizeofSockaddr] + } + + _, _, errno := syscall.Syscall(syscall.SYS_BIND, uintptr(s.fd), uintptr(firstBytePtr(sockaddr)), uintptr(len(sockaddr))) + if errno != 0 { + return syserr.FromError(errno) + } + return nil +} + +// Listen implements socket.Socket.Listen. +func (s *socketOperations) Listen(t *kernel.Task, backlog int) *syserr.Error { + return syserr.FromError(syscall.Listen(s.fd, backlog)) +} + +// Shutdown implements socket.Socket.Shutdown. +func (s *socketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error { + switch how { + case syscall.SHUT_RD, syscall.SHUT_WR, syscall.SHUT_RDWR: + return syserr.FromError(syscall.Shutdown(s.fd, how)) + default: + return syserr.ErrInvalidArgument + } +} + +// GetSockOpt implements socket.Socket.GetSockOpt. +func (s *socketOperations) GetSockOpt(t *kernel.Task, level int, name int, outLen int) (interface{}, *syserr.Error) { + if outLen < 0 { + return nil, syserr.ErrInvalidArgument + } + + // Whitelist options and constrain option length. 
+ var optlen int + switch level { + case syscall.SOL_IPV6: + switch name { + case syscall.IPV6_V6ONLY: + optlen = sizeofInt32 + } + case syscall.SOL_SOCKET: + switch name { + case syscall.SO_ERROR, syscall.SO_KEEPALIVE, syscall.SO_SNDBUF, syscall.SO_RCVBUF, syscall.SO_REUSEADDR, syscall.SO_TYPE: + optlen = sizeofInt32 + case syscall.SO_LINGER: + optlen = syscall.SizeofLinger + } + case syscall.SOL_TCP: + switch name { + case syscall.TCP_NODELAY: + optlen = sizeofInt32 + case syscall.TCP_INFO: + optlen = int(linux.SizeOfTCPInfo) + } + } + if optlen == 0 { + return nil, syserr.ErrProtocolNotAvailable // ENOPROTOOPT + } + if outLen < optlen { + return nil, syserr.ErrInvalidArgument + } + + opt, err := getsockopt(s.fd, level, name, optlen) + if err != nil { + return nil, syserr.FromError(err) + } + return opt, nil +} + +// SetSockOpt implements socket.Socket.SetSockOpt. +func (s *socketOperations) SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error { + // Whitelist options and constrain option length. + var optlen int + switch level { + case syscall.SOL_IPV6: + switch name { + case syscall.IPV6_V6ONLY: + optlen = sizeofInt32 + } + case syscall.SOL_SOCKET: + switch name { + case syscall.SO_SNDBUF, syscall.SO_RCVBUF, syscall.SO_REUSEADDR: + optlen = sizeofInt32 + } + case syscall.SOL_TCP: + switch name { + case syscall.TCP_NODELAY: + optlen = sizeofInt32 + } + } + if optlen == 0 { + // Pretend to accept socket options we don't understand. This seems + // dangerous, but it's what netstack does... + return nil + } + if len(opt) < optlen { + return syserr.ErrInvalidArgument + } + opt = opt[:optlen] + + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, uintptr(s.fd), uintptr(level), uintptr(name), uintptr(firstBytePtr(opt)), uintptr(len(opt)), 0) + if errno != 0 { + return syserr.FromError(errno) + } + return nil +} + +// RecvMsg implements socket.Socket.RecvMsg. 
+func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, interface{}, uint32, unix.ControlMessages, *syserr.Error) { + // Whitelist flags. + // + // FIXME: We can't support MSG_ERRQUEUE because it uses ancillary + // messages that netstack/tcpip/transport/unix doesn't understand. Kill the + // Socket interface's dependence on netstack. + if flags&^(syscall.MSG_DONTWAIT|syscall.MSG_PEEK|syscall.MSG_TRUNC) != 0 { + return 0, nil, 0, unix.ControlMessages{}, syserr.ErrInvalidArgument + } + + var senderAddr []byte + if senderRequested { + senderAddr = make([]byte, sizeofSockaddr) + } + + recvmsgToBlocks := safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) { + // Refuse to do anything if any part of dst.Addrs was unusable. + if uint64(dst.NumBytes()) != dsts.NumBytes() { + return 0, nil + } + if dsts.IsEmpty() { + return 0, nil + } + + // We always do a non-blocking recv*(). + sysflags := flags | syscall.MSG_DONTWAIT + + if dsts.NumBlocks() == 1 { + // Skip allocating []syscall.Iovec. + return recvfrom(s.fd, dsts.Head().ToSlice(), sysflags, &senderAddr) + } + + iovs := iovecsFromBlockSeq(dsts) + msg := syscall.Msghdr{ + Iov: &iovs[0], + Iovlen: uint64(len(iovs)), + } + if len(senderAddr) != 0 { + msg.Name = &senderAddr[0] + msg.Namelen = uint32(len(senderAddr)) + } + n, err := recvmsg(s.fd, &msg, sysflags) + if err != nil { + return 0, err + } + senderAddr = senderAddr[:msg.Namelen] + return n, nil + }) + + var ch chan struct{} + n, err := dst.CopyOutFrom(t, recvmsgToBlocks) + if flags&syscall.MSG_DONTWAIT == 0 { + for err == syserror.ErrWouldBlock { + // We only expect blocking to come from the actual syscall, in which + // case it can't have returned any data. 
+ if n != 0 { + panic(fmt.Sprintf("CopyOutFrom: got (%d, %v), wanted (0, %v)", n, err, err)) + } + if ch != nil { + if err = t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil { + break + } + } else { + var e waiter.Entry + e, ch = waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventIn) + defer s.EventUnregister(&e) + } + n, err = dst.CopyOutFrom(t, recvmsgToBlocks) + } + } + + return int(n), senderAddr, uint32(len(senderAddr)), unix.ControlMessages{}, syserr.FromError(err) +} + +// SendMsg implements socket.Socket.SendMsg. +func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages unix.ControlMessages) (int, *syserr.Error) { + // Whitelist flags. + if flags&^(syscall.MSG_DONTWAIT|syscall.MSG_EOR|syscall.MSG_FASTOPEN|syscall.MSG_MORE|syscall.MSG_NOSIGNAL) != 0 { + return 0, syserr.ErrInvalidArgument + } + + sendmsgFromBlocks := safemem.WriterFunc(func(srcs safemem.BlockSeq) (uint64, error) { + // Refuse to do anything if any part of src.Addrs was unusable. + if uint64(src.NumBytes()) != srcs.NumBytes() { + return 0, nil + } + if srcs.IsEmpty() { + return 0, nil + } + + // We always do a non-blocking send*(). + sysflags := flags | syscall.MSG_DONTWAIT + + if srcs.NumBlocks() == 1 { + // Skip allocating []syscall.Iovec. 
+ src := srcs.Head() + n, _, errno := syscall.Syscall6(syscall.SYS_SENDTO, uintptr(s.fd), src.Addr(), uintptr(src.Len()), uintptr(sysflags), uintptr(firstBytePtr(to)), uintptr(len(to))) + if errno != 0 { + return 0, translateIOSyscallError(errno) + } + return uint64(n), nil + } + + iovs := iovecsFromBlockSeq(srcs) + msg := syscall.Msghdr{ + Iov: &iovs[0], + Iovlen: uint64(len(iovs)), + } + if len(to) != 0 { + msg.Name = &to[0] + msg.Namelen = uint32(len(to)) + } + return sendmsg(s.fd, &msg, sysflags) + }) + + var ch chan struct{} + n, err := src.CopyInTo(t, sendmsgFromBlocks) + if flags&syscall.MSG_DONTWAIT == 0 { + for err == syserror.ErrWouldBlock { + // We only expect blocking to come from the actual syscall, in which + // case it can't have returned any data. + if n != 0 { + panic(fmt.Sprintf("CopyInTo: got (%d, %v), wanted (0, %v)", n, err, err)) + } + if ch != nil { + if err = t.Block(ch); err != nil { + break + } + } else { + var e waiter.Entry + e, ch = waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventOut) + defer s.EventUnregister(&e) + } + n, err = src.CopyInTo(t, sendmsgFromBlocks) + } + } + + return int(n), syserr.FromError(err) +} + +func iovecsFromBlockSeq(bs safemem.BlockSeq) []syscall.Iovec { + iovs := make([]syscall.Iovec, 0, bs.NumBlocks()) + for ; !bs.IsEmpty(); bs = bs.Tail() { + b := bs.Head() + iovs = append(iovs, syscall.Iovec{ + Base: &b.ToSlice()[0], + Len: uint64(b.Len()), + }) + // We don't need to care about b.NeedSafecopy(), because the host + // kernel will handle such address ranges just fine (by returning + // EFAULT). + } + return iovs +} + +func translateIOSyscallError(err error) error { + if err == syscall.EAGAIN || err == syscall.EWOULDBLOCK { + return syserror.ErrWouldBlock + } + return err +} + +type socketProvider struct { + family int +} + +// Socket implements socket.Provider.Socket. 
+func (p *socketProvider) Socket(t *kernel.Task, stypeflags unix.SockType, protocol int) (*fs.File, *syserr.Error) { + // Check that we are using the host network stack. + stack := t.NetworkContext() + if stack == nil { + return nil, nil + } + if _, ok := stack.(*Stack); !ok { + return nil, nil + } + + // Only accept TCP and UDP. + stype := int(stypeflags) & linux.SOCK_TYPE_MASK + switch stype { + case syscall.SOCK_STREAM: + switch protocol { + case 0, syscall.IPPROTO_TCP: + // ok + default: + return nil, nil + } + case syscall.SOCK_DGRAM: + switch protocol { + case 0, syscall.IPPROTO_UDP: + // ok + default: + return nil, nil + } + default: + return nil, nil + } + + // Conservatively ignore all flags specified by the application and add + // SOCK_NONBLOCK since socketOperations requires it. Pass a protocol of 0 + // to simplify the syscall filters, since 0 and IPPROTO_* are equivalent. + fd, err := syscall.Socket(p.family, stype|syscall.SOCK_NONBLOCK, 0) + if err != nil { + return nil, syserr.FromError(err) + } + return newSocketFile(t, fd, stypeflags&syscall.SOCK_NONBLOCK != 0) +} + +// Pair implements socket.Provider.Pair. +func (p *socketProvider) Pair(t *kernel.Task, stype unix.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) { + // Not supported by AF_INET/AF_INET6. + return nil, nil, nil +} + +func init() { + for _, family := range []int{syscall.AF_INET, syscall.AF_INET6} { + socket.RegisterProvider(family, &socketProvider{family}) + } +} diff --git a/pkg/sentry/socket/hostinet/socket_unsafe.go b/pkg/sentry/socket/hostinet/socket_unsafe.go new file mode 100644 index 000000000..f8bb75636 --- /dev/null +++ b/pkg/sentry/socket/hostinet/socket_unsafe.go @@ -0,0 +1,138 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hostinet + +import ( + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +func firstBytePtr(bs []byte) unsafe.Pointer { + if bs == nil { + return nil + } + return unsafe.Pointer(&bs[0]) +} + +// Preconditions: len(dsts) != 0. +func readv(fd int, dsts []syscall.Iovec) (uint64, error) { + n, _, errno := syscall.Syscall(syscall.SYS_READV, uintptr(fd), uintptr(unsafe.Pointer(&dsts[0])), uintptr(len(dsts))) + if errno != 0 { + return 0, translateIOSyscallError(errno) + } + return uint64(n), nil +} + +// Preconditions: len(srcs) != 0. +func writev(fd int, srcs []syscall.Iovec) (uint64, error) { + n, _, errno := syscall.Syscall(syscall.SYS_WRITEV, uintptr(fd), uintptr(unsafe.Pointer(&srcs[0])), uintptr(len(srcs))) + if errno != 0 { + return 0, translateIOSyscallError(errno) + } + return uint64(n), nil +} + +// Ioctl implements fs.FileOperations.Ioctl. 
+// Ioctl implements fs.FileOperations.Ioctl.
+//
+// Only TIOCINQ/TIOCOUTQ (bytes readable/unsent) are supported; they are
+// forwarded to the host socket and the 32-bit result is copied out to the
+// address in args[2]. All other requests fail with ENOTTY.
+func (s *socketOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) {
+	switch cmd := uintptr(args[1].Int()); cmd {
+	case syscall.TIOCINQ, syscall.TIOCOUTQ:
+		var val int32
+		if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s.fd), cmd, uintptr(unsafe.Pointer(&val))); errno != 0 {
+			return 0, translateIOSyscallError(errno)
+		}
+		var buf [4]byte
+		usermem.ByteOrder.PutUint32(buf[:], uint32(val))
+		_, err := io.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{
+			AddressSpaceActive: true,
+		})
+		return 0, err
+
+	default:
+		return 0, syserror.ENOTTY
+	}
+}
+
+// accept4 wraps the host accept4(2) syscall, translating EAGAIN/EWOULDBLOCK
+// to syserror.ErrWouldBlock. addr/addrlen may be nil when the peer address
+// is not wanted.
+func accept4(fd int, addr *byte, addrlen *uint32, flags int) (int, error) {
+	afd, _, errno := syscall.Syscall6(syscall.SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(addr)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+	if errno != 0 {
+		return 0, translateIOSyscallError(errno)
+	}
+	return int(afd), nil
+}
+
+// getsockopt calls the host getsockopt(2) with an optlen-byte buffer and
+// returns the option value truncated to the length the host kernel wrote.
+func getsockopt(fd int, level, name int, optlen int) ([]byte, error) {
+	opt := make([]byte, optlen)
+	optlen32 := int32(len(opt))
+	_, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(name), uintptr(firstBytePtr(opt)), uintptr(unsafe.Pointer(&optlen32)), 0)
+	if errno != 0 {
+		return nil, errno
+	}
+	return opt[:optlen32], nil
+}
+
+// GetSockName implements socket.Socket.GetSockName.
+//
+// It forwards to the host getsockname(2) with a buffer sized for the
+// largest supported sockaddr and trims to the length the kernel reported.
+func (s *socketOperations) GetSockName(t *kernel.Task) (interface{}, uint32, *syserr.Error) {
+	addr := make([]byte, sizeofSockaddr)
+	addrlen := uint32(len(addr))
+	_, _, errno := syscall.Syscall(syscall.SYS_GETSOCKNAME, uintptr(s.fd), uintptr(unsafe.Pointer(&addr[0])), uintptr(unsafe.Pointer(&addrlen)))
+	if errno != 0 {
+		return nil, 0, syserr.FromError(errno)
+	}
+	return addr[:addrlen], addrlen, nil
+}
+
+// GetPeerName implements socket.Socket.GetPeerName.
+// GetPeerName implements socket.Socket.GetPeerName, forwarding to the host
+// getpeername(2) and trimming the buffer to the reported length.
+func (s *socketOperations) GetPeerName(t *kernel.Task) (interface{}, uint32, *syserr.Error) {
+	addr := make([]byte, sizeofSockaddr)
+	addrlen := uint32(len(addr))
+	_, _, errno := syscall.Syscall(syscall.SYS_GETPEERNAME, uintptr(s.fd), uintptr(unsafe.Pointer(&addr[0])), uintptr(unsafe.Pointer(&addrlen)))
+	if errno != 0 {
+		return nil, 0, syserr.FromError(errno)
+	}
+	return addr[:addrlen], addrlen, nil
+}
+
+// recvfrom wraps the host recvfrom(2). On return, *from is truncated to the
+// sender-address length the host kernel wrote.
+//
+// NOTE(review): *from may be nil (sender not requested); firstBytePtr then
+// passes a NULL src_addr while fromLen stays a valid pointer — presumably
+// the host leaves fromLen untouched at 0 in that case, so the final
+// reslice is a no-op. Confirm against recvfrom(2) semantics.
+func recvfrom(fd int, dst []byte, flags int, from *[]byte) (uint64, error) {
+	fromLen := uint32(len(*from))
+	n, _, errno := syscall.Syscall6(syscall.SYS_RECVFROM, uintptr(fd), uintptr(firstBytePtr(dst)), uintptr(len(dst)), uintptr(flags), uintptr(firstBytePtr(*from)), uintptr(unsafe.Pointer(&fromLen)))
+	if errno != 0 {
+		return 0, translateIOSyscallError(errno)
+	}
+	*from = (*from)[:fromLen]
+	return uint64(n), nil
+}
+
+// recvmsg wraps the host recvmsg(2), translating EAGAIN/EWOULDBLOCK to
+// syserror.ErrWouldBlock.
+func recvmsg(fd int, msg *syscall.Msghdr, flags int) (uint64, error) {
+	n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	if errno != 0 {
+		return 0, translateIOSyscallError(errno)
+	}
+	return uint64(n), nil
+}
+
+// sendmsg wraps the host sendmsg(2), translating EAGAIN/EWOULDBLOCK to
+// syserror.ErrWouldBlock.
+func sendmsg(fd int, msg *syscall.Msghdr, flags int) (uint64, error) {
+	n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	if errno != 0 {
+		return 0, translateIOSyscallError(errno)
+	}
+	return uint64(n), nil
+}
diff --git a/pkg/sentry/socket/hostinet/stack.go b/pkg/sentry/socket/hostinet/stack.go
new file mode 100644
index 000000000..44c3b9a3f
--- /dev/null
+++ b/pkg/sentry/socket/hostinet/stack.go
@@ -0,0 +1,244 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hostinet + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +var defaultRecvBufSize = inet.TCPBufferSize{ + Min: 4096, + Default: 87380, + Max: 6291456, +} + +var defaultSendBufSize = inet.TCPBufferSize{ + Min: 4096, + Default: 16384, + Max: 4194304, +} + +// Stack implements inet.Stack for host sockets. +type Stack struct { + // Stack is immutable. + interfaces map[int32]inet.Interface + interfaceAddrs map[int32][]inet.InterfaceAddr + supportsIPv6 bool + tcpRecvBufSize inet.TCPBufferSize + tcpSendBufSize inet.TCPBufferSize + tcpSACKEnabled bool +} + +// NewStack returns an empty Stack containing no configuration. +func NewStack() *Stack { + return &Stack{ + interfaces: make(map[int32]inet.Interface), + interfaceAddrs: make(map[int32][]inet.InterfaceAddr), + } +} + +// Configure sets up the stack using the current state of the host network. 
+func (s *Stack) Configure() error { + if err := addHostInterfaces(s); err != nil { + return err + } + + if _, err := os.Stat("/proc/net/if_inet6"); err == nil { + s.supportsIPv6 = true + } + + s.tcpRecvBufSize = defaultRecvBufSize + if tcpRMem, err := readTCPBufferSizeFile("/proc/sys/net/ipv4/tcp_rmem"); err == nil { + s.tcpRecvBufSize = tcpRMem + } else { + log.Warningf("Failed to read TCP receive buffer size, using default values") + } + + s.tcpSendBufSize = defaultSendBufSize + if tcpWMem, err := readTCPBufferSizeFile("/proc/sys/net/ipv4/tcp_wmem"); err == nil { + s.tcpSendBufSize = tcpWMem + } else { + log.Warningf("Failed to read TCP send buffer size, using default values") + } + + s.tcpSACKEnabled = false + if sack, err := ioutil.ReadFile("/proc/sys/net/ipv4/tcp_sack"); err == nil { + s.tcpSACKEnabled = strings.TrimSpace(string(sack)) != "0" + } else { + log.Warningf("Failed to read if TCP SACK if enabled, setting to false") + } + + return nil +} + +// ExtractHostInterfaces will populate an interface map and +// interfaceAddrs map with the results of the equivalent +// netlink messages. +func ExtractHostInterfaces(links []syscall.NetlinkMessage, addrs []syscall.NetlinkMessage, interfaces map[int32]inet.Interface, interfaceAddrs map[int32][]inet.InterfaceAddr) error { + for _, link := range links { + if link.Header.Type != syscall.RTM_NEWLINK { + continue + } + if len(link.Data) < syscall.SizeofIfInfomsg { + return fmt.Errorf("RTM_GETLINK returned RTM_NEWLINK message with invalid data length (%d bytes, expected at least %d bytes)", len(link.Data), syscall.SizeofIfInfomsg) + } + var ifinfo syscall.IfInfomsg + binary.Unmarshal(link.Data[:syscall.SizeofIfInfomsg], usermem.ByteOrder, &ifinfo) + inetIF := inet.Interface{ + DeviceType: ifinfo.Type, + Flags: ifinfo.Flags, + } + // Not clearly documented: syscall.ParseNetlinkRouteAttr will check the + // syscall.NetlinkMessage.Header.Type and skip the struct ifinfomsg + // accordingly. 
+ attrs, err := syscall.ParseNetlinkRouteAttr(&link) + if err != nil { + return fmt.Errorf("RTM_GETLINK returned RTM_NEWLINK message with invalid rtattrs: %v", err) + } + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.IFLA_ADDRESS: + inetIF.Addr = attr.Value + case syscall.IFLA_IFNAME: + inetIF.Name = string(attr.Value[:len(attr.Value)-1]) + } + } + interfaces[ifinfo.Index] = inetIF + } + + for _, addr := range addrs { + if addr.Header.Type != syscall.RTM_NEWADDR { + continue + } + if len(addr.Data) < syscall.SizeofIfAddrmsg { + return fmt.Errorf("RTM_GETADDR returned RTM_NEWADDR message with invalid data length (%d bytes, expected at least %d bytes)", len(addr.Data), syscall.SizeofIfAddrmsg) + } + var ifaddr syscall.IfAddrmsg + binary.Unmarshal(addr.Data[:syscall.SizeofIfAddrmsg], usermem.ByteOrder, &ifaddr) + inetAddr := inet.InterfaceAddr{ + Family: ifaddr.Family, + PrefixLen: ifaddr.Prefixlen, + Flags: ifaddr.Flags, + } + attrs, err := syscall.ParseNetlinkRouteAttr(&addr) + if err != nil { + return fmt.Errorf("RTM_GETADDR returned RTM_NEWADDR message with invalid rtattrs: %v", err) + } + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.IFA_ADDRESS: + inetAddr.Addr = attr.Value + } + } + interfaceAddrs[int32(ifaddr.Index)] = append(interfaceAddrs[int32(ifaddr.Index)], inetAddr) + } + + return nil +} + +func addHostInterfaces(s *Stack) error { + links, err := doNetlinkRouteRequest(syscall.RTM_GETLINK) + if err != nil { + return fmt.Errorf("RTM_GETLINK failed: %v", err) + } + + addrs, err := doNetlinkRouteRequest(syscall.RTM_GETADDR) + if err != nil { + return fmt.Errorf("RTM_GETADDR failed: %v", err) + } + + return ExtractHostInterfaces(links, addrs, s.interfaces, s.interfaceAddrs) +} + +func doNetlinkRouteRequest(req int) ([]syscall.NetlinkMessage, error) { + data, err := syscall.NetlinkRIB(req, syscall.AF_UNSPEC) + if err != nil { + return nil, err + } + return syscall.ParseNetlinkMessage(data) +} + +func 
readTCPBufferSizeFile(filename string) (inet.TCPBufferSize, error) { + contents, err := ioutil.ReadFile(filename) + if err != nil { + return inet.TCPBufferSize{}, fmt.Errorf("failed to read %s: %v", filename, err) + } + ioseq := usermem.BytesIOSequence(contents) + fields := make([]int32, 3) + if n, err := usermem.CopyInt32StringsInVec(context.Background(), ioseq.IO, ioseq.Addrs, fields, ioseq.Opts); n != ioseq.NumBytes() || err != nil { + return inet.TCPBufferSize{}, fmt.Errorf("failed to parse %s (%q): got %v after %d/%d bytes", filename, contents, err, n, ioseq.NumBytes()) + } + return inet.TCPBufferSize{ + Min: int(fields[0]), + Default: int(fields[1]), + Max: int(fields[2]), + }, nil +} + +// Interfaces implements inet.Stack.Interfaces. +func (s *Stack) Interfaces() map[int32]inet.Interface { + return s.interfaces +} + +// InterfaceAddrs implements inet.Stack.InterfaceAddrs. +func (s *Stack) InterfaceAddrs() map[int32][]inet.InterfaceAddr { + return s.interfaceAddrs +} + +// SupportsIPv6 implements inet.Stack.SupportsIPv6. +func (s *Stack) SupportsIPv6() bool { + return s.supportsIPv6 +} + +// TCPReceiveBufferSize implements inet.Stack.TCPReceiveBufferSize. +func (s *Stack) TCPReceiveBufferSize() (inet.TCPBufferSize, error) { + return s.tcpRecvBufSize, nil +} + +// SetTCPReceiveBufferSize implements inet.Stack.SetTCPReceiveBufferSize. +func (s *Stack) SetTCPReceiveBufferSize(size inet.TCPBufferSize) error { + return syserror.EACCES +} + +// TCPSendBufferSize implements inet.Stack.TCPSendBufferSize. +func (s *Stack) TCPSendBufferSize() (inet.TCPBufferSize, error) { + return s.tcpSendBufSize, nil +} + +// SetTCPSendBufferSize implements inet.Stack.SetTCPSendBufferSize. +func (s *Stack) SetTCPSendBufferSize(size inet.TCPBufferSize) error { + return syserror.EACCES +} + +// TCPSACKEnabled implements inet.Stack.TCPSACKEnabled. 
+func (s *Stack) TCPSACKEnabled() (bool, error) { + return s.tcpSACKEnabled, nil +} + +// SetTCPSACKEnabled implements inet.Stack.SetTCPSACKEnabled. +func (s *Stack) SetTCPSACKEnabled(enabled bool) error { + return syserror.EACCES +} diff --git a/pkg/sentry/socket/netlink/BUILD b/pkg/sentry/socket/netlink/BUILD new file mode 100644 index 000000000..9df3ab17c --- /dev/null +++ b/pkg/sentry/socket/netlink/BUILD @@ -0,0 +1,47 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "netlink_state", + srcs = [ + "socket.go", + ], + out = "netlink_state.go", + package = "netlink", +) + +go_library( + name = "netlink", + srcs = [ + "message.go", + "netlink_state.go", + "provider.go", + "socket.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/netlink", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/binary", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/socket", + "//pkg/sentry/socket/netlink/port", + "//pkg/sentry/socket/unix", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserr", + "//pkg/syserror", + "//pkg/tcpip", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/socket/netlink/message.go b/pkg/sentry/socket/netlink/message.go new file mode 100644 index 000000000..b902d7ec9 --- /dev/null +++ b/pkg/sentry/socket/netlink/message.go @@ -0,0 +1,159 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package netlink + +import ( + "fmt" + "math" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// alignUp rounds a length up to an alignment. +// +// Preconditions: align is a power of two. +func alignUp(length int, align uint) int { + return (length + int(align) - 1) &^ (int(align) - 1) +} + +// Message contains a complete serialized netlink message. +type Message struct { + buf []byte +} + +// NewMessage creates a new Message containing the passed header. +// +// The header length will be updated by Finalize. +func NewMessage(hdr linux.NetlinkMessageHeader) *Message { + return &Message{ + buf: binary.Marshal(nil, usermem.ByteOrder, hdr), + } +} + +// Finalize returns the []byte containing the entire message, with the total +// length set in the message header. The Message must not be modified after +// calling Finalize. +func (m *Message) Finalize() []byte { + // Update length, which is the first 4 bytes of the header. + usermem.ByteOrder.PutUint32(m.buf, uint32(len(m.buf))) + + // Align the message. Note that the message length in the header (set + // above) is the useful length of the message, not the total aligned + // length. See net/netlink/af_netlink.c:__nlmsg_put. + aligned := alignUp(len(m.buf), linux.NLMSG_ALIGNTO) + m.putZeros(aligned - len(m.buf)) + return m.buf +} + +// putZeros adds n zeros to the message. 
+// putZeros appends n zero bytes to the message, used for netlink alignment
+// padding.
+func (m *Message) putZeros(n int) {
+	for n > 0 {
+		m.buf = append(m.buf, 0)
+		n--
+	}
+}
+
+// Put serializes v into the message.
+func (m *Message) Put(v interface{}) {
+	m.buf = binary.Marshal(m.buf, usermem.ByteOrder, v)
+}
+
+// PutAttr adds v to the message as a netlink attribute.
+//
+// Preconditions: The serialized attribute (linux.NetlinkAttrHeaderSize +
+// binary.Size(v) fits in math.MaxUint16 bytes.
+func (m *Message) PutAttr(atype uint16, v interface{}) {
+	l := linux.NetlinkAttrHeaderSize + int(binary.Size(v))
+	if l > math.MaxUint16 {
+		panic(fmt.Sprintf("attribute too large: %d", l))
+	}
+
+	m.Put(linux.NetlinkAttrHeader{
+		Type:   atype,
+		Length: uint16(l),
+	})
+	m.Put(v)
+
+	// Align the attribute.
+	aligned := alignUp(l, linux.NLA_ALIGNTO)
+	m.putZeros(aligned - l)
+}
+
+// PutAttrString adds s to the message as a netlink attribute.
+//
+// NOTE(review): unlike PutAttr, there is no math.MaxUint16 guard here; a
+// string longer than ~64KiB would silently truncate the Length field via
+// the uint16 conversion. Callers presumably only pass short strings —
+// confirm, or add the same panic as PutAttr.
+func (m *Message) PutAttrString(atype uint16, s string) {
+	l := linux.NetlinkAttrHeaderSize + len(s) + 1
+	m.Put(linux.NetlinkAttrHeader{
+		Type:   atype,
+		Length: uint16(l),
+	})
+
+	// String + NUL-termination.
+	m.Put([]byte(s))
+	m.putZeros(1)
+
+	// Align the attribute.
+	aligned := alignUp(l, linux.NLA_ALIGNTO)
+	m.putZeros(aligned - l)
+}
+
+// MessageSet contains a series of netlink messages.
+type MessageSet struct {
+	// Multi indicates that this a multi-part message, to be terminated by
+	// NLMSG_DONE. NLMSG_DONE is sent even if the set contains only one
+	// Message.
+	//
+	// If Multi is set, all added messages will have NLM_F_MULTI set.
+	Multi bool
+
+	// PortID is the destination port for all messages.
+	PortID int32
+
+	// Seq is the sequence counter for all messages in the set.
+	Seq uint32
+
+	// Messages contains the messages in the set.
+	Messages []*Message
+}
+
+// NewMessageSet creates a new MessageSet.
+//
+// portID is the destination port to set as PortID in all messages.
+//
+// seq is the sequence counter to set as seq in all messages in the set.
+func NewMessageSet(portID int32, seq uint32) *MessageSet { + return &MessageSet{ + PortID: portID, + Seq: seq, + } +} + +// AddMessage adds a new message to the set and returns it for further +// additions. +// +// The passed header will have Seq, PortID and the multi flag set +// automatically. +func (ms *MessageSet) AddMessage(hdr linux.NetlinkMessageHeader) *Message { + hdr.Seq = ms.Seq + hdr.PortID = uint32(ms.PortID) + if ms.Multi { + hdr.Flags |= linux.NLM_F_MULTI + } + + m := NewMessage(hdr) + ms.Messages = append(ms.Messages, m) + return m +} diff --git a/pkg/sentry/socket/netlink/port/BUILD b/pkg/sentry/socket/netlink/port/BUILD new file mode 100644 index 000000000..7340b95c9 --- /dev/null +++ b/pkg/sentry/socket/netlink/port/BUILD @@ -0,0 +1,28 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "port_state", + srcs = ["port.go"], + out = "port_state.go", + package = "port", +) + +go_library( + name = "port", + srcs = [ + "port.go", + "port_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/netlink/port", + visibility = ["//pkg/sentry:internal"], + deps = ["//pkg/state"], +) + +go_test( + name = "port_test", + srcs = ["port_test.go"], + embed = [":port"], +) diff --git a/pkg/sentry/socket/netlink/port/port.go b/pkg/sentry/socket/netlink/port/port.go new file mode 100644 index 000000000..4ccf0b84c --- /dev/null +++ b/pkg/sentry/socket/netlink/port/port.go @@ -0,0 +1,114 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package port provides port ID allocation for netlink sockets. +// +// A netlink port is any int32 value. Positive ports are typically equivalent +// to the PID of the binding process. If that port is unavailable, negative +// ports are searched to find a free port that will not conflict with other +// PIDS. +package port + +import ( + "fmt" + "math" + "math/rand" + "sync" +) + +// maxPorts is a sanity limit on the maximum number of ports to allocate per +// protocol. +const maxPorts = 10000 + +// Manager allocates netlink port IDs. +type Manager struct { + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // ports contains a map of allocated ports for each protocol. + ports map[int]map[int32]struct{} +} + +// New creates a new Manager. +func New() *Manager { + return &Manager{ + ports: make(map[int]map[int32]struct{}), + } +} + +// Allocate reserves a new port ID for protocol. hint will be taken if +// available. +func (m *Manager) Allocate(protocol int, hint int32) (int32, bool) { + m.mu.Lock() + defer m.mu.Unlock() + + proto, ok := m.ports[protocol] + if !ok { + proto = make(map[int32]struct{}) + // Port 0 is reserved for the kernel. + proto[0] = struct{}{} + m.ports[protocol] = proto + } + + if len(proto) >= maxPorts { + return 0, false + } + + if _, ok := proto[hint]; !ok { + // Hint is available, reserve it. + proto[hint] = struct{}{} + return hint, true + } + + // Search for any free port in [math.MinInt32, -4096). The positive + // port space is left open for pid-based allocations. 
This behavior is + // consistent with Linux. + start := int32(math.MinInt32 + rand.Int63n(math.MaxInt32-4096+1)) + curr := start + for { + if _, ok := proto[curr]; !ok { + proto[curr] = struct{}{} + return curr, true + } + + curr-- + if curr >= -4096 { + curr = -4097 + } + if curr == start { + // Nothing found. We should always find a free port + // because maxPorts < -4096 - MinInt32. + panic(fmt.Sprintf("No free port found in %+v", proto)) + } + } +} + +// Release frees the specified port for protocol. +// +// Preconditions: port is already allocated. +func (m *Manager) Release(protocol int, port int32) { + m.mu.Lock() + defer m.mu.Unlock() + + proto, ok := m.ports[protocol] + if !ok { + panic(fmt.Sprintf("Released port %d for protocol %d which has no allocations", port, protocol)) + } + + if _, ok := proto[port]; !ok { + panic(fmt.Sprintf("Released port %d for protocol %d is not allocated", port, protocol)) + } + + delete(proto, port) +} diff --git a/pkg/sentry/socket/netlink/port/port_test.go b/pkg/sentry/socket/netlink/port/port_test.go new file mode 100644 index 000000000..34565e2f9 --- /dev/null +++ b/pkg/sentry/socket/netlink/port/port_test.go @@ -0,0 +1,82 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package port + +import ( + "testing" +) + +func TestAllocateHint(t *testing.T) { + m := New() + + // We can get the hint port. 
+ p, ok := m.Allocate(0, 1) + if !ok { + t.Errorf("m.Allocate got !ok want ok") + } + if p != 1 { + t.Errorf("m.Allocate(0, 1) got %d want 1", p) + } + + // Hint is taken. + p, ok = m.Allocate(0, 1) + if !ok { + t.Errorf("m.Allocate got !ok want ok") + } + if p == 1 { + t.Errorf("m.Allocate(0, 1) got 1 want anything else") + } + + // Hint is available for a different protocol. + p, ok = m.Allocate(1, 1) + if !ok { + t.Errorf("m.Allocate got !ok want ok") + } + if p != 1 { + t.Errorf("m.Allocate(1, 1) got %d want 1", p) + } + + m.Release(0, 1) + + // Hint is available again after release. + p, ok = m.Allocate(0, 1) + if !ok { + t.Errorf("m.Allocate got !ok want ok") + } + if p != 1 { + t.Errorf("m.Allocate(0, 1) got %d want 1", p) + } +} + +func TestAllocateExhausted(t *testing.T) { + m := New() + + // Fill all ports (0 is already reserved). + for i := int32(1); i < maxPorts; i++ { + p, ok := m.Allocate(0, i) + if !ok { + t.Fatalf("m.Allocate got !ok want ok") + } + if p != i { + t.Fatalf("m.Allocate(0, %d) got %d want %d", i, p, i) + } + } + + // Now no more can be allocated. + p, ok := m.Allocate(0, 1) + if ok { + t.Errorf("m.Allocate got %d, ok want !ok", p) + } +} diff --git a/pkg/sentry/socket/netlink/provider.go b/pkg/sentry/socket/netlink/provider.go new file mode 100644 index 000000000..36800da4d --- /dev/null +++ b/pkg/sentry/socket/netlink/provider.go @@ -0,0 +1,104 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package netlink + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +// Protocol is the implementation of a netlink socket protocol. +type Protocol interface { + // Protocol returns the Linux netlink protocol value. + Protocol() int + + // ProcessMessage processes a single message from userspace. + // + // If err == nil, any messages added to ms will be sent back to the + // other end of the socket. Setting ms.Multi will cause an NLMSG_DONE + // message to be sent even if ms contains no messages. + ProcessMessage(ctx context.Context, hdr linux.NetlinkMessageHeader, data []byte, ms *MessageSet) *syserr.Error +} + +// Provider is a function that creates a new Protocol for a specific netlink +// protocol. +// +// Note that this is distinct from socket.Provider, which is used for all +// socket families. +type Provider func(t *kernel.Task) (Protocol, *syserr.Error) + +// protocols holds a map of all known address protocols and their provider. +var protocols = make(map[int]Provider) + +// RegisterProvider registers the provider of a given address protocol so that +// netlink sockets of that type can be created via socket(2). +// +// Preconditions: May only be called before any netlink sockets are created. +func RegisterProvider(protocol int, provider Provider) { + if p, ok := protocols[protocol]; ok { + panic(fmt.Sprintf("Netlink protocol %d already provided by %+v", protocol, p)) + } + + protocols[protocol] = provider +} + +// socketProvider implements socket.Provider. +type socketProvider struct { +} + +// Socket implements socket.Provider.Socket. 
+func (*socketProvider) Socket(t *kernel.Task, stype unix.SockType, protocol int) (*fs.File, *syserr.Error) { + // Netlink sockets must be specified as datagram or raw, but they + // behave the same regardless of type. + if stype != unix.SockDgram && stype != unix.SockRaw { + return nil, syserr.ErrSocketNotSupported + } + + provider, ok := protocols[protocol] + if !ok { + return nil, syserr.ErrProtocolNotSupported + } + + p, err := provider(t) + if err != nil { + return nil, err + } + + s, err := NewSocket(t, p) + if err != nil { + return nil, err + } + + d := socket.NewDirent(t, netlinkSocketDevice) + return fs.NewFile(t, d, fs.FileFlags{Read: true, Write: true}, s), nil +} + +// Pair implements socket.Provider.Pair by returning an error. +func (*socketProvider) Pair(*kernel.Task, unix.SockType, int) (*fs.File, *fs.File, *syserr.Error) { + // Netlink sockets never supports creating socket pairs. + return nil, nil, syserr.ErrNotSupported +} + +// init registers the socket provider. +func init() { + socket.RegisterProvider(linux.AF_NETLINK, &socketProvider{}) +} diff --git a/pkg/sentry/socket/netlink/route/BUILD b/pkg/sentry/socket/netlink/route/BUILD new file mode 100644 index 000000000..ff3f7b7a4 --- /dev/null +++ b/pkg/sentry/socket/netlink/route/BUILD @@ -0,0 +1,33 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "route_state", + srcs = ["protocol.go"], + out = "route_state.go", + package = "route", +) + +go_library( + name = "route", + srcs = [ + "protocol.go", + "route_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/netlink/route", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/inet", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/socket/netlink", + "//pkg/sentry/usermem", + 
"//pkg/state", + "//pkg/syserr", + ], +) diff --git a/pkg/sentry/socket/netlink/route/protocol.go b/pkg/sentry/socket/netlink/route/protocol.go new file mode 100644 index 000000000..d611519d4 --- /dev/null +++ b/pkg/sentry/socket/netlink/route/protocol.go @@ -0,0 +1,189 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package route provides a NETLINK_ROUTE socket protocol. +package route + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/netlink" + "gvisor.googlesource.com/gvisor/pkg/syserr" +) + +// commandKind describes the operational class of a message type. +// +// The route message types use the lower 2 bits of the type to describe class +// of command. +type commandKind int + +const ( + kindNew commandKind = 0x0 + kindDel = 0x1 + kindGet = 0x2 + kindSet = 0x3 +) + +func typeKind(typ uint16) commandKind { + return commandKind(typ & 0x3) +} + +// Protocol implements netlink.Protocol. +type Protocol struct { + // stack is the network stack that this provider describes. + // + // May be nil. + stack inet.Stack +} + +var _ netlink.Protocol = (*Protocol)(nil) + +// NewProtocol creates a NETLINK_ROUTE netlink.Protocol. 
+func NewProtocol(t *kernel.Task) (netlink.Protocol, *syserr.Error) { + return &Protocol{ + stack: t.NetworkContext(), + }, nil +} + +// Protocol implements netlink.Protocol.Protocol. +func (p *Protocol) Protocol() int { + return linux.NETLINK_ROUTE +} + +// dumpLinks handles RTM_GETLINK + NLM_F_DUMP requests. +func (p *Protocol) dumpLinks(ctx context.Context, hdr linux.NetlinkMessageHeader, data []byte, ms *netlink.MessageSet) *syserr.Error { + // NLM_F_DUMP + RTM_GETLINK messages are supposed to include an + // ifinfomsg. However, Linux <3.9 only checked for rtgenmsg, and some + // userspace applications (including glibc) still include rtgenmsg. + // Linux has a workaround based on the total message length. + // + // We don't bother to check for either, since we don't support any + // extra attributes that may be included anyways. + // + // The message may also contain netlink attribute IFLA_EXT_MASK, which + // we don't support. + + // The RTM_GETLINK dump response is a set of messages each containing + // an InterfaceInfoMessage followed by a set of netlink attributes. + + // We always send back an NLMSG_DONE. + ms.Multi = true + + if p.stack == nil { + // No network devices. + return nil + } + + for id, i := range p.stack.Interfaces() { + m := ms.AddMessage(linux.NetlinkMessageHeader{ + Type: linux.RTM_NEWLINK, + }) + + m.Put(linux.InterfaceInfoMessage{ + Family: linux.AF_UNSPEC, + Type: i.DeviceType, + Index: id, + Flags: i.Flags, + }) + + m.PutAttrString(linux.IFLA_IFNAME, i.Name) + + // TODO: There are many more attributes, such as + // MAC address. + } + + return nil +} + +// dumpAddrs handles RTM_GETADDR + NLM_F_DUMP requests. +func (p *Protocol) dumpAddrs(ctx context.Context, hdr linux.NetlinkMessageHeader, data []byte, ms *netlink.MessageSet) *syserr.Error { + // RTM_GETADDR dump requests need not contain anything more than the + // netlink header and 1 byte protocol family common to all + // NETLINK_ROUTE requests. 
+ // + // TODO: Filter output by passed protocol family. + + // The RTM_GETADDR dump response is a set of RTM_NEWADDR messages each + // containing an InterfaceAddrMessage followed by a set of netlink + // attributes. + + // We always send back an NLMSG_DONE. + ms.Multi = true + + if p.stack == nil { + // No network devices. + return nil + } + + for id, as := range p.stack.InterfaceAddrs() { + for _, a := range as { + m := ms.AddMessage(linux.NetlinkMessageHeader{ + Type: linux.RTM_NEWADDR, + }) + + m.Put(linux.InterfaceAddrMessage{ + Family: a.Family, + PrefixLen: a.PrefixLen, + Index: uint32(id), + }) + + m.PutAttr(linux.IFA_ADDRESS, []byte(a.Addr)) + + // TODO: There are many more attributes. + } + } + + return nil +} + +// ProcessMessage implements netlink.Protocol.ProcessMessage. +func (p *Protocol) ProcessMessage(ctx context.Context, hdr linux.NetlinkMessageHeader, data []byte, ms *netlink.MessageSet) *syserr.Error { + // All messages start with a 1 byte protocol family. + if len(data) < 1 { + // Linux ignores messages missing the protocol family. See + // net/core/rtnetlink.c:rtnetlink_rcv_msg. + return nil + } + + // Non-GET message types require CAP_NET_ADMIN. + if typeKind(hdr.Type) != kindGet { + creds := auth.CredentialsFromContext(ctx) + if !creds.HasCapability(linux.CAP_NET_ADMIN) { + return syserr.ErrPermissionDenied + } + } + + // TODO: Only the dump variant of the types below are + // supported. + if hdr.Flags&linux.NLM_F_DUMP != linux.NLM_F_DUMP { + return syserr.ErrNotSupported + } + + switch hdr.Type { + case linux.RTM_GETLINK: + return p.dumpLinks(ctx, hdr, data, ms) + case linux.RTM_GETADDR: + return p.dumpAddrs(ctx, hdr, data, ms) + default: + return syserr.ErrNotSupported + } +} + +// init registers the NETLINK_ROUTE provider. 
+func init() { + netlink.RegisterProvider(linux.NETLINK_ROUTE, NewProtocol) +} diff --git a/pkg/sentry/socket/netlink/socket.go b/pkg/sentry/socket/netlink/socket.go new file mode 100644 index 000000000..2d0e59ceb --- /dev/null +++ b/pkg/sentry/socket/netlink/socket.go @@ -0,0 +1,517 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package netlink provides core functionality for netlink sockets. +package netlink + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/netlink/port" + sunix "gvisor.googlesource.com/gvisor/pkg/sentry/socket/unix" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + 
"gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// defaultSendBufferSize is the default size for the send buffer. +const defaultSendBufferSize = 16 * 1024 + +// netlinkSocketDevice is the netlink socket virtual device. +var netlinkSocketDevice = device.NewAnonDevice() + +// Socket is the base socket type for netlink sockets. +// +// This implementation only supports userspace sending and receiving messages +// to/from the kernel. +// +// Socket implements socket.Socket. +type Socket struct { + socket.ReceiveTimeout + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + + // ports provides netlink port allocation. + ports *port.Manager + + // protocol is the netlink protocol implementation. + protocol Protocol + + // ep is a datagram unix endpoint used to buffer messages sent from the + // kernel to userspace. RecvMsg reads messages from this endpoint. + ep unix.Endpoint + + // connection is the kernel's connection to ep, used to write messages + // sent to userspace. + connection unix.ConnectedEndpoint + + // mu protects the fields below. + mu sync.Mutex `state:"nosave"` + + // bound indicates that portid is valid. + bound bool + + // portID is the port ID allocated for this socket. + portID int32 + + // sendBufferSize is the send buffer "size". We don't actually have a + // fixed buffer but only consume this many bytes. + sendBufferSize uint64 +} + +var _ socket.Socket = (*Socket)(nil) + +// NewSocket creates a new Socket. +func NewSocket(t *kernel.Task, protocol Protocol) (*Socket, *syserr.Error) { + // Datagram endpoint used to buffer kernel -> user messages. + ep := unix.NewConnectionless() + + // Bind the endpoint for good measure so we can connect to it. The + // bound address will never be exposed. 
+ if terr := ep.Bind(tcpip.FullAddress{Addr: "dummy"}, nil); terr != nil { + ep.Close() + return nil, syserr.TranslateNetstackError(terr) + } + + // Create a connection from which the kernel can write messages. + connection, terr := ep.(unix.BoundEndpoint).UnidirectionalConnect() + if terr != nil { + ep.Close() + return nil, syserr.TranslateNetstackError(terr) + } + + return &Socket{ + ports: t.Kernel().NetlinkPorts(), + protocol: protocol, + ep: ep, + connection: connection, + sendBufferSize: defaultSendBufferSize, + }, nil +} + +// Release implements fs.FileOperations.Release. +func (s *Socket) Release() { + s.connection.Release() + s.ep.Close() + + if s.bound { + s.ports.Release(s.protocol.Protocol(), s.portID) + } +} + +// Readiness implements waiter.Waitable.Readiness. +func (s *Socket) Readiness(mask waiter.EventMask) waiter.EventMask { + // ep holds messages to be read and thus handles EventIn readiness. + ready := s.ep.Readiness(mask) + + if mask&waiter.EventOut == waiter.EventOut { + // sendMsg handles messages synchronously and is thus always + // ready for writing. + ready |= waiter.EventOut + } + + return ready +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (s *Socket) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + s.ep.EventRegister(e, mask) + // Writable readiness never changes, so no registration is needed. +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (s *Socket) EventUnregister(e *waiter.Entry) { + s.ep.EventUnregister(e) +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (s *Socket) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + // TODO: no ioctls supported. + return 0, syserror.ENOTTY +} + +// ExtractSockAddr extracts the SockAddrNetlink from b. 
+func ExtractSockAddr(b []byte) (*linux.SockAddrNetlink, *syserr.Error) { + if len(b) < linux.SockAddrNetlinkSize { + return nil, syserr.ErrBadAddress + } + + var sa linux.SockAddrNetlink + binary.Unmarshal(b[:linux.SockAddrNetlinkSize], usermem.ByteOrder, &sa) + + if sa.Family != linux.AF_NETLINK { + return nil, syserr.ErrInvalidArgument + } + + return &sa, nil +} + +// bindPort binds this socket to a port, preferring 'port' if it is available. +// +// port of 0 defaults to the ThreadGroup ID. +// +// Preconditions: mu is held. +func (s *Socket) bindPort(t *kernel.Task, port int32) *syserr.Error { + if s.bound { + // Re-binding is only allowed if the port doesn't change. + if port != s.portID { + return syserr.ErrInvalidArgument + } + + return nil + } + + if port == 0 { + port = int32(t.ThreadGroup().ID()) + } + port, ok := s.ports.Allocate(s.protocol.Protocol(), port) + if !ok { + return syserr.ErrBusy + } + + s.portID = port + s.bound = true + return nil +} + +// Bind implements socket.Socket.Bind. +func (s *Socket) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error { + a, err := ExtractSockAddr(sockaddr) + if err != nil { + return err + } + + // No support for multicast groups yet. + if a.Groups != 0 { + return syserr.ErrPermissionDenied + } + + s.mu.Lock() + defer s.mu.Unlock() + + return s.bindPort(t, int32(a.PortID)) +} + +// Connect implements socket.Socket.Connect. +func (s *Socket) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error { + a, err := ExtractSockAddr(sockaddr) + if err != nil { + return err + } + + // No support for multicast groups yet. + if a.Groups != 0 { + return syserr.ErrPermissionDenied + } + + s.mu.Lock() + defer s.mu.Unlock() + + if a.PortID == 0 { + // Netlink sockets default to connected to the kernel, but + // connecting anyways automatically binds if not already bound. + if !s.bound { + // Pass port 0 to get an auto-selected port ID. 
+ return s.bindPort(t, 0) + } + return nil + } + + // We don't support non-kernel destination ports. Linux returns EPERM + // if applications attempt to do this without NL_CFG_F_NONROOT_SEND, so + // we emulate that. + return syserr.ErrPermissionDenied +} + +// Accept implements socket.Socket.Accept. +func (s *Socket) Accept(t *kernel.Task, peerRequested bool, flags int, blocking bool) (kdefs.FD, interface{}, uint32, *syserr.Error) { + // Netlink sockets never support accept. + return 0, nil, 0, syserr.ErrNotSupported +} + +// Listen implements socket.Socket.Listen. +func (s *Socket) Listen(t *kernel.Task, backlog int) *syserr.Error { + // Netlink sockets never support listen. + return syserr.ErrNotSupported +} + +// Shutdown implements socket.Socket.Shutdown. +func (s *Socket) Shutdown(t *kernel.Task, how int) *syserr.Error { + // Netlink sockets never support shutdown. + return syserr.ErrNotSupported +} + +// GetSockOpt implements socket.Socket.GetSockOpt. +func (s *Socket) GetSockOpt(t *kernel.Task, level int, name int, outLen int) (interface{}, *syserr.Error) { + // TODO: no sockopts supported. + return nil, syserr.ErrProtocolNotAvailable +} + +// SetSockOpt implements socket.Socket.SetSockOpt. +func (s *Socket) SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error { + // TODO: no sockopts supported. + return syserr.ErrProtocolNotAvailable +} + +// GetSockName implements socket.Socket.GetSockName. +func (s *Socket) GetSockName(t *kernel.Task) (interface{}, uint32, *syserr.Error) { + s.mu.Lock() + defer s.mu.Unlock() + + sa := linux.SockAddrNetlink{ + Family: linux.AF_NETLINK, + PortID: uint32(s.portID), + } + return sa, uint32(binary.Size(sa)), nil +} + +// GetPeerName implements socket.Socket.GetPeerName. +func (s *Socket) GetPeerName(t *kernel.Task) (interface{}, uint32, *syserr.Error) { + sa := linux.SockAddrNetlink{ + Family: linux.AF_NETLINK, + // TODO: Support non-kernel peers. For now the peer + // must be the kernel. 
+ PortID: 0, + } + return sa, uint32(binary.Size(sa)), nil +} + +// RecvMsg implements socket.Socket.RecvMsg. +func (s *Socket) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, interface{}, uint32, unix.ControlMessages, *syserr.Error) { + from := linux.SockAddrNetlink{ + Family: linux.AF_NETLINK, + PortID: 0, + } + fromLen := uint32(binary.Size(from)) + + trunc := flags&linux.MSG_TRUNC != 0 + + r := sunix.EndpointReader{ + Endpoint: s.ep, + Peek: flags&linux.MSG_PEEK != 0, + } + + if n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 { + if trunc { + n = int64(r.MsgSize) + } + return int(n), from, fromLen, unix.ControlMessages{}, syserr.FromError(err) + } + + // We'll have to block. Register for notification and keep trying to + // receive all the data. + e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventIn) + defer s.EventUnregister(&e) + + for { + if n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock { + if trunc { + n = int64(r.MsgSize) + } + return int(n), from, fromLen, unix.ControlMessages{}, syserr.FromError(err) + } + + if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil { + if err == syserror.ETIMEDOUT { + return 0, nil, 0, unix.ControlMessages{}, syserr.ErrTryAgain + } + return 0, nil, 0, unix.ControlMessages{}, syserr.FromError(err) + } + } +} + +// Read implements fs.FileOperations.Read. +func (s *Socket) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + if dst.NumBytes() == 0 { + return 0, nil + } + return dst.CopyOutFrom(ctx, &sunix.EndpointReader{ + Endpoint: s.ep, + }) +} + +// sendResponse sends the response messages in ms back to userspace. +func (s *Socket) sendResponse(ctx context.Context, ms *MessageSet) *syserr.Error { + // Linux combines multiple netlink messages into a single datagram. 
+ bufs := make([][]byte, 0, len(ms.Messages)) + for _, m := range ms.Messages { + bufs = append(bufs, m.Finalize()) + } + + if len(bufs) > 0 { + // RecvMsg never receives the address, so we don't need to send + // one. + _, notify, terr := s.connection.Send(bufs, unix.ControlMessages{}, tcpip.FullAddress{}) + // If the buffer is full, we simply drop messages, just like + // Linux. + if terr != nil && terr != tcpip.ErrWouldBlock { + return syserr.TranslateNetstackError(terr) + } + if notify { + s.connection.SendNotify() + } + } + + // N.B. multi-part messages should still send NLMSG_DONE even if + // MessageSet contains no messages. + // + // N.B. NLMSG_DONE is always sent in a different datagram. See + // net/netlink/af_netlink.c:netlink_dump. + if ms.Multi { + m := NewMessage(linux.NetlinkMessageHeader{ + Type: linux.NLMSG_DONE, + Flags: linux.NLM_F_MULTI, + Seq: ms.Seq, + PortID: uint32(ms.PortID), + }) + + _, notify, terr := s.connection.Send([][]byte{m.Finalize()}, unix.ControlMessages{}, tcpip.FullAddress{}) + if terr != nil && terr != tcpip.ErrWouldBlock { + return syserr.TranslateNetstackError(terr) + } + if notify { + s.connection.SendNotify() + } + } + + return nil +} + +// processMessages handles each message in buf, passing it to the protocol +// handler for final handling. +func (s *Socket) processMessages(ctx context.Context, buf []byte) *syserr.Error { + for len(buf) > 0 { + if len(buf) < linux.NetlinkMessageHeaderSize { + // Linux ignores messages that are too short. See + // net/netlink/af_netlink.c:netlink_rcv_skb. + break + } + + var hdr linux.NetlinkMessageHeader + binary.Unmarshal(buf[:linux.NetlinkMessageHeaderSize], usermem.ByteOrder, &hdr) + + if hdr.Length < linux.NetlinkMessageHeaderSize || uint64(hdr.Length) > uint64(len(buf)) { + // Linux ignores malformed messages. See + // net/netlink/af_netlink.c:netlink_rcv_skb. + break + } + + // Data from this message. 
+		data := buf[linux.NetlinkMessageHeaderSize:hdr.Length]
+
+		// Advance to the next aligned message, clamping to the end of
+		// the buffer. The previous form clamped to len(buf)-1, leaving
+		// a dangling byte that only terminated the loop via the short
+		// header check above; clamping to len(buf) terminates directly
+		// with identical observable behavior.
+		next := alignUp(int(hdr.Length), linux.NLMSG_ALIGNTO)
+		if next > len(buf) {
+			next = len(buf)
+		}
+		buf = buf[next:]
+
+		// Ignore control messages.
+		if hdr.Type < linux.NLMSG_MIN_TYPE {
+			continue
+		}
+
+		// TODO: ACKs not supported yet.
+		if hdr.Flags&linux.NLM_F_ACK == linux.NLM_F_ACK {
+			return syserr.ErrNotSupported
+		}
+
+		ms := NewMessageSet(s.portID, hdr.Seq)
+		if err := s.protocol.ProcessMessage(ctx, hdr, data, ms); err != nil {
+			return err
+		}
+
+		if err := s.sendResponse(ctx, ms); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// sendMsg is the core of message send, used for SendMsg and Write.
+func (s *Socket) sendMsg(ctx context.Context, src usermem.IOSequence, to []byte, flags int, controlMessages unix.ControlMessages) (int, *syserr.Error) {
+	dstPort := int32(0)
+
+	if len(to) != 0 {
+		a, err := ExtractSockAddr(to)
+		if err != nil {
+			return 0, err
+		}
+
+		// No support for multicast groups yet.
+		if a.Groups != 0 {
+			return 0, syserr.ErrPermissionDenied
+		}
+
+		dstPort = int32(a.PortID)
+	}
+
+	if dstPort != 0 {
+		// Non-kernel destinations not supported yet. Treat as if
+		// NL_CFG_F_NONROOT_SEND is not set.
+		return 0, syserr.ErrPermissionDenied
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// For simplicity, and consistency with Linux, we copy in the entire
+	// message up front.
+	if uint64(src.NumBytes()) > s.sendBufferSize {
+		return 0, syserr.ErrMessageTooLong
+	}
+
+	buf := make([]byte, src.NumBytes())
+	n, err := src.CopyIn(ctx, buf)
+	if err != nil {
+		// Don't partially consume messages.
+		return 0, syserr.FromError(err)
+	}
+
+	if err := s.processMessages(ctx, buf); err != nil {
+		return 0, err
+	}
+
+	return n, nil
+}
+
+// SendMsg implements socket.Socket.SendMsg.
+func (s *Socket) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages unix.ControlMessages) (int, *syserr.Error) { + return s.sendMsg(t, src, to, flags, controlMessages) +} + +// Write implements fs.FileOperations.Write. +func (s *Socket) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + n, err := s.sendMsg(ctx, src, nil, 0, unix.ControlMessages{}) + return int64(n), err.ToError() +} diff --git a/pkg/sentry/socket/rpcinet/BUILD b/pkg/sentry/socket/rpcinet/BUILD new file mode 100644 index 000000000..b0351b363 --- /dev/null +++ b/pkg/sentry/socket/rpcinet/BUILD @@ -0,0 +1,59 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "rpcinet", + srcs = [ + "device.go", + "rpcinet.go", + "socket.go", + "stack.go", + "stack_unsafe.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet", + visibility = ["//pkg/sentry:internal"], + deps = [ + ":syscall_rpc_go_proto", + "//pkg/abi/linux", + "//pkg/binary", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/inet", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/socket", + "//pkg/sentry/socket/hostinet", + "//pkg/sentry/socket/rpcinet/conn", + "//pkg/sentry/socket/rpcinet/notifier", + "//pkg/sentry/usermem", + "//pkg/syserr", + "//pkg/syserror", + "//pkg/tcpip/buffer", + "//pkg/tcpip/transport/unix", + "//pkg/unet", + "//pkg/waiter", + ], +) + +proto_library( + name = "syscall_rpc_proto", + srcs = ["syscall_rpc.proto"], + visibility = [ + "//visibility:public", + ], +) + +go_proto_library( + name = "syscall_rpc_go_proto", + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/syscall_rpc_go_proto", + proto = ":syscall_rpc_proto", + 
visibility = [ + "//visibility:public", + ], +) diff --git a/pkg/sentry/socket/rpcinet/conn/BUILD b/pkg/sentry/socket/rpcinet/conn/BUILD new file mode 100644 index 000000000..4923dee4b --- /dev/null +++ b/pkg/sentry/socket/rpcinet/conn/BUILD @@ -0,0 +1,17 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "conn", + srcs = ["conn.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/conn", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/binary", + "//pkg/sentry/socket/rpcinet:syscall_rpc_go_proto", + "//pkg/syserr", + "//pkg/unet", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) diff --git a/pkg/sentry/socket/rpcinet/conn/conn.go b/pkg/sentry/socket/rpcinet/conn/conn.go new file mode 100644 index 000000000..ea6ec87ed --- /dev/null +++ b/pkg/sentry/socket/rpcinet/conn/conn.go @@ -0,0 +1,167 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package conn is an RPC connection to a syscall RPC server. 
+package conn + +import ( + "fmt" + "sync" + "sync/atomic" + "syscall" + + "github.com/golang/protobuf/proto" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/unet" + + pb "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/syscall_rpc_go_proto" +) + +type request struct { + response []byte + ready chan struct{} + ignoreResult bool +} + +// RPCConnection represents a single RPC connection to a syscall gofer. +type RPCConnection struct { + // reqID is the ID of the last request and must be accessed atomically. + reqID uint64 + + sendMu sync.Mutex + socket *unet.Socket + + reqMu sync.Mutex + requests map[uint64]request +} + +// NewRPCConnection initializes a RPC connection to a socket gofer. +func NewRPCConnection(s *unet.Socket) *RPCConnection { + conn := &RPCConnection{socket: s, requests: map[uint64]request{}} + go func() { // S/R-FIXME + var nums [16]byte + for { + for n := 0; n < len(nums); { + nn, err := conn.socket.Read(nums[n:]) + if err != nil { + panic(fmt.Sprint("error reading length from socket rpc gofer: ", err)) + } + n += nn + } + + b := make([]byte, binary.LittleEndian.Uint64(nums[:8])) + id := binary.LittleEndian.Uint64(nums[8:]) + + for n := 0; n < len(b); { + nn, err := conn.socket.Read(b[n:]) + if err != nil { + panic(fmt.Sprint("error reading request from socket rpc gofer: ", err)) + } + n += nn + } + + conn.reqMu.Lock() + r := conn.requests[id] + if r.ignoreResult { + delete(conn.requests, id) + } else { + r.response = b + conn.requests[id] = r + } + conn.reqMu.Unlock() + close(r.ready) + } + }() + return conn +} + +// NewRequest makes a request to the RPC gofer and returns the request ID and a +// channel which will be closed once the request completes. 
+func (c *RPCConnection) NewRequest(req pb.SyscallRequest, ignoreResult bool) (uint64, chan struct{}) { + b, err := proto.Marshal(&req) + if err != nil { + panic(fmt.Sprint("invalid proto: ", err)) + } + + id := atomic.AddUint64(&c.reqID, 1) + ch := make(chan struct{}) + + c.reqMu.Lock() + c.requests[id] = request{ready: ch, ignoreResult: ignoreResult} + c.reqMu.Unlock() + + c.sendMu.Lock() + defer c.sendMu.Unlock() + + var nums [16]byte + binary.LittleEndian.PutUint64(nums[:8], uint64(len(b))) + binary.LittleEndian.PutUint64(nums[8:], id) + for n := 0; n < len(nums); { + nn, err := c.socket.Write(nums[n:]) + if err != nil { + panic(fmt.Sprint("error writing length and ID to socket gofer: ", err)) + } + n += nn + } + + for n := 0; n < len(b); { + nn, err := c.socket.Write(b[n:]) + if err != nil { + panic(fmt.Sprint("error writing request to socket gofer: ", err)) + } + n += nn + } + + return id, ch +} + +// RPCReadFile will execute the ReadFile helper RPC method which avoids the +// common pattern of open(2), read(2), close(2) by doing all three operations +// as a single RPC. It will read the entire file or return EFBIG if the file +// was too large. +func (c *RPCConnection) RPCReadFile(path string) ([]byte, *syserr.Error) { + req := &pb.SyscallRequest_ReadFile{&pb.ReadFileRequest{ + Path: path, + }} + + id, ch := c.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */) + <-ch + + res := c.Request(id).Result.(*pb.SyscallResponse_ReadFile).ReadFile.Result + if e, ok := res.(*pb.ReadFileResponse_ErrorNumber); ok { + return nil, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + + return res.(*pb.ReadFileResponse_Data).Data, nil +} + +// Request retrieves the request corresponding to the given request ID. +// +// The channel returned by NewRequest must have been closed before Request can +// be called. This will happen automatically, do not manually close the +// channel. 
+func (c *RPCConnection) Request(id uint64) pb.SyscallResponse { + c.reqMu.Lock() + r := c.requests[id] + delete(c.requests, id) + c.reqMu.Unlock() + + var resp pb.SyscallResponse + if err := proto.Unmarshal(r.response, &resp); err != nil { + panic(fmt.Sprint("invalid proto: ", err)) + } + + return resp +} diff --git a/pkg/sentry/socket/rpcinet/device.go b/pkg/sentry/socket/rpcinet/device.go new file mode 100644 index 000000000..f7b63436e --- /dev/null +++ b/pkg/sentry/socket/rpcinet/device.go @@ -0,0 +1,19 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpcinet + +import "gvisor.googlesource.com/gvisor/pkg/sentry/device" + +var socketDevice = device.NewAnonDevice() diff --git a/pkg/sentry/socket/rpcinet/notifier/BUILD b/pkg/sentry/socket/rpcinet/notifier/BUILD new file mode 100644 index 000000000..6f3b06a05 --- /dev/null +++ b/pkg/sentry/socket/rpcinet/notifier/BUILD @@ -0,0 +1,15 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "notifier", + srcs = ["notifier.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/notifier", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/sentry/socket/rpcinet:syscall_rpc_go_proto", + "//pkg/sentry/socket/rpcinet/conn", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/socket/rpcinet/notifier/notifier.go b/pkg/sentry/socket/rpcinet/notifier/notifier.go new file mode 100644 index 000000000..f88a908ed --- /dev/null +++ b/pkg/sentry/socket/rpcinet/notifier/notifier.go @@ -0,0 +1,230 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package notifier implements an FD notifier implementation over RPC. 
+package notifier

+
+import (
+	"fmt"
+	"sync"
+	"syscall"
+
+	"gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/conn"
+	pb "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/syscall_rpc_go_proto"
+	"gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+// fdInfo tracks a single observed FD: the queue to notify and whether the FD
+// is currently registered with the remote epoll instance.
+type fdInfo struct {
+	queue   *waiter.Queue
+	waiting bool
+}
+
+// Notifier holds all the state necessary to issue notifications when IO events
+// occur in the observed FDs.
+type Notifier struct {
+	// rpcConn is the connection that is used for sending RPCs.
+	rpcConn *conn.RPCConnection
+
+	// epFD is the epoll file descriptor used to register for io
+	// notifications.
+	epFD uint32
+
+	// mu protects fdMap.
+	mu sync.Mutex
+
+	// fdMap maps file descriptors to their notification queues and waiting
+	// status.
+	fdMap map[uint32]*fdInfo
+}
+
+// NewRPCNotifier creates a new notifier object. It issues an EpollCreate1 RPC
+// to obtain a remote epoll FD and starts a background goroutine that blocks in
+// EpollWait and dispatches events to registered queues.
+func NewRPCNotifier(cn *conn.RPCConnection) (*Notifier, error) {
+	id, c := cn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_EpollCreate1{&pb.EpollCreate1Request{}}}, false /* ignoreResult */)
+	<-c
+
+	res := cn.Request(id).Result.(*pb.SyscallResponse_EpollCreate1).EpollCreate1.Result
+	if e, ok := res.(*pb.EpollCreate1Response_ErrorNumber); ok {
+		return nil, syscall.Errno(e.ErrorNumber)
+	}
+
+	w := &Notifier{
+		rpcConn: cn,
+		epFD:    res.(*pb.EpollCreate1Response_Fd).Fd,
+		fdMap:   make(map[uint32]*fdInfo),
+	}
+
+	go w.waitAndNotify() // S/R-FIXME
+
+	return w, nil
+}
+
+// waitFD waits on mask for fd. The fdMap mutex must be held.
+func (n *Notifier) waitFD(fd uint32, fi *fdInfo, mask waiter.EventMask) error { + if !fi.waiting && mask == 0 { + return nil + } + + e := pb.EpollEvent{ + Events: uint32(mask) | -syscall.EPOLLET, + Fd: fd, + } + + switch { + case !fi.waiting && mask != 0: + id, c := n.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_EpollCtl{&pb.EpollCtlRequest{Epfd: n.epFD, Op: syscall.EPOLL_CTL_ADD, Fd: fd, Event: &e}}}, false /* ignoreResult */) + <-c + + e := n.rpcConn.Request(id).Result.(*pb.SyscallResponse_EpollCtl).EpollCtl.ErrorNumber + if e != 0 { + return syscall.Errno(e) + } + + fi.waiting = true + case fi.waiting && mask == 0: + id, c := n.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_EpollCtl{&pb.EpollCtlRequest{Epfd: n.epFD, Op: syscall.EPOLL_CTL_DEL, Fd: fd}}}, false /* ignoreResult */) + <-c + n.rpcConn.Request(id) + + fi.waiting = false + case fi.waiting && mask != 0: + id, c := n.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_EpollCtl{&pb.EpollCtlRequest{Epfd: n.epFD, Op: syscall.EPOLL_CTL_MOD, Fd: fd, Event: &e}}}, false /* ignoreResult */) + <-c + + e := n.rpcConn.Request(id).Result.(*pb.SyscallResponse_EpollCtl).EpollCtl.ErrorNumber + if e != 0 { + return syscall.Errno(e) + } + } + + return nil +} + +// addFD adds an FD to the list of FDs observed by n. +func (n *Notifier) addFD(fd uint32, queue *waiter.Queue) { + n.mu.Lock() + defer n.mu.Unlock() + + // Panic if we're already notifying on this FD. + if _, ok := n.fdMap[fd]; ok { + panic(fmt.Sprintf("File descriptor %d added twice", fd)) + } + + // We have nothing to wait for at the moment. Just add it to the map. + n.fdMap[fd] = &fdInfo{queue: queue} +} + +// updateFD updates the set of events the FD needs to be notified on. 
+func (n *Notifier) updateFD(fd uint32) error { + n.mu.Lock() + defer n.mu.Unlock() + + if fi, ok := n.fdMap[fd]; ok { + return n.waitFD(fd, fi, fi.queue.Events()) + } + + return nil +} + +// RemoveFD removes an FD from the list of FDs observed by n. +func (n *Notifier) removeFD(fd uint32) { + n.mu.Lock() + defer n.mu.Unlock() + + // Remove from map, then from epoll object. + n.waitFD(fd, n.fdMap[fd], 0) + delete(n.fdMap, fd) +} + +// hasFD returns true if the FD is in the list of observed FDs. +func (n *Notifier) hasFD(fd uint32) bool { + n.mu.Lock() + defer n.mu.Unlock() + + _, ok := n.fdMap[fd] + return ok +} + +// waitAndNotify loops waiting for io event notifications from the epoll +// object. Once notifications arrive, they are dispatched to the +// registered queue. +func (n *Notifier) waitAndNotify() error { + for { + id, c := n.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_EpollWait{&pb.EpollWaitRequest{Fd: n.epFD, NumEvents: 100, Msec: -1}}}, false /* ignoreResult */) + <-c + + res := n.rpcConn.Request(id).Result.(*pb.SyscallResponse_EpollWait).EpollWait.Result + if e, ok := res.(*pb.EpollWaitResponse_ErrorNumber); ok { + err := syscall.Errno(e.ErrorNumber) + // NOTE: I don't think epoll_wait can return EAGAIN but I'm being + // conseratively careful here since exiting the notification thread + // would be really bad. + if err == syscall.EINTR || err == syscall.EAGAIN { + continue + } + return err + } + + n.mu.Lock() + for _, e := range res.(*pb.EpollWaitResponse_Events).Events.Events { + if fi, ok := n.fdMap[e.Fd]; ok { + fi.queue.Notify(waiter.EventMask(e.Events)) + } + } + n.mu.Unlock() + } +} + +// AddFD adds an FD to the list of observed FDs. +func (n *Notifier) AddFD(fd uint32, queue *waiter.Queue) error { + n.addFD(fd, queue) + return nil +} + +// UpdateFD updates the set of events the FD needs to be notified on. 
+func (n *Notifier) UpdateFD(fd uint32) error { + return n.updateFD(fd) +} + +// RemoveFD removes an FD from the list of observed FDs. +func (n *Notifier) RemoveFD(fd uint32) { + n.removeFD(fd) +} + +// HasFD returns true if the FD is in the list of observed FDs. +// +// This should only be used by tests to assert that FDs are correctly +// registered. +func (n *Notifier) HasFD(fd uint32) bool { + return n.hasFD(fd) +} + +// NonBlockingPoll polls the given fd in non-blocking fashion. It is used just +// to query the FD's current state; this method will block on the RPC response +// although the syscall is non-blocking. +func (n *Notifier) NonBlockingPoll(fd uint32, mask waiter.EventMask) waiter.EventMask { + for { + id, c := n.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Poll{&pb.PollRequest{Fd: fd, Events: uint32(mask)}}}, false /* ignoreResult */) + <-c + + res := n.rpcConn.Request(id).Result.(*pb.SyscallResponse_Poll).Poll.Result + if e, ok := res.(*pb.PollResponse_ErrorNumber); ok { + if syscall.Errno(e.ErrorNumber) == syscall.EINTR { + continue + } + return mask + } + + return waiter.EventMask(res.(*pb.PollResponse_Events).Events) + } +} diff --git a/pkg/sentry/socket/rpcinet/rpcinet.go b/pkg/sentry/socket/rpcinet/rpcinet.go new file mode 100644 index 000000000..10b0dedc2 --- /dev/null +++ b/pkg/sentry/socket/rpcinet/rpcinet.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package rpcinet implements sockets using an RPC for each syscall. +package rpcinet diff --git a/pkg/sentry/socket/rpcinet/socket.go b/pkg/sentry/socket/rpcinet/socket.go new file mode 100644 index 000000000..574d99ba5 --- /dev/null +++ b/pkg/sentry/socket/rpcinet/socket.go @@ -0,0 +1,567 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcinet + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/conn" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/notifier" + pb "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/syscall_rpc_go_proto" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// socketOperations implements 
fs.FileOperations and socket.Socket for a socket +// implemented using a host socket. +type socketOperations struct { + socket.ReceiveTimeout + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + + fd uint32 // must be O_NONBLOCK + wq *waiter.Queue + rpcConn *conn.RPCConnection + notifier *notifier.Notifier +} + +// Verify that we actually implement socket.Socket. +var _ = socket.Socket(&socketOperations{}) + +// New creates a new RPC socket. +func newSocketFile(ctx context.Context, stack *Stack, family int, skType int, protocol int) (*fs.File, *syserr.Error) { + id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Socket{&pb.SocketRequest{Family: int64(family), Type: int64(skType | syscall.SOCK_NONBLOCK), Protocol: int64(protocol)}}}, false /* ignoreResult */) + <-c + + res := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_Socket).Socket.Result + if e, ok := res.(*pb.SocketResponse_ErrorNumber); ok { + return nil, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + fd := res.(*pb.SocketResponse_Fd).Fd + + var wq waiter.Queue + stack.notifier.AddFD(fd, &wq) + + dirent := socket.NewDirent(ctx, socketDevice) + return fs.NewFile(ctx, dirent, fs.FileFlags{Read: true, Write: true}, &socketOperations{ + wq: &wq, + fd: fd, + rpcConn: stack.rpcConn, + notifier: stack.notifier, + }), nil +} + +func isBlockingErrno(err error) bool { + return err == syscall.EAGAIN || err == syscall.EWOULDBLOCK +} + +func translateIOSyscallError(err error) error { + if isBlockingErrno(err) { + return syserror.ErrWouldBlock + } + return err +} + +// Release implements fs.FileOperations.Release. +func (s *socketOperations) Release() { + s.notifier.RemoveFD(s.fd) + + // We always need to close the FD. 
+ _, _ = s.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Close{&pb.CloseRequest{Fd: s.fd}}}, true /* ignoreResult */) +} + +// Readiness implements waiter.Waitable.Readiness. +func (s *socketOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + return s.notifier.NonBlockingPoll(s.fd, mask) +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (s *socketOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + s.wq.EventRegister(e, mask) + s.notifier.UpdateFD(s.fd) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (s *socketOperations) EventUnregister(e *waiter.Entry) { + s.wq.EventUnregister(e) + s.notifier.UpdateFD(s.fd) +} + +func rpcRead(t *kernel.Task, req *pb.SyscallRequest_Read) (*pb.ReadResponse_Data, *syserr.Error) { + s := t.NetworkContext().(*Stack) + id, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */) + <-c + + res := s.rpcConn.Request(id).Result.(*pb.SyscallResponse_Read).Read.Result + if e, ok := res.(*pb.ReadResponse_ErrorNumber); ok { + return nil, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + + return res.(*pb.ReadResponse_Data), nil +} + +// Read implements fs.FileOperations.Read. +func (s *socketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + req := &pb.SyscallRequest_Read{&pb.ReadRequest{ + Fd: s.fd, + Length: uint32(dst.NumBytes()), + }} + + res, se := rpcRead(ctx.(*kernel.Task), req) + if se == nil { + n, e := dst.CopyOut(ctx, res.Data) + return int64(n), e + } + if se != syserr.ErrWouldBlock { + return 0, se.ToError() + } + + // We'll have to block. Register for notifications and read again when ready. 
+ e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventIn) + defer s.EventUnregister(&e) + + for { + res, se := rpcRead(ctx.(*kernel.Task), req) + if se == nil { + n, e := dst.CopyOut(ctx, res.Data) + return int64(n), e + } + if se != syserr.ErrWouldBlock { + return 0, se.ToError() + } + + if err := ctx.(*kernel.Task).Block(ch); err != nil { + return 0, err + } + } +} + +func rpcWrite(t *kernel.Task, req *pb.SyscallRequest_Write) (uint32, *syserr.Error) { + s := t.NetworkContext().(*Stack) + id, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */) + <-c + + res := s.rpcConn.Request(id).Result.(*pb.SyscallResponse_Write).Write.Result + if e, ok := res.(*pb.WriteResponse_ErrorNumber); ok { + return 0, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + + return res.(*pb.WriteResponse_Length).Length, nil +} + +// Write implements fs.FileOperations.Write. +func (s *socketOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + t := ctx.(*kernel.Task) + v := buffer.NewView(int(src.NumBytes())) + + // Copy all the data into the buffer. + if _, err := src.CopyIn(t, v); err != nil { + return 0, err + } + + n, err := rpcWrite(t, &pb.SyscallRequest_Write{&pb.WriteRequest{Fd: s.fd, Data: v}}) + return int64(n), err.ToError() +} + +func rpcConnect(t *kernel.Task, fd uint32, sockaddr []byte) *syserr.Error { + s := t.NetworkContext().(*Stack) + id, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Connect{&pb.ConnectRequest{Fd: uint32(fd), Address: sockaddr}}}, false /* ignoreResult */) + <-c + + if e := s.rpcConn.Request(id).Result.(*pb.SyscallResponse_Connect).Connect.ErrorNumber; e != 0 { + return syserr.FromHost(syscall.Errno(e)) + } + return nil +} + +// Connect implements socket.Socket.Connect. 
+func (s *socketOperations) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error { + if !blocking { + return rpcConnect(t, s.fd, sockaddr) + } + + // Register for notification when the endpoint becomes writable, then + // initiate the connection. + e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventOut) + defer s.EventUnregister(&e) + + if err := rpcConnect(t, s.fd, sockaddr); err != syserr.ErrConnectStarted && err != syserr.ErrAlreadyConnecting { + return err + } + + // It's pending, so we have to wait for a notification, and fetch the + // result once the wait completes. + if err := t.Block(ch); err != nil { + return syserr.FromError(err) + } + + // Call Connect() again after blocking to find connect's result. + return rpcConnect(t, s.fd, sockaddr) +} + +func rpcAccept(t *kernel.Task, fd uint32, peer bool) (*pb.AcceptResponse_ResultPayload, *syserr.Error) { + stack := t.NetworkContext().(*Stack) + id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Accept{&pb.AcceptRequest{Fd: fd, Peer: peer, Flags: syscall.SOCK_NONBLOCK}}}, false /* ignoreResult */) + <-c + + res := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_Accept).Accept.Result + if e, ok := res.(*pb.AcceptResponse_ErrorNumber); ok { + return nil, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + return res.(*pb.AcceptResponse_Payload).Payload, nil +} + +// Accept implements socket.Socket.Accept. +func (s *socketOperations) Accept(t *kernel.Task, peerRequested bool, flags int, blocking bool) (kdefs.FD, interface{}, uint32, *syserr.Error) { + payload, se := rpcAccept(t, s.fd, peerRequested) + + // Check if we need to block. + if blocking && se == syserr.ErrWouldBlock { + // Register for notifications. + e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventIn) + defer s.EventUnregister(&e) + + // Try to accept the connection again; if it fails, then wait until we + // get a notification. 
+ for { + if payload, se = rpcAccept(t, s.fd, peerRequested); se != syserr.ErrWouldBlock { + break + } + + if err := t.Block(ch); err != nil { + return 0, nil, 0, syserr.FromError(err) + } + } + } + + // Handle any error from accept. + if se != nil { + return 0, nil, 0, se + } + + var wq waiter.Queue + s.notifier.AddFD(payload.Fd, &wq) + + dirent := socket.NewDirent(t, socketDevice) + file := fs.NewFile(t, dirent, fs.FileFlags{Read: true, Write: true, NonBlocking: flags&linux.SOCK_NONBLOCK != 0}, &socketOperations{ + wq: &wq, + fd: payload.Fd, + notifier: s.notifier, + }) + + fdFlags := kernel.FDFlags{ + CloseOnExec: flags&linux.SOCK_CLOEXEC != 0, + } + fd, err := t.FDMap().NewFDFrom(0, file, fdFlags, t.ThreadGroup().Limits()) + if err != nil { + return 0, nil, 0, syserr.FromError(err) + } + + return fd, payload.Address.Address, payload.Address.Length, nil +} + +// Bind implements socket.Socket.Bind. +func (s *socketOperations) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error { + stack := t.NetworkContext().(*Stack) + id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Bind{&pb.BindRequest{Fd: s.fd, Address: sockaddr}}}, false /* ignoreResult */) + <-c + + if e := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_Bind).Bind.ErrorNumber; e != 0 { + syserr.FromHost(syscall.Errno(e)) + } + return nil +} + +// Listen implements socket.Socket.Listen. +func (s *socketOperations) Listen(t *kernel.Task, backlog int) *syserr.Error { + stack := t.NetworkContext().(*Stack) + id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Listen{&pb.ListenRequest{Fd: s.fd, Backlog: int64(backlog)}}}, false /* ignoreResult */) + <-c + + if e := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_Listen).Listen.ErrorNumber; e != 0 { + syserr.FromHost(syscall.Errno(e)) + } + return nil +} + +// Shutdown implements socket.Socket.Shutdown. 
+func (s *socketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error {
+	stack := t.NetworkContext().(*Stack)
+	id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Shutdown{&pb.ShutdownRequest{Fd: s.fd, How: int64(how)}}}, false /* ignoreResult */)
+	<-c
+
+	if e := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_Shutdown).Shutdown.ErrorNumber; e != 0 {
+		return syserr.FromHost(syscall.Errno(e))
+	}
+	return nil
+}
+
+// GetSockOpt implements socket.Socket.GetSockOpt.
+func (s *socketOperations) GetSockOpt(t *kernel.Task, level int, name int, outLen int) (interface{}, *syserr.Error) {
+	stack := t.NetworkContext().(*Stack)
+	id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_GetSockOpt{&pb.GetSockOptRequest{Fd: s.fd, Level: int64(level), Name: int64(name), Length: uint32(outLen)}}}, false /* ignoreResult */)
+	<-c
+
+	res := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_GetSockOpt).GetSockOpt.Result
+	if e, ok := res.(*pb.GetSockOptResponse_ErrorNumber); ok {
+		return nil, syserr.FromHost(syscall.Errno(e.ErrorNumber))
+	}
+
+	return res.(*pb.GetSockOptResponse_Opt).Opt, nil
+}
+
+// SetSockOpt implements socket.Socket.SetSockOpt.
+func (s *socketOperations) SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error {
+	stack := t.NetworkContext().(*Stack)
+	id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_SetSockOpt{&pb.SetSockOptRequest{Fd: s.fd, Level: int64(level), Name: int64(name), Opt: opt}}}, false /* ignoreResult */)
+	<-c
+
+	if e := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_SetSockOpt).SetSockOpt.ErrorNumber; e != 0 {
+		// Propagate the RPC error. Previously the converted error was
+		// computed and silently discarded, making setsockopt(2) always
+		// appear to succeed.
+		return syserr.FromHost(syscall.Errno(e))
+	}
+	return nil
+}
+
+// GetPeerName implements socket.Socket.GetPeerName.
+func (s *socketOperations) GetPeerName(t *kernel.Task) (interface{}, uint32, *syserr.Error) { + stack := t.NetworkContext().(*Stack) + id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_GetPeerName{&pb.GetPeerNameRequest{Fd: s.fd}}}, false /* ignoreResult */) + <-c + + res := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_GetPeerName).GetPeerName.Result + if e, ok := res.(*pb.GetPeerNameResponse_ErrorNumber); ok { + return nil, 0, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + + addr := res.(*pb.GetPeerNameResponse_Address).Address + return addr.Address, addr.Length, nil +} + +// GetSockName implements socket.Socket.GetSockName. +func (s *socketOperations) GetSockName(t *kernel.Task) (interface{}, uint32, *syserr.Error) { + stack := t.NetworkContext().(*Stack) + id, c := stack.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_GetSockName{&pb.GetSockNameRequest{Fd: s.fd}}}, false /* ignoreResult */) + <-c + + res := stack.rpcConn.Request(id).Result.(*pb.SyscallResponse_GetSockName).GetSockName.Result + if e, ok := res.(*pb.GetSockNameResponse_ErrorNumber); ok { + return nil, 0, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + + addr := res.(*pb.GetSockNameResponse_Address).Address + return addr.Address, addr.Length, nil +} + +// Ioctl implements fs.FileOperations.Ioctl. 
+func (s *socketOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + return 0, syserror.ENOTTY +} + +func rpcRecvMsg(t *kernel.Task, req *pb.SyscallRequest_Recvmsg) (*pb.RecvmsgResponse_ResultPayload, *syserr.Error) { + s := t.NetworkContext().(*Stack) + id, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */) + <-c + + res := s.rpcConn.Request(id).Result.(*pb.SyscallResponse_Recvmsg).Recvmsg.Result + if e, ok := res.(*pb.RecvmsgResponse_ErrorNumber); ok { + return nil, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + + return res.(*pb.RecvmsgResponse_Payload).Payload, nil +} + +// RecvMsg implements socket.Socket.RecvMsg. +func (s *socketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (int, interface{}, uint32, unix.ControlMessages, *syserr.Error) { + req := &pb.SyscallRequest_Recvmsg{&pb.RecvmsgRequest{ + Fd: s.fd, + Length: uint32(dst.NumBytes()), + Sender: senderRequested, + Trunc: flags&linux.MSG_TRUNC != 0, + Peek: flags&linux.MSG_PEEK != 0, + }} + + res, err := rpcRecvMsg(t, req) + if err == nil { + n, e := dst.CopyOut(t, res.Data) + return int(n), res.Address.GetAddress(), res.Address.GetLength(), unix.ControlMessages{}, syserr.FromError(e) + } + if err != syserr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 { + return 0, nil, 0, unix.ControlMessages{}, err + } + + // We'll have to block. Register for notifications and keep trying to + // send all the data. 
+ e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventIn) + defer s.EventUnregister(&e) + + for { + res, err := rpcRecvMsg(t, req) + if err == nil { + n, e := dst.CopyOut(t, res.Data) + return int(n), res.Address.GetAddress(), res.Address.GetLength(), unix.ControlMessages{}, syserr.FromError(e) + } + if err != syserr.ErrWouldBlock { + return 0, nil, 0, unix.ControlMessages{}, err + } + + if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil { + if err == syserror.ETIMEDOUT { + return 0, nil, 0, unix.ControlMessages{}, syserr.ErrTryAgain + } + return 0, nil, 0, unix.ControlMessages{}, syserr.FromError(err) + } + } +} + +func rpcSendMsg(t *kernel.Task, req *pb.SyscallRequest_Sendmsg) (uint32, *syserr.Error) { + s := t.NetworkContext().(*Stack) + id, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */) + <-c + + res := s.rpcConn.Request(id).Result.(*pb.SyscallResponse_Sendmsg).Sendmsg.Result + if e, ok := res.(*pb.SendmsgResponse_ErrorNumber); ok { + return 0, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + + return res.(*pb.SendmsgResponse_Length).Length, nil +} + +// SendMsg implements socket.Socket.SendMsg. +func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages unix.ControlMessages) (int, *syserr.Error) { + // Whitelist flags. + if flags&^(syscall.MSG_DONTWAIT|syscall.MSG_EOR|syscall.MSG_FASTOPEN|syscall.MSG_MORE|syscall.MSG_NOSIGNAL) != 0 { + return 0, syserr.ErrInvalidArgument + } + + // Reject control messages. + if !controlMessages.Empty() { + return 0, syserr.ErrInvalidArgument + } + + v := buffer.NewView(int(src.NumBytes())) + + // Copy all the data into the buffer. + if _, err := src.CopyIn(t, v); err != nil { + return 0, syserr.FromError(err) + } + + // TODO: this needs to change to map directly to a SendMsg syscall + // in the RPC. 
+ req := &pb.SyscallRequest_Sendmsg{&pb.SendmsgRequest{ + Fd: uint32(s.fd), + Data: v, + Address: to, + More: flags&linux.MSG_MORE != 0, + EndOfRecord: flags&linux.MSG_EOR != 0, + }} + + n, err := rpcSendMsg(t, req) + if err != syserr.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 { + return int(n), err + } + + // We'll have to block. Register for notification and keep trying to + // send all the data. + e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventOut) + defer s.EventUnregister(&e) + + for { + n, err := rpcSendMsg(t, req) + if err != syserr.ErrWouldBlock { + return int(n), err + } + + if err := t.Block(ch); err != nil { + return 0, syserr.FromError(err) + } + } +} + +type socketProvider struct { + family int +} + +// Socket implements socket.Provider.Socket. +func (p *socketProvider) Socket(t *kernel.Task, stypeflags unix.SockType, protocol int) (*fs.File, *syserr.Error) { + // Check that we are using the RPC network stack. + stack := t.NetworkContext() + if stack == nil { + return nil, nil + } + + s, ok := stack.(*Stack) + if !ok { + return nil, nil + } + + // Only accept TCP and UDP. + // + // Try to restrict the flags we will accept to minimize backwards + // incompatability with netstack. + stype := int(stypeflags) & linux.SOCK_TYPE_MASK + switch stype { + case syscall.SOCK_STREAM: + switch protocol { + case 0, syscall.IPPROTO_TCP: + // ok + default: + return nil, nil + } + case syscall.SOCK_DGRAM: + switch protocol { + case 0, syscall.IPPROTO_UDP: + // ok + default: + return nil, nil + } + default: + return nil, nil + } + + return newSocketFile(t, s, p.family, stype, 0) +} + +// Pair implements socket.Provider.Pair. +func (p *socketProvider) Pair(t *kernel.Task, stype unix.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) { + // Not supported by AF_INET/AF_INET6. 
+ return nil, nil, nil +} + +func init() { + for _, family := range []int{syscall.AF_INET, syscall.AF_INET6} { + socket.RegisterProvider(family, &socketProvider{family}) + } +} diff --git a/pkg/sentry/socket/rpcinet/stack.go b/pkg/sentry/socket/rpcinet/stack.go new file mode 100644 index 000000000..503e0e932 --- /dev/null +++ b/pkg/sentry/socket/rpcinet/stack.go @@ -0,0 +1,175 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcinet + +import ( + "fmt" + "strings" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/hostinet" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/conn" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/notifier" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +// Stack implements inet.Stack for RPC backed sockets. +type Stack struct { + // We intentionally do not allow these values to be changed to remain + // consistent with the other networking stacks. 
+ interfaces map[int32]inet.Interface + interfaceAddrs map[int32][]inet.InterfaceAddr + supportsIPv6 bool + tcpRecvBufSize inet.TCPBufferSize + tcpSendBufSize inet.TCPBufferSize + tcpSACKEnabled bool + rpcConn *conn.RPCConnection + notifier *notifier.Notifier +} + +func readTCPBufferSizeFile(conn *conn.RPCConnection, filename string) (inet.TCPBufferSize, error) { + contents, se := conn.RPCReadFile(filename) + if se != nil { + return inet.TCPBufferSize{}, fmt.Errorf("failed to read %s: %v", filename, se) + } + ioseq := usermem.BytesIOSequence(contents) + fields := make([]int32, 3) + if n, err := usermem.CopyInt32StringsInVec(context.Background(), ioseq.IO, ioseq.Addrs, fields, ioseq.Opts); n != ioseq.NumBytes() || err != nil { + return inet.TCPBufferSize{}, fmt.Errorf("failed to parse %s (%q): got %v after %d/%d bytes", filename, contents, err, n, ioseq.NumBytes()) + } + return inet.TCPBufferSize{ + Min: int(fields[0]), + Default: int(fields[1]), + Max: int(fields[2]), + }, nil +} + +// NewStack returns a Stack containing the current state of the host network +// stack. +func NewStack(fd int32) (*Stack, error) { + sock, err := unet.NewSocket(int(fd)) + if err != nil { + return nil, err + } + + stack := &Stack{ + interfaces: make(map[int32]inet.Interface), + interfaceAddrs: make(map[int32][]inet.InterfaceAddr), + rpcConn: conn.NewRPCConnection(sock), + } + + var e error + stack.notifier, e = notifier.NewRPCNotifier(stack.rpcConn) + if e != nil { + return nil, e + } + + // Load the configuration values from procfs. 
+ tcpRMem, e := readTCPBufferSizeFile(stack.rpcConn, "/proc/sys/net/ipv4/tcp_rmem") + if e != nil { + return nil, e + } + stack.tcpRecvBufSize = tcpRMem + + tcpWMem, e := readTCPBufferSizeFile(stack.rpcConn, "/proc/sys/net/ipv4/tcp_wmem") + if e != nil { + return nil, e + } + stack.tcpSendBufSize = tcpWMem + + ipv6, se := stack.rpcConn.RPCReadFile("/proc/net/if_inet6") + if len(string(ipv6)) > 0 { + stack.supportsIPv6 = true + } + + sackFile := "/proc/sys/net/ipv4/tcp_sack" + sack, se := stack.rpcConn.RPCReadFile(sackFile) + if se != nil { + return nil, fmt.Errorf("failed to read %s: %v", sackFile, se) + } + stack.tcpSACKEnabled = strings.TrimSpace(string(sack)) != "0" + + links, err := stack.DoNetlinkRouteRequest(syscall.RTM_GETLINK) + if err != nil { + return nil, fmt.Errorf("RTM_GETLINK failed: %v", err) + } + + addrs, err := stack.DoNetlinkRouteRequest(syscall.RTM_GETADDR) + if err != nil { + return nil, fmt.Errorf("RTM_GETADDR failed: %v", err) + } + + e = hostinet.ExtractHostInterfaces(links, addrs, stack.interfaces, stack.interfaceAddrs) + if e != nil { + return nil, e + } + + return stack, nil +} + +// Interfaces implements inet.Stack.Interfaces. +func (s *Stack) Interfaces() map[int32]inet.Interface { + return s.interfaces +} + +// InterfaceAddrs implements inet.Stack.InterfaceAddrs. +func (s *Stack) InterfaceAddrs() map[int32][]inet.InterfaceAddr { + return s.interfaceAddrs +} + +// SupportsIPv6 implements inet.Stack.SupportsIPv6. +func (s *Stack) SupportsIPv6() bool { + return s.supportsIPv6 +} + +// TCPReceiveBufferSize implements inet.Stack.TCPReceiveBufferSize. +func (s *Stack) TCPReceiveBufferSize() (inet.TCPBufferSize, error) { + return s.tcpRecvBufSize, nil +} + +// SetTCPReceiveBufferSize implements inet.Stack.SetTCPReceiveBufferSize. +func (s *Stack) SetTCPReceiveBufferSize(size inet.TCPBufferSize) error { + // To keep all the supported stacks consistent we don't allow changing this + // value even though it would be possible via an RPC. 
+ return syserror.EACCES +} + +// TCPSendBufferSize implements inet.Stack.TCPSendBufferSize. +func (s *Stack) TCPSendBufferSize() (inet.TCPBufferSize, error) { + return s.tcpSendBufSize, nil +} + +// SetTCPSendBufferSize implements inet.Stack.SetTCPSendBufferSize. +func (s *Stack) SetTCPSendBufferSize(size inet.TCPBufferSize) error { + // To keep all the supported stacks consistent we don't allow changing this + // value even though it would be possible via an RPC. + return syserror.EACCES +} + +// TCPSACKEnabled implements inet.Stack.TCPSACKEnabled. +func (s *Stack) TCPSACKEnabled() (bool, error) { + return s.tcpSACKEnabled, nil +} + +// SetTCPSACKEnabled implements inet.Stack.SetTCPSACKEnabled. +func (s *Stack) SetTCPSACKEnabled(enabled bool) error { + // To keep all the supported stacks consistent we don't allow changing this + // value even though it would be possible via an RPC. + return syserror.EACCES +} diff --git a/pkg/sentry/socket/rpcinet/stack_unsafe.go b/pkg/sentry/socket/rpcinet/stack_unsafe.go new file mode 100644 index 000000000..9a896c623 --- /dev/null +++ b/pkg/sentry/socket/rpcinet/stack_unsafe.go @@ -0,0 +1,193 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpcinet + +import ( + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + pb "gvisor.googlesource.com/gvisor/pkg/sentry/socket/rpcinet/syscall_rpc_go_proto" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserr" +) + +// NewNetlinkRouteRequest builds a netlink message for getting the RIB, +// the routing information base. +func newNetlinkRouteRequest(proto, seq, family int) []byte { + rr := &syscall.NetlinkRouteRequest{} + rr.Header.Len = uint32(syscall.NLMSG_HDRLEN + syscall.SizeofRtGenmsg) + rr.Header.Type = uint16(proto) + rr.Header.Flags = syscall.NLM_F_DUMP | syscall.NLM_F_REQUEST + rr.Header.Seq = uint32(seq) + rr.Data.Family = uint8(family) + return netlinkRRtoWireFormat(rr) +} + +func netlinkRRtoWireFormat(rr *syscall.NetlinkRouteRequest) []byte { + b := make([]byte, rr.Header.Len) + *(*uint32)(unsafe.Pointer(&b[0:4][0])) = rr.Header.Len + *(*uint16)(unsafe.Pointer(&b[4:6][0])) = rr.Header.Type + *(*uint16)(unsafe.Pointer(&b[6:8][0])) = rr.Header.Flags + *(*uint32)(unsafe.Pointer(&b[8:12][0])) = rr.Header.Seq + *(*uint32)(unsafe.Pointer(&b[12:16][0])) = rr.Header.Pid + b[16] = byte(rr.Data.Family) + return b +} + +func (s *Stack) getNetlinkFd() (uint32, *syserr.Error) { + id, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Socket{&pb.SocketRequest{Family: int64(syscall.AF_NETLINK), Type: int64(syscall.SOCK_RAW | syscall.SOCK_NONBLOCK), Protocol: int64(syscall.NETLINK_ROUTE)}}}, false /* ignoreResult */) + <-c + + res := s.rpcConn.Request(id).Result.(*pb.SyscallResponse_Socket).Socket.Result + if e, ok := res.(*pb.SocketResponse_ErrorNumber); ok { + return 0, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + return res.(*pb.SocketResponse_Fd).Fd, nil +} + +func (s *Stack) bindNetlinkFd(fd uint32, sockaddr []byte) *syserr.Error { + id, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: 
&pb.SyscallRequest_Bind{&pb.BindRequest{Fd: fd, Address: sockaddr}}}, false /* ignoreResult */) + <-c + + if e := s.rpcConn.Request(id).Result.(*pb.SyscallResponse_Bind).Bind.ErrorNumber; e != 0 { + return syserr.FromHost(syscall.Errno(e)) + } + return nil +} + +func (s *Stack) closeNetlinkFd(fd uint32) { + _, _ = s.rpcConn.NewRequest(pb.SyscallRequest{Args: &pb.SyscallRequest_Close{&pb.CloseRequest{Fd: fd}}}, true /* ignoreResult */) +} + +func (s *Stack) rpcSendMsg(req *pb.SyscallRequest_Sendmsg) (uint32, *syserr.Error) { + id, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */) + <-c + + res := s.rpcConn.Request(id).Result.(*pb.SyscallResponse_Sendmsg).Sendmsg.Result + if e, ok := res.(*pb.SendmsgResponse_ErrorNumber); ok { + return 0, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + + return res.(*pb.SendmsgResponse_Length).Length, nil +} + +func (s *Stack) sendMsg(fd uint32, buf []byte, to []byte, flags int) (int, *syserr.Error) { + // Whitelist flags. 
+ if flags&^(syscall.MSG_DONTWAIT|syscall.MSG_EOR|syscall.MSG_FASTOPEN|syscall.MSG_MORE|syscall.MSG_NOSIGNAL) != 0 { + return 0, syserr.ErrInvalidArgument + } + + req := &pb.SyscallRequest_Sendmsg{&pb.SendmsgRequest{ + Fd: fd, + Data: buf, + Address: to, + More: flags&linux.MSG_MORE != 0, + EndOfRecord: flags&linux.MSG_EOR != 0, + }} + + n, err := s.rpcSendMsg(req) + return int(n), err +} + +func (s *Stack) rpcRecvMsg(req *pb.SyscallRequest_Recvmsg) (*pb.RecvmsgResponse_ResultPayload, *syserr.Error) { + id, c := s.rpcConn.NewRequest(pb.SyscallRequest{Args: req}, false /* ignoreResult */) + <-c + + res := s.rpcConn.Request(id).Result.(*pb.SyscallResponse_Recvmsg).Recvmsg.Result + if e, ok := res.(*pb.RecvmsgResponse_ErrorNumber); ok { + return nil, syserr.FromHost(syscall.Errno(e.ErrorNumber)) + } + + return res.(*pb.RecvmsgResponse_Payload).Payload, nil +} + +func (s *Stack) recvMsg(fd, l, flags uint32) ([]byte, *syserr.Error) { + req := &pb.SyscallRequest_Recvmsg{&pb.RecvmsgRequest{ + Fd: fd, + Length: l, + Sender: false, + Trunc: flags&linux.MSG_TRUNC != 0, + Peek: flags&linux.MSG_PEEK != 0, + }} + + res, err := s.rpcRecvMsg(req) + if err != nil { + return nil, err + } + return res.Data, nil +} + +func (s *Stack) netlinkRequest(proto, family int) ([]byte, error) { + fd, err := s.getNetlinkFd() + if err != nil { + return nil, err.ToError() + } + defer s.closeNetlinkFd(fd) + + lsa := syscall.SockaddrNetlink{Family: syscall.AF_NETLINK} + b := binary.Marshal(nil, usermem.ByteOrder, &lsa) + if err := s.bindNetlinkFd(fd, b); err != nil { + return nil, err.ToError() + } + + wb := newNetlinkRouteRequest(proto, 1, family) + _, err = s.sendMsg(fd, wb, b, 0) + if err != nil { + return nil, err.ToError() + } + + var tab []byte +done: + for { + rb, err := s.recvMsg(fd, uint32(syscall.Getpagesize()), 0) + nr := len(rb) + if err != nil { + return nil, err.ToError() + } + + if nr < syscall.NLMSG_HDRLEN { + return nil, syserr.ErrInvalidArgument.ToError() + } + + tab = append(tab, 
rb...) + msgs, e := syscall.ParseNetlinkMessage(rb) + if e != nil { + return nil, e + } + + for _, m := range msgs { + if m.Header.Type == syscall.NLMSG_DONE { + break done + } + if m.Header.Type == syscall.NLMSG_ERROR { + return nil, syserr.ErrInvalidArgument.ToError() + } + } + } + + return tab, nil +} + +// DoNetlinkRouteRequest returns routing information base, also known as RIB, +// which consists of network facility information, states and parameters. +func (s *Stack) DoNetlinkRouteRequest(req int) ([]syscall.NetlinkMessage, error) { + data, err := s.netlinkRequest(req, syscall.AF_UNSPEC) + if err != nil { + return nil, err + } + return syscall.ParseNetlinkMessage(data) +} diff --git a/pkg/sentry/socket/rpcinet/syscall_rpc.proto b/pkg/sentry/socket/rpcinet/syscall_rpc.proto new file mode 100644 index 000000000..b845b1bce --- /dev/null +++ b/pkg/sentry/socket/rpcinet/syscall_rpc.proto @@ -0,0 +1,351 @@ +syntax = "proto3"; + +// package syscall_rpc is a set of networking related system calls that can be +// forwarded to a socket gofer. +// +// TODO: Document individual RPCs. 
+package syscall_rpc; + +message SendmsgRequest { + uint32 fd = 1; + bytes data = 2; + bytes address = 3; + bool more = 4; + bool end_of_record = 5; +} + +message SendmsgResponse { + oneof result { + uint32 error_number = 1; + uint32 length = 2; + } +} + +message IOCtlRequest { + uint32 fd = 1; + uint32 cmd = 2; + uint64 arg = 3; +} + +message IOCtlResponse { + oneof result { + uint32 error_number = 1; + uint64 value = 2; + } +} + +message RecvmsgRequest { + uint32 fd = 1; + uint32 length = 2; + bool sender = 3; + bool peek = 4; + bool trunc = 5; +} + +message OpenRequest { + bytes path = 1; + uint32 flags = 2; + uint32 mode = 3; +} + +message OpenResponse { + oneof result { + uint32 error_number = 1; + uint32 fd = 2; + } +} + +message ReadRequest { + uint32 fd = 1; + uint32 length = 2; +} + +message ReadResponse { + oneof result { + uint32 error_number = 1; + bytes data = 2; + } +} + +message ReadFileRequest { + string path = 1; +} + +message ReadFileResponse { + oneof result { + uint32 error_number = 1; + bytes data = 2; + } +} + +message WriteRequest { + uint32 fd = 1; + bytes data = 2; +} + +message WriteResponse { + oneof result { + uint32 error_number = 1; + uint32 length = 2; + } +} + +message WriteFileRequest { + string path = 1; + bytes content = 2; +} + +message WriteFileResponse { + uint32 error_number = 1; + uint32 written = 2; +} + +message AddressResponse { + bytes address = 1; + uint32 length = 2; +} + +message RecvmsgResponse { + message ResultPayload { + bytes data = 1; + AddressResponse address = 2; + uint32 length = 3; + } + oneof result { + uint32 error_number = 1; + ResultPayload payload = 2; + } +} + +message BindRequest { + uint32 fd = 1; + bytes address = 2; +} + +message BindResponse { + uint32 error_number = 1; +} + +message AcceptRequest { + uint32 fd = 1; + bool peer = 2; + int64 flags = 3; +} + +message AcceptResponse { + message ResultPayload { + uint32 fd = 1; + AddressResponse address = 2; + } + oneof result { + uint32 error_number = 
1; + ResultPayload payload = 2; + } +} + +message ConnectRequest { + uint32 fd = 1; + bytes address = 2; +} + +message ConnectResponse { + uint32 error_number = 1; +} + +message ListenRequest { + uint32 fd = 1; + int64 backlog = 2; +} + +message ListenResponse { + uint32 error_number = 1; +} + +message ShutdownRequest { + uint32 fd = 1; + int64 how = 2; +} + +message ShutdownResponse { + uint32 error_number = 1; +} + +message CloseRequest { + uint32 fd = 1; +} + +message CloseResponse { + uint32 error_number = 1; +} + +message GetSockOptRequest { + uint32 fd = 1; + int64 level = 2; + int64 name = 3; + uint32 length = 4; +} + +message GetSockOptResponse { + oneof result { + uint32 error_number = 1; + bytes opt = 2; + } +} + +message SetSockOptRequest { + uint32 fd = 1; + int64 level = 2; + int64 name = 3; + bytes opt = 4; +} + +message SetSockOptResponse { + uint32 error_number = 1; +} + +message GetSockNameRequest { + uint32 fd = 1; +} + +message GetSockNameResponse { + oneof result { + uint32 error_number = 1; + AddressResponse address = 2; + } +} + +message GetPeerNameRequest { + uint32 fd = 1; +} + +message GetPeerNameResponse { + oneof result { + uint32 error_number = 1; + AddressResponse address = 2; + } +} + +message SocketRequest { + int64 family = 1; + int64 type = 2; + int64 protocol = 3; +} + +message SocketResponse { + oneof result { + uint32 error_number = 1; + uint32 fd = 2; + } +} + +message EpollWaitRequest { + uint32 fd = 1; + uint32 num_events = 2; + sint64 msec = 3; +} + +message EpollEvent { + uint32 fd = 1; + uint32 events = 2; +} + +message EpollEvents { + repeated EpollEvent events = 1; +} + +message EpollWaitResponse { + oneof result { + uint32 error_number = 1; + EpollEvents events = 2; + } +} + +message EpollCtlRequest { + uint32 epfd = 1; + int64 op = 2; + uint32 fd = 3; + EpollEvent event = 4; +} + +message EpollCtlResponse { + uint32 error_number = 1; +} + +message EpollCreate1Request { + int64 flag = 1; +} + +message 
EpollCreate1Response { + oneof result { + uint32 error_number = 1; + uint32 fd = 2; + } +} + +message PollRequest { + uint32 fd = 1; + uint32 events = 2; +} + +message PollResponse { + oneof result { + uint32 error_number = 1; + uint32 events = 2; + } +} + +message SyscallRequest { + oneof args { + SocketRequest socket = 1; + SendmsgRequest sendmsg = 2; + RecvmsgRequest recvmsg = 3; + BindRequest bind = 4; + AcceptRequest accept = 5; + ConnectRequest connect = 6; + ListenRequest listen = 7; + ShutdownRequest shutdown = 8; + CloseRequest close = 9; + GetSockOptRequest get_sock_opt = 10; + SetSockOptRequest set_sock_opt = 11; + GetSockNameRequest get_sock_name = 12; + GetPeerNameRequest get_peer_name = 13; + EpollWaitRequest epoll_wait = 14; + EpollCtlRequest epoll_ctl = 15; + EpollCreate1Request epoll_create1 = 16; + PollRequest poll = 17; + ReadRequest read = 18; + WriteRequest write = 19; + OpenRequest open = 20; + IOCtlRequest ioctl = 21; + WriteFileRequest write_file = 22; + ReadFileRequest read_file = 23; + } +} + +message SyscallResponse { + oneof result { + SocketResponse socket = 1; + SendmsgResponse sendmsg = 2; + RecvmsgResponse recvmsg = 3; + BindResponse bind = 4; + AcceptResponse accept = 5; + ConnectResponse connect = 6; + ListenResponse listen = 7; + ShutdownResponse shutdown = 8; + CloseResponse close = 9; + GetSockOptResponse get_sock_opt = 10; + SetSockOptResponse set_sock_opt = 11; + GetSockNameResponse get_sock_name = 12; + GetPeerNameResponse get_peer_name = 13; + EpollWaitResponse epoll_wait = 14; + EpollCtlResponse epoll_ctl = 15; + EpollCreate1Response epoll_create1 = 16; + PollResponse poll = 17; + ReadResponse read = 18; + WriteResponse write = 19; + OpenResponse open = 20; + IOCtlResponse ioctl = 21; + WriteFileResponse write_file = 22; + ReadFileResponse read_file = 23; + } +} diff --git a/pkg/sentry/socket/socket.go b/pkg/sentry/socket/socket.go new file mode 100644 index 000000000..be3026bfa --- /dev/null +++ 
b/pkg/sentry/socket/socket.go @@ -0,0 +1,205 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package socket provides the interfaces that need to be provided by socket +// implementations and providers, as well as per family demultiplexing of socket +// creation. +package socket + +import ( + "fmt" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/device" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +// Socket is the interface containing socket syscalls used by the syscall layer +// to redirect them to the appropriate implementation. +type Socket interface { + fs.FileOperations + + // Connect implements the connect(2) linux syscall. + Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error + + // Accept implements the accept4(2) linux syscall. + // Returns fd, real peer address length and error. Real peer address + // length is only set if len(peer) > 0. 
+ Accept(t *kernel.Task, peerRequested bool, flags int, blocking bool) (kdefs.FD, interface{}, uint32, *syserr.Error) + + // Bind implements the bind(2) linux syscall. + Bind(t *kernel.Task, sockaddr []byte) *syserr.Error + + // Listen implements the listen(2) linux syscall. + Listen(t *kernel.Task, backlog int) *syserr.Error + + // Shutdown implements the shutdown(2) linux syscall. + Shutdown(t *kernel.Task, how int) *syserr.Error + + // GetSockOpt implements the getsockopt(2) linux syscall. + GetSockOpt(t *kernel.Task, level int, name int, outLen int) (interface{}, *syserr.Error) + + // SetSockOpt implements the setsockopt(2) linux syscall. + SetSockOpt(t *kernel.Task, level int, name int, opt []byte) *syserr.Error + + // GetSockName implements the getsockname(2) linux syscall. + // + // addrLen is the address length to be returned to the application, not + // necessarily the actual length of the address. + GetSockName(t *kernel.Task) (addr interface{}, addrLen uint32, err *syserr.Error) + + // GetPeerName implements the getpeername(2) linux syscall. + // + // addrLen is the address length to be returned to the application, not + // necessarily the actual length of the address. + GetPeerName(t *kernel.Task) (addr interface{}, addrLen uint32, err *syserr.Error) + + // RecvMsg implements the recvmsg(2) linux syscall. + // + // senderAddrLen is the address length to be returned to the application, + // not necessarily the actual length of the address. + RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages unix.ControlMessages, err *syserr.Error) + + // SendMsg implements the sendmsg(2) linux syscall. SendMsg does not take + // ownership of the ControlMessage on error. 
+ SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages unix.ControlMessages) (n int, err *syserr.Error) + + // SetRecvTimeout sets the timeout (in ns) for recv operations. Zero means + // no timeout. + SetRecvTimeout(nanoseconds int64) + + // RecvTimeout gets the current timeout (in ns) for recv operations. Zero + // means no timeout. + RecvTimeout() int64 +} + +// Provider is the interface implemented by providers of sockets for specific +// address families (e.g., AF_INET). +type Provider interface { + // Socket creates a new socket. + // + // If a nil Socket _and_ a nil error is returned, it means that the + // protocol is not supported. A non-nil error should only be returned + // if the protocol is supported, but an error occurs during creation. + Socket(t *kernel.Task, stype unix.SockType, protocol int) (*fs.File, *syserr.Error) + + // Pair creates a pair of connected sockets. + // + // See Socket for error information. + Pair(t *kernel.Task, stype unix.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) +} + +// families holds a map of all known address families and their providers. +var families = make(map[int][]Provider) + +// RegisterProvider registers the provider of a given address family so that +// sockets of that type can be created via socket() and/or socketpair() +// syscalls. +func RegisterProvider(family int, provider Provider) { + families[family] = append(families[family], provider) +} + +// New creates a new socket with the given family, type and protocol. +func New(t *kernel.Task, family int, stype unix.SockType, protocol int) (*fs.File, *syserr.Error) { + for _, p := range families[family] { + s, err := p.Socket(t, stype, protocol) + if err != nil { + return nil, err + } + if s != nil { + return s, nil + } + } + + return nil, syserr.ErrAddressFamilyNotSupported +} + +// Pair creates a new connected socket pair with the given family, type and +// protocol. 
+func Pair(t *kernel.Task, family int, stype unix.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) { + providers, ok := families[family] + if !ok { + return nil, nil, syserr.ErrAddressFamilyNotSupported + } + + for _, p := range providers { + s, t, err := p.Pair(t, stype, protocol) + if err != nil { + return nil, nil, err + } + if s != nil && t != nil { + return s, t, nil + } + } + + return nil, nil, syserr.ErrSocketNotSupported +} + +// NewDirent returns a sockfs fs.Dirent that resides on device d. +func NewDirent(ctx context.Context, d *device.Device) *fs.Dirent { + ino := d.NextIno() + // There is no real filesystem backing this pipe, so we pass in a nil + // Filesystem. + inode := fs.NewInode(fsutil.NewSimpleInodeOperations(fsutil.InodeSimpleAttributes{ + FSType: linux.SOCKFS_MAGIC, + UAttr: fs.WithCurrentTime(ctx, fs.UnstableAttr{ + Owner: fs.FileOwnerFromContext(ctx), + Perms: fs.FilePermissions{ + User: fs.PermMask{Read: true, Write: true}, + }, + Links: 1, + }), + }), fs.NewNonCachingMountSource(nil, fs.MountSourceFlags{}), fs.StableAttr{ + Type: fs.Socket, + DeviceID: d.DeviceID(), + InodeID: ino, + BlockSize: usermem.PageSize, + }) + + // Dirent name matches net/socket.c:sockfs_dname. + return fs.NewDirent(inode, fmt.Sprintf("socket:[%d]", ino)) +} + +// ReceiveTimeout stores a timeout for receive calls. +// +// It is meant to be embedded into Socket implementations to help satisfy the +// interface. +// +// Care must be taken when copying ReceiveTimeout as it contains atomic +// variables. +type ReceiveTimeout struct { + // ns is length of the timeout in nanoseconds. + // + // ns must be accessed atomically. + ns int64 +} + +// SetRecvTimeout implements Socket.SetRecvTimeout. +func (rt *ReceiveTimeout) SetRecvTimeout(nanoseconds int64) { + atomic.StoreInt64(&rt.ns, nanoseconds) +} + +// RecvTimeout implements Socket.RecvTimeout. 
+func (rt *ReceiveTimeout) RecvTimeout() int64 { + return atomic.LoadInt64(&rt.ns) +} diff --git a/pkg/sentry/socket/unix/BUILD b/pkg/sentry/socket/unix/BUILD new file mode 100644 index 000000000..1ec6eb7ed --- /dev/null +++ b/pkg/sentry/socket/unix/BUILD @@ -0,0 +1,48 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "unix_state", + srcs = [ + "unix.go", + ], + out = "unix_state.go", + package = "unix", +) + +go_library( + name = "unix", + srcs = [ + "device.go", + "io.go", + "unix.go", + "unix_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/unix", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/abi/linux", + "//pkg/refs", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/device", + "//pkg/sentry/fs", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/sentry/safemem", + "//pkg/sentry/socket", + "//pkg/sentry/socket/control", + "//pkg/sentry/socket/epsocket", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserr", + "//pkg/syserror", + "//pkg/tcpip", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/socket/unix/device.go b/pkg/sentry/socket/unix/device.go new file mode 100644 index 000000000..e8bcc7a9f --- /dev/null +++ b/pkg/sentry/socket/unix/device.go @@ -0,0 +1,20 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package unix + +import "gvisor.googlesource.com/gvisor/pkg/sentry/device" + +// unixSocketDevice is the unix socket virtual device. +var unixSocketDevice = device.NewAnonDevice() diff --git a/pkg/sentry/socket/unix/io.go b/pkg/sentry/socket/unix/io.go new file mode 100644 index 000000000..0ca2e35d0 --- /dev/null +++ b/pkg/sentry/socket/unix/io.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package unix + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +// EndpointWriter implements safemem.Writer that writes to a unix.Endpoint. +// +// EndpointWriter is not thread-safe. +type EndpointWriter struct { + // Endpoint is the unix.Endpoint to write to. + Endpoint unix.Endpoint + + // Control is the control messages to send. + Control unix.ControlMessages + + // To is the endpoint to send to. May be nil. + To unix.BoundEndpoint +} + +// WriteFromBlocks implements safemem.Writer.WriteFromBlocks. 
+func (w *EndpointWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64, error) { + return safemem.FromVecWriterFunc{func(bufs [][]byte) (int64, error) { + n, err := w.Endpoint.SendMsg(bufs, w.Control, w.To) + if err != nil { + return int64(n), syserr.TranslateNetstackError(err).ToError() + } + return int64(n), nil + }}.WriteFromBlocks(srcs) +} + +// EndpointReader implements safemem.Reader that reads from a unix.Endpoint. +// +// EndpointReader is not thread-safe. +type EndpointReader struct { + // Endpoint is the unix.Endpoint to read from. + Endpoint unix.Endpoint + + // Creds indicates if credential control messages are requested. + Creds bool + + // NumRights is the number of SCM_RIGHTS FDs requested. + NumRights uintptr + + // Peek indicates that the data should not be consumed from the + // endpoint. + Peek bool + + // MsgSize is the size of the message that was read from. For stream + // sockets, it is the amount read. + MsgSize uintptr + + // From, if not nil, will be set with the address read from. + From *tcpip.FullAddress + + // Control contains the received control messages. + Control unix.ControlMessages +} + +// ReadToBlocks implements safemem.Reader.ReadToBlocks. +func (r *EndpointReader) ReadToBlocks(dsts safemem.BlockSeq) (uint64, error) { + return safemem.FromVecReaderFunc{func(bufs [][]byte) (int64, error) { + n, ms, c, err := r.Endpoint.RecvMsg(bufs, r.Creds, r.NumRights, r.Peek, r.From) + r.Control = c + r.MsgSize = ms + if err != nil { + return int64(n), syserr.TranslateNetstackError(err).ToError() + } + return int64(n), nil + }}.ReadToBlocks(dsts) +} diff --git a/pkg/sentry/socket/unix/unix.go b/pkg/sentry/socket/unix/unix.go new file mode 100644 index 000000000..a4b414851 --- /dev/null +++ b/pkg/sentry/socket/unix/unix.go @@ -0,0 +1,571 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package unix provides an implementation of the socket.Socket interface for +// the AF_UNIX protocol family. +package unix + +import ( + "strings" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/refs" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/control" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/epsocket" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserr" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// SocketOperations is a Unix socket. It is similar to an epsocket, except it is backed +// by a unix.Endpoint instead of a tcpip.Endpoint. 
+type SocketOperations struct { + refs.AtomicRefCount + socket.ReceiveTimeout + fsutil.PipeSeek `state:"nosave"` + fsutil.NotDirReaddir `state:"nosave"` + fsutil.NoFsync `state:"nosave"` + fsutil.NoopFlush `state:"nosave"` + fsutil.NoMMap `state:"nosave"` + ep unix.Endpoint +} + +// New creates a new unix socket. +func New(ctx context.Context, endpoint unix.Endpoint) *fs.File { + dirent := socket.NewDirent(ctx, unixSocketDevice) + return NewWithDirent(ctx, dirent, endpoint, fs.FileFlags{Read: true, Write: true}) +} + +// NewWithDirent creates a new unix socket using an existing dirent. +func NewWithDirent(ctx context.Context, d *fs.Dirent, ep unix.Endpoint, flags fs.FileFlags) *fs.File { + return fs.NewFile(ctx, d, flags, &SocketOperations{ + ep: ep, + }) +} + +// DecRef implements RefCounter.DecRef. +func (s *SocketOperations) DecRef() { + s.DecRefWithDestructor(func() { + s.ep.Close() + }) +} + +// Release implemements fs.FileOperations.Release. +func (s *SocketOperations) Release() { + // Release only decrements a reference on s because s may be referenced in + // the abstract socket namespace. + s.DecRef() +} + +// Endpoint extracts the unix.Endpoint. +func (s *SocketOperations) Endpoint() unix.Endpoint { + return s.ep +} + +// extractPath extracts and validates the address. +func extractPath(sockaddr []byte) (string, *syserr.Error) { + addr, err := epsocket.GetAddress(linux.AF_UNIX, sockaddr) + if err != nil { + return "", err + } + + // The address is trimmed by GetAddress. + p := string(addr.Addr) + if p == "" { + // Not allowed. + return "", syserr.ErrInvalidArgument + } + if p[len(p)-1] == '/' { + // Weird, they tried to bind '/a/b/c/'? + return "", syserr.ErrIsDir + } + + return p, nil +} + +// GetPeerName implements the linux syscall getpeername(2) for sockets backed by +// a unix.Endpoint. 
+func (s *SocketOperations) GetPeerName(t *kernel.Task) (interface{}, uint32, *syserr.Error) { + addr, err := s.ep.GetRemoteAddress() + if err != nil { + return nil, 0, syserr.TranslateNetstackError(err) + } + + a, l := epsocket.ConvertAddress(linux.AF_UNIX, addr) + return a, l, nil +} + +// GetSockName implements the linux syscall getsockname(2) for sockets backed by +// a unix.Endpoint. +func (s *SocketOperations) GetSockName(t *kernel.Task) (interface{}, uint32, *syserr.Error) { + addr, err := s.ep.GetLocalAddress() + if err != nil { + return nil, 0, syserr.TranslateNetstackError(err) + } + + a, l := epsocket.ConvertAddress(linux.AF_UNIX, addr) + return a, l, nil +} + +// Ioctl implements fs.FileOperations.Ioctl. +func (s *SocketOperations) Ioctl(ctx context.Context, io usermem.IO, args arch.SyscallArguments) (uintptr, error) { + return epsocket.Ioctl(ctx, s.ep, io, args) +} + +// GetSockOpt implements the linux syscall getsockopt(2) for sockets backed by +// a unix.Endpoint. +func (s *SocketOperations) GetSockOpt(t *kernel.Task, level, name, outLen int) (interface{}, *syserr.Error) { + return epsocket.GetSockOpt(t, s, s.ep, linux.AF_UNIX, s.ep.Type(), level, name, outLen) +} + +// Listen implements the linux syscall listen(2) for sockets backed by +// a unix.Endpoint. +func (s *SocketOperations) Listen(t *kernel.Task, backlog int) *syserr.Error { + return syserr.TranslateNetstackError(s.ep.Listen(backlog)) +} + +// blockingAccept implements a blocking version of accept(2), that is, if no +// connections are ready to be accept, it will block until one becomes ready. +func (s *SocketOperations) blockingAccept(t *kernel.Task) (unix.Endpoint, *syserr.Error) { + // Register for notifications. + e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventIn) + defer s.EventUnregister(&e) + + // Try to accept the connection; if it fails, then wait until we get a + // notification. 
+ for { + if ep, err := s.ep.Accept(); err != tcpip.ErrWouldBlock { + return ep, syserr.TranslateNetstackError(err) + } + + if err := t.Block(ch); err != nil { + return nil, syserr.FromError(err) + } + } +} + +// Accept implements the linux syscall accept(2) for sockets backed by +// a unix.Endpoint. +func (s *SocketOperations) Accept(t *kernel.Task, peerRequested bool, flags int, blocking bool) (kdefs.FD, interface{}, uint32, *syserr.Error) { + // Issue the accept request to get the new endpoint. + ep, err := s.ep.Accept() + if err != nil { + if err != tcpip.ErrWouldBlock || !blocking { + return 0, nil, 0, syserr.TranslateNetstackError(err) + } + + var err *syserr.Error + ep, err = s.blockingAccept(t) + if err != nil { + return 0, nil, 0, err + } + } + + ns := New(t, ep) + defer ns.DecRef() + + if flags&linux.SOCK_NONBLOCK != 0 { + flags := ns.Flags() + flags.NonBlocking = true + ns.SetFlags(flags.Settable()) + } + + var addr interface{} + var addrLen uint32 + if peerRequested { + // Get address of the peer. + var err *syserr.Error + addr, addrLen, err = ns.FileOperations.(*SocketOperations).GetPeerName(t) + if err != nil { + return 0, nil, 0, err + } + } + + fdFlags := kernel.FDFlags{ + CloseOnExec: flags&linux.SOCK_CLOEXEC != 0, + } + fd, e := t.FDMap().NewFDFrom(0, ns, fdFlags, t.ThreadGroup().Limits()) + if e != nil { + return 0, nil, 0, syserr.FromError(e) + } + + return fd, addr, addrLen, nil +} + +// Bind implements the linux syscall bind(2) for unix sockets. +func (s *SocketOperations) Bind(t *kernel.Task, sockaddr []byte) *syserr.Error { + p, e := extractPath(sockaddr) + if e != nil { + return e + } + + bep, ok := s.ep.(unix.BoundEndpoint) + if !ok { + // This socket can't be bound. + return syserr.ErrInvalidArgument + } + + return syserr.TranslateNetstackError(s.ep.Bind(tcpip.FullAddress{Addr: tcpip.Address(p)}, func() *tcpip.Error { + // Is it abstract? 
+ if p[0] == 0 { + if t.IsNetworkNamespaced() { + return tcpip.ErrInvalidEndpointState + } + if err := t.AbstractSockets().Bind(p[1:], bep, s); err != nil { + // tcpip.ErrPortInUse corresponds to EADDRINUSE. + return tcpip.ErrPortInUse + } + } else { + // The parent and name. + var d *fs.Dirent + var name string + + cwd := t.FSContext().WorkingDirectory() + defer cwd.DecRef() + + // Is there no slash at all? + if !strings.Contains(p, "/") { + d = cwd + name = p + } else { + root := t.FSContext().RootDirectory() + defer root.DecRef() + // Find the last path component, we know that something follows + // that final slash, otherwise extractPath() would have failed. + lastSlash := strings.LastIndex(p, "/") + subPath := p[:lastSlash] + if subPath == "" { + // Fix up subpath in case file is in root. + subPath = "/" + } + var err error + d, err = t.MountNamespace().FindInode(t, root, cwd, subPath, fs.DefaultTraversalLimit) + if err != nil { + // No path available. + return tcpip.ErrNoSuchFile + } + defer d.DecRef() + name = p[lastSlash+1:] + } + + // Create the socket. + if err := d.Bind(t, t.FSContext().RootDirectory(), name, bep, fs.FilePermissions{User: fs.PermMask{Read: true}}); err != nil { + return tcpip.ErrPortInUse + } + } + + return nil + })) +} + +// extractEndpoint retrieves the unix.BoundEndpoint associated with a Unix +// socket path. The Release must be called on the unix.BoundEndpoint when the +// caller is done with it. +func extractEndpoint(t *kernel.Task, sockaddr []byte) (unix.BoundEndpoint, *syserr.Error) { + path, err := extractPath(sockaddr) + if err != nil { + return nil, err + } + + // Is it abstract? + if path[0] == 0 { + if t.IsNetworkNamespaced() { + return nil, syserr.ErrInvalidArgument + } + + ep := t.AbstractSockets().BoundEndpoint(path[1:]) + if ep == nil { + // No socket found. + return nil, syserr.ErrConnectionRefused + } + + return ep, nil + } + + // Find the node in the filesystem. 
+ root := t.FSContext().RootDirectory() + cwd := t.FSContext().WorkingDirectory() + d, e := t.MountNamespace().FindInode(t, root, cwd, path, fs.DefaultTraversalLimit) + cwd.DecRef() + root.DecRef() + if e != nil { + return nil, syserr.FromError(e) + } + + // Extract the endpoint if one is there. + ep := d.Inode.BoundEndpoint(path) + d.DecRef() + if ep == nil { + // No socket! + return nil, syserr.ErrConnectionRefused + } + + return ep, nil +} + +// Connect implements the linux syscall connect(2) for unix sockets. +func (s *SocketOperations) Connect(t *kernel.Task, sockaddr []byte, blocking bool) *syserr.Error { + ep, err := extractEndpoint(t, sockaddr) + if err != nil { + return err + } + defer ep.Release() + + // Connect the server endpoint. + return syserr.TranslateNetstackError(s.ep.Connect(ep)) +} + +// Writev implements fs.FileOperations.Write. +func (s *SocketOperations) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, _ int64) (int64, error) { + t := kernel.TaskFromContext(ctx) + ctrl := control.New(t, s.ep, nil) + + if src.NumBytes() == 0 { + nInt, tcpipError := s.ep.SendMsg([][]byte{}, ctrl, nil) + return int64(nInt), syserr.TranslateNetstackError(tcpipError).ToError() + } + + return src.CopyInTo(ctx, &EndpointWriter{ + Endpoint: s.ep, + Control: ctrl, + To: nil, + }) +} + +// SendMsg implements the linux syscall sendmsg(2) for unix sockets backed by +// a unix.Endpoint. +func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to []byte, flags int, controlMessages unix.ControlMessages) (int, *syserr.Error) { + w := EndpointWriter{ + Endpoint: s.ep, + Control: controlMessages, + To: nil, + } + if len(to) > 0 { + ep, err := extractEndpoint(t, to) + if err != nil { + return 0, err + } + defer ep.Release() + w.To = ep + } + + if n, err := src.CopyInTo(t, &w); err != syserror.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 { + return int(n), syserr.FromError(err) + } + + // We'll have to block. 
Register for notification and keep trying to + // send all the data. + e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventOut) + defer s.EventUnregister(&e) + + for { + if n, err := src.CopyInTo(t, &w); err != syserror.ErrWouldBlock { + return int(n), syserr.FromError(err) + } + + if err := t.Block(ch); err != nil { + return 0, syserr.FromError(err) + } + } +} + +// Passcred implements unix.Credentialer.Passcred. +func (s *SocketOperations) Passcred() bool { + return s.ep.Passcred() +} + +// ConnectedPasscred implements unix.Credentialer.ConnectedPasscred. +func (s *SocketOperations) ConnectedPasscred() bool { + return s.ep.ConnectedPasscred() +} + +// Readiness implements waiter.Waitable.Readiness. +func (s *SocketOperations) Readiness(mask waiter.EventMask) waiter.EventMask { + return s.ep.Readiness(mask) +} + +// EventRegister implements waiter.Waitable.EventRegister. +func (s *SocketOperations) EventRegister(e *waiter.Entry, mask waiter.EventMask) { + s.ep.EventRegister(e, mask) +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (s *SocketOperations) EventUnregister(e *waiter.Entry) { + s.ep.EventUnregister(e) +} + +// SetSockOpt implements the linux syscall setsockopt(2) for sockets backed by +// a unix.Endpoint. +func (s *SocketOperations) SetSockOpt(t *kernel.Task, level int, name int, optVal []byte) *syserr.Error { + return epsocket.SetSockOpt(t, s, s.ep, level, name, optVal) +} + +// Shutdown implements the linux syscall shutdown(2) for sockets backed by +// a unix.Endpoint. +func (s *SocketOperations) Shutdown(t *kernel.Task, how int) *syserr.Error { + f, err := epsocket.ConvertShutdown(how) + if err != nil { + return err + } + + // Issue shutdown request. + return syserr.TranslateNetstackError(s.ep.Shutdown(f)) +} + +// Read implements fs.FileOperations.Read. 
+func (s *SocketOperations) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, _ int64) (int64, error) { + if dst.NumBytes() == 0 { + return 0, nil + } + return dst.CopyOutFrom(ctx, &EndpointReader{ + Endpoint: s.ep, + NumRights: 0, + Peek: false, + From: nil, + }) +} + +// RecvMsg implements the linux syscall recvmsg(2) for sockets backed by +// a unix.Endpoint. +func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags int, haveDeadline bool, deadline ktime.Time, senderRequested bool, controlDataLen uint64) (n int, senderAddr interface{}, senderAddrLen uint32, controlMessages unix.ControlMessages, err *syserr.Error) { + trunc := flags&linux.MSG_TRUNC != 0 + peek := flags&linux.MSG_PEEK != 0 + + // Calculate the number of FDs for which we have space and if we are + // requesting credentials. + var wantCreds bool + rightsLen := int(controlDataLen) - syscall.SizeofCmsghdr + if s.Passcred() { + // Credentials take priority if they are enabled and there is space. + wantCreds = rightsLen > 0 + credLen := syscall.CmsgSpace(syscall.SizeofUcred) + rightsLen -= credLen + } + // FDs are 32 bit (4 byte) ints. + numRights := rightsLen / 4 + if numRights < 0 { + numRights = 0 + } + + r := EndpointReader{ + Endpoint: s.ep, + Creds: wantCreds, + NumRights: uintptr(numRights), + Peek: peek, + } + if senderRequested { + r.From = &tcpip.FullAddress{} + } + if n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock || flags&linux.MSG_DONTWAIT != 0 { + var from interface{} + var fromLen uint32 + if r.From != nil { + from, fromLen = epsocket.ConvertAddress(linux.AF_UNIX, *r.From) + } + if trunc { + n = int64(r.MsgSize) + } + return int(n), from, fromLen, r.Control, syserr.FromError(err) + } + + // We'll have to block. Register for notification and keep trying to + // send all the data. 
+ e, ch := waiter.NewChannelEntry(nil) + s.EventRegister(&e, waiter.EventIn) + defer s.EventUnregister(&e) + + for { + if n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock { + var from interface{} + var fromLen uint32 + if r.From != nil { + from, fromLen = epsocket.ConvertAddress(linux.AF_UNIX, *r.From) + } + if trunc { + n = int64(r.MsgSize) + } + return int(n), from, fromLen, r.Control, syserr.FromError(err) + } + + if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil { + if err == syserror.ETIMEDOUT { + return 0, nil, 0, unix.ControlMessages{}, syserr.ErrTryAgain + } + return 0, nil, 0, unix.ControlMessages{}, syserr.FromError(err) + } + } +} + +// provider is a unix domain socket provider. +type provider struct{} + +// Socket returns a new unix domain socket. +func (*provider) Socket(t *kernel.Task, stype unix.SockType, protocol int) (*fs.File, *syserr.Error) { + // Check arguments. + if protocol != 0 { + return nil, syserr.ErrInvalidArgument + } + + // Create the endpoint and socket. + var ep unix.Endpoint + switch stype { + case linux.SOCK_DGRAM: + ep = unix.NewConnectionless() + case linux.SOCK_STREAM, linux.SOCK_SEQPACKET: + ep = unix.NewConnectioned(stype, t.Kernel()) + default: + return nil, syserr.ErrInvalidArgument + } + + return New(t, ep), nil +} + +// Pair creates a new pair of AF_UNIX connected sockets. +func (*provider) Pair(t *kernel.Task, stype unix.SockType, protocol int) (*fs.File, *fs.File, *syserr.Error) { + // Check arguments. + if protocol != 0 { + return nil, nil, syserr.ErrInvalidArgument + } + + switch stype { + case linux.SOCK_STREAM, linux.SOCK_DGRAM, linux.SOCK_SEQPACKET: + default: + return nil, nil, syserr.ErrInvalidArgument + } + + // Create the endpoints and sockets. 
+ ep1, ep2 := unix.NewPair(stype, t.Kernel()) + s1 := New(t, ep1) + s2 := New(t, ep2) + + return s1, s2, nil +} + +func init() { + socket.RegisterProvider(linux.AF_UNIX, &provider{}) +} diff --git a/pkg/sentry/state/BUILD b/pkg/sentry/state/BUILD new file mode 100644 index 000000000..7148df395 --- /dev/null +++ b/pkg/sentry/state/BUILD @@ -0,0 +1,21 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "state", + srcs = [ + "state.go", + "state_metadata.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/state", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/log", + "//pkg/sentry/inet", + "//pkg/sentry/kernel", + "//pkg/sentry/platform", + "//pkg/sentry/watchdog", + "//pkg/state/statefile", + ], +) diff --git a/pkg/sentry/state/state.go b/pkg/sentry/state/state.go new file mode 100644 index 000000000..5bec4e018 --- /dev/null +++ b/pkg/sentry/state/state.go @@ -0,0 +1,113 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package state provides high-level state wrappers. 
+package state + +import ( + "fmt" + "io" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/sentry/inet" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/platform" + "gvisor.googlesource.com/gvisor/pkg/sentry/watchdog" + "gvisor.googlesource.com/gvisor/pkg/state/statefile" +) + +// ErrStateFile is returned when the state file cannot be opened. +type ErrStateFile struct { + err error +} + +// Error implements error.Error(). +func (e ErrStateFile) Error() string { + return fmt.Sprintf("failed to open statefile: %v", e.err) +} + +// SaveOpts contains save-related options. +type SaveOpts struct { + // Destination is the save target. + Destination io.Writer + + // Key is used for state integrity check. + Key []byte + + // Metadata is save metadata. + Metadata map[string]string + + // CompressionLevel is the compression level to use. + // + // See statefile.NewWriter for details. + CompressionLevel int + + // Callback is called prior to unpause, with any save error. + Callback func(err error) +} + +// Save saves the system state. +func (opts SaveOpts) Save(k *kernel.Kernel, w *watchdog.Watchdog) error { + log.Infof("Sandbox save started, pausing all tasks.") + k.Pause() + defer k.Unpause() + defer log.Infof("Tasks resumed after save.") + + w.Stop() + defer w.Start() + + // Supplement the metadata. + if opts.Metadata == nil { + opts.Metadata = make(map[string]string) + } + addSaveMetadata(opts.Metadata) + + // Open the statefile. + wc, err := statefile.NewWriter(opts.Destination, opts.Key, opts.Metadata, opts.CompressionLevel) + if err != nil { + err = ErrStateFile{err} + } else { + // Save the kernel. + err = k.SaveTo(wc) + if closeErr := wc.Close(); err == nil && closeErr != nil { + err = closeErr + } + if err != nil { + err = ErrStateFile{err} + } + } + opts.Callback(err) + return err +} + +// LoadOpts contains load-related options. 
+type LoadOpts struct { + // Destination is the load source. + Source io.Reader + + // Key is used for state integrity check. + Key []byte +} + +// Load loads the given kernel, setting the provided platform and stack. +func (opts LoadOpts) Load(k *kernel.Kernel, p platform.Platform, n inet.Stack) error { + // Open the file. + r, _, err := statefile.NewReader(opts.Source, opts.Key) + if err != nil { + return ErrStateFile{err} + } + + // Restore the Kernel object graph. + return k.LoadFrom(r, p, n) +} diff --git a/pkg/sentry/state/state_metadata.go b/pkg/sentry/state/state_metadata.go new file mode 100644 index 000000000..ac374f428 --- /dev/null +++ b/pkg/sentry/state/state_metadata.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "fmt" + "time" +) + +// The save metadata keys for timestamp. 
+const ( + metadataTimestamp = "timestamp" +) + +func addSaveMetadata(m map[string]string) { + m[metadataTimestamp] = fmt.Sprintf("%v", time.Now()) +} diff --git a/pkg/sentry/strace/BUILD b/pkg/sentry/strace/BUILD new file mode 100644 index 000000000..c5946a564 --- /dev/null +++ b/pkg/sentry/strace/BUILD @@ -0,0 +1,48 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "strace", + srcs = [ + "clone.go", + "futex.go", + "linux64.go", + "open.go", + "ptrace.go", + "socket.go", + "strace.go", + "syscalls.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/strace", + visibility = ["//:sandbox"], + deps = [ + ":strace_go_proto", + "//pkg/abi", + "//pkg/abi/linux", + "//pkg/binary", + "//pkg/bits", + "//pkg/eventchannel", + "//pkg/sentry/arch", + "//pkg/sentry/kernel", + "//pkg/sentry/socket/control", + "//pkg/sentry/socket/epsocket", + "//pkg/sentry/socket/netlink", + "//pkg/sentry/syscalls/linux", + "//pkg/sentry/usermem", + ], +) + +proto_library( + name = "strace_proto", + srcs = ["strace.proto"], + visibility = ["//visibility:public"], +) + +go_proto_library( + name = "strace_go_proto", + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/strace/strace_go_proto", + proto = ":strace_proto", + visibility = ["//visibility:public"], +) diff --git a/pkg/sentry/strace/clone.go b/pkg/sentry/strace/clone.go new file mode 100644 index 000000000..b82ca1ad1 --- /dev/null +++ b/pkg/sentry/strace/clone.go @@ -0,0 +1,113 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strace + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi" +) + +// CloneFlagSet is the set of clone(2) flags. +var CloneFlagSet = abi.FlagSet{ + { + Flag: syscall.CLONE_VM, + Name: "CLONE_VM", + }, + { + Flag: syscall.CLONE_FS, + Name: "CLONE_FS", + }, + { + Flag: syscall.CLONE_FILES, + Name: "CLONE_FILES", + }, + { + Flag: syscall.CLONE_SIGHAND, + Name: "CLONE_SIGHAND", + }, + { + Flag: syscall.CLONE_PTRACE, + Name: "CLONE_PTRACE", + }, + { + Flag: syscall.CLONE_VFORK, + Name: "CLONE_VFORK", + }, + { + Flag: syscall.CLONE_PARENT, + Name: "CLONE_PARENT", + }, + { + Flag: syscall.CLONE_THREAD, + Name: "CLONE_THREAD", + }, + { + Flag: syscall.CLONE_NEWNS, + Name: "CLONE_NEWNS", + }, + { + Flag: syscall.CLONE_SYSVSEM, + Name: "CLONE_SYSVSEM", + }, + { + Flag: syscall.CLONE_SETTLS, + Name: "CLONE_SETTLS", + }, + { + Flag: syscall.CLONE_PARENT_SETTID, + Name: "CLONE_PARENT_SETTID", + }, + { + Flag: syscall.CLONE_CHILD_CLEARTID, + Name: "CLONE_CHILD_CLEARTID", + }, + { + Flag: syscall.CLONE_DETACHED, + Name: "CLONE_DETACHED", + }, + { + Flag: syscall.CLONE_UNTRACED, + Name: "CLONE_UNTRACED", + }, + { + Flag: syscall.CLONE_CHILD_SETTID, + Name: "CLONE_CHILD_SETTID", + }, + { + Flag: syscall.CLONE_NEWUTS, + Name: "CLONE_NEWUTS", + }, + { + Flag: syscall.CLONE_NEWIPC, + Name: "CLONE_NEWIPC", + }, + { + Flag: syscall.CLONE_NEWUSER, + Name: "CLONE_NEWUSER", + }, + { + Flag: syscall.CLONE_NEWPID, + Name: "CLONE_NEWPID", + }, + { + Flag: syscall.CLONE_NEWNET, + Name: "CLONE_NEWNET", + }, + { + Flag: syscall.CLONE_IO, + Name: 
"CLONE_IO", + }, +} diff --git a/pkg/sentry/strace/futex.go b/pkg/sentry/strace/futex.go new file mode 100644 index 000000000..3da108cb7 --- /dev/null +++ b/pkg/sentry/strace/futex.go @@ -0,0 +1,91 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strace + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +// FutexCmd are the possible futex(2) commands. +var FutexCmd = abi.ValueSet{ + { + Value: linux.FUTEX_WAIT, + Name: "FUTEX_WAIT", + }, + { + Value: linux.FUTEX_WAKE, + Name: "FUTEX_WAKE", + }, + { + Value: linux.FUTEX_FD, + Name: "FUTEX_FD", + }, + { + Value: linux.FUTEX_REQUEUE, + Name: "FUTEX_REQUEUE", + }, + { + Value: linux.FUTEX_CMP_REQUEUE, + Name: "FUTEX_CMP_REQUEUE", + }, + { + Value: linux.FUTEX_WAKE_OP, + Name: "FUTEX_WAKE_OP", + }, + { + Value: linux.FUTEX_LOCK_PI, + Name: "FUTEX_LOCK_PI", + }, + { + Value: linux.FUTEX_UNLOCK_PI, + Name: "FUTEX_UNLOCK_PI", + }, + { + Value: linux.FUTEX_TRYLOCK_PI, + Name: "FUTEX_TRYLOCK_PI", + }, + { + Value: linux.FUTEX_WAIT_BITSET, + Name: "FUTEX_WAIT_BITSET", + }, + { + Value: linux.FUTEX_WAKE_BITSET, + Name: "FUTEX_WAKE_BITSET", + }, + { + Value: linux.FUTEX_WAIT_REQUEUE_PI, + Name: "FUTEX_WAIT_REQUEUE_PI", + }, + { + Value: linux.FUTEX_CMP_REQUEUE_PI, + Name: "FUTEX_CMP_REQUEUE_PI", + }, +} + +func futex(op uint64) string { + cmd := op &^ (linux.FUTEX_PRIVATE_FLAG | linux.FUTEX_CLOCK_REALTIME) + 
clockRealtime := (op & linux.FUTEX_CLOCK_REALTIME) == linux.FUTEX_CLOCK_REALTIME + private := (op & linux.FUTEX_PRIVATE_FLAG) == linux.FUTEX_PRIVATE_FLAG + + s := FutexCmd.Parse(cmd) + if clockRealtime { + s += "|FUTEX_CLOCK_REALTIME" + } + if private { + s += "|FUTEX_PRIVATE_FLAG" + } + return s +} diff --git a/pkg/sentry/strace/linux64.go b/pkg/sentry/strace/linux64.go new file mode 100644 index 000000000..90ea8c36f --- /dev/null +++ b/pkg/sentry/strace/linux64.go @@ -0,0 +1,338 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strace + +// linuxAMD64 provides a mapping of the Linux amd64 syscalls and their argument +// types for display / formatting. 
+var linuxAMD64 = SyscallMap{ + 0: makeSyscallInfo("read", Hex, ReadBuffer, Hex), + 1: makeSyscallInfo("write", Hex, WriteBuffer, Hex), + 2: makeSyscallInfo("open", Path, OpenFlags, Mode), + 3: makeSyscallInfo("close", Hex), + 4: makeSyscallInfo("stat", Path, Stat), + 5: makeSyscallInfo("fstat", Hex, Stat), + 6: makeSyscallInfo("lstat", Path, Stat), + 7: makeSyscallInfo("poll", Hex, Hex, Hex), + 8: makeSyscallInfo("lseek", Hex, Hex, Hex), + 9: makeSyscallInfo("mmap", Hex, Hex, Hex, Hex, Hex, Hex), + 10: makeSyscallInfo("mprotect", Hex, Hex, Hex), + 11: makeSyscallInfo("munmap", Hex, Hex), + 12: makeSyscallInfo("brk", Hex), + 13: makeSyscallInfo("rt_sigaction", Hex, Hex, Hex), + 14: makeSyscallInfo("rt_sigprocmask", Hex, Hex, Hex, Hex), + 15: makeSyscallInfo("rt_sigreturn"), + 16: makeSyscallInfo("ioctl", Hex, Hex, Hex), + 17: makeSyscallInfo("pread64", Hex, ReadBuffer, Hex, Hex), + 18: makeSyscallInfo("pwrite64", Hex, WriteBuffer, Hex, Hex), + 19: makeSyscallInfo("readv", Hex, ReadIOVec, Hex), + 20: makeSyscallInfo("writev", Hex, WriteIOVec, Hex), + 21: makeSyscallInfo("access", Path, Oct), + 22: makeSyscallInfo("pipe", PipeFDs), + 23: makeSyscallInfo("select", Hex, Hex, Hex, Hex, Timeval), + 24: makeSyscallInfo("sched_yield"), + 25: makeSyscallInfo("mremap", Hex, Hex, Hex, Hex, Hex), + 26: makeSyscallInfo("msync", Hex, Hex, Hex), + 27: makeSyscallInfo("mincore", Hex, Hex, Hex), + 28: makeSyscallInfo("madvise", Hex, Hex, Hex), + 29: makeSyscallInfo("shmget", Hex, Hex, Hex), + 30: makeSyscallInfo("shmat", Hex, Hex, Hex), + 31: makeSyscallInfo("shmctl", Hex, Hex, Hex), + 32: makeSyscallInfo("dup", Hex), + 33: makeSyscallInfo("dup2", Hex, Hex), + 34: makeSyscallInfo("pause"), + 35: makeSyscallInfo("nanosleep", Timespec, PostTimespec), + 36: makeSyscallInfo("getitimer", Hex, PostItimerVal), + 37: makeSyscallInfo("alarm", Hex), + 38: makeSyscallInfo("setitimer", Hex, ItimerVal, PostItimerVal), + 39: makeSyscallInfo("getpid"), + 40: makeSyscallInfo("sendfile", Hex, Hex, 
Hex, Hex), + 41: makeSyscallInfo("socket", SockFamily, SockType, SockProtocol), + 42: makeSyscallInfo("connect", Hex, SockAddr, Hex), + 43: makeSyscallInfo("accept", Hex, PostSockAddr, SockLen), + 44: makeSyscallInfo("sendto", Hex, Hex, Hex, Hex, SockAddr, Hex), + 45: makeSyscallInfo("recvfrom", Hex, Hex, Hex, Hex, PostSockAddr, SockLen), + 46: makeSyscallInfo("sendmsg", Hex, SendMsgHdr, Hex), + 47: makeSyscallInfo("recvmsg", Hex, RecvMsgHdr, Hex), + 48: makeSyscallInfo("shutdown", Hex, Hex), + 49: makeSyscallInfo("bind", Hex, SockAddr, Hex), + 50: makeSyscallInfo("listen", Hex, Hex), + 51: makeSyscallInfo("getsockname", Hex, PostSockAddr, SockLen), + 52: makeSyscallInfo("getpeername", Hex, PostSockAddr, SockLen), + 53: makeSyscallInfo("socketpair", SockFamily, SockType, SockProtocol, Hex), + 54: makeSyscallInfo("setsockopt", Hex, Hex, Hex, Hex, Hex), + 55: makeSyscallInfo("getsockopt", Hex, Hex, Hex, Hex, Hex), + 56: makeSyscallInfo("clone", CloneFlags, Hex, Hex, Hex, Hex), + 57: makeSyscallInfo("fork"), + 58: makeSyscallInfo("vfork"), + 59: makeSyscallInfo("execve", Path, ExecveStringVector, ExecveStringVector), + 60: makeSyscallInfo("exit", Hex), + 61: makeSyscallInfo("wait4", Hex, Hex, Hex, Rusage), + 62: makeSyscallInfo("kill", Hex, Hex), + 63: makeSyscallInfo("uname", Uname), + 64: makeSyscallInfo("semget", Hex, Hex, Hex), + 65: makeSyscallInfo("semop", Hex, Hex, Hex), + 66: makeSyscallInfo("semctl", Hex, Hex, Hex, Hex), + 67: makeSyscallInfo("shmdt", Hex), + 68: makeSyscallInfo("msgget", Hex, Hex), + 69: makeSyscallInfo("msgsnd", Hex, Hex, Hex, Hex), + 70: makeSyscallInfo("msgrcv", Hex, Hex, Hex, Hex, Hex), + 71: makeSyscallInfo("msgctl", Hex, Hex, Hex), + 72: makeSyscallInfo("fcntl", Hex, Hex, Hex), + 73: makeSyscallInfo("flock", Hex, Hex), + 74: makeSyscallInfo("fsync", Hex), + 75: makeSyscallInfo("fdatasync", Hex), + 76: makeSyscallInfo("truncate", Path, Hex), + 77: makeSyscallInfo("ftruncate", Hex, Hex), + 78: makeSyscallInfo("getdents", Hex, Hex, Hex), 
+ 79: makeSyscallInfo("getcwd", Hex, Hex), + 80: makeSyscallInfo("chdir", Path), + 81: makeSyscallInfo("fchdir", Hex), + 82: makeSyscallInfo("rename", Path, Path), + 83: makeSyscallInfo("mkdir", Path, Oct), + 84: makeSyscallInfo("rmdir", Path), + 85: makeSyscallInfo("creat", Path, Oct), + 86: makeSyscallInfo("link", Path, Path), + 87: makeSyscallInfo("unlink", Path), + 88: makeSyscallInfo("symlink", Path, Path), + 89: makeSyscallInfo("readlink", Path, ReadBuffer, Hex), + 90: makeSyscallInfo("chmod", Path, Mode), + 91: makeSyscallInfo("fchmod", Hex, Mode), + 92: makeSyscallInfo("chown", Path, Hex, Hex), + 93: makeSyscallInfo("fchown", Hex, Hex, Hex), + 94: makeSyscallInfo("lchown", Hex, Hex, Hex), + 95: makeSyscallInfo("umask", Hex), + 96: makeSyscallInfo("gettimeofday", Timeval, Hex), + 97: makeSyscallInfo("getrlimit", Hex, Hex), + 98: makeSyscallInfo("getrusage", Hex, Rusage), + 99: makeSyscallInfo("sysinfo", Hex), + 100: makeSyscallInfo("times", Hex), + 101: makeSyscallInfo("ptrace", PtraceRequest, Hex, Hex, Hex), + 102: makeSyscallInfo("getuid"), + 103: makeSyscallInfo("syslog", Hex, Hex, Hex), + 104: makeSyscallInfo("getgid"), + 105: makeSyscallInfo("setuid", Hex), + 106: makeSyscallInfo("setgid", Hex), + 107: makeSyscallInfo("geteuid"), + 108: makeSyscallInfo("getegid"), + 109: makeSyscallInfo("setpgid", Hex, Hex), + 110: makeSyscallInfo("getppid"), + 111: makeSyscallInfo("getpgrp"), + 112: makeSyscallInfo("setsid"), + 113: makeSyscallInfo("setreuid", Hex, Hex), + 114: makeSyscallInfo("setregid", Hex, Hex), + 115: makeSyscallInfo("getgroups", Hex, Hex), + 116: makeSyscallInfo("setgroups", Hex, Hex), + 117: makeSyscallInfo("setresuid", Hex, Hex, Hex), + 118: makeSyscallInfo("getresuid", Hex, Hex, Hex), + 119: makeSyscallInfo("setresgid", Hex, Hex, Hex), + 120: makeSyscallInfo("getresgid", Hex, Hex, Hex), + 121: makeSyscallInfo("getpgid", Hex), + 122: makeSyscallInfo("setfsuid", Hex), + 123: makeSyscallInfo("setfsgid", Hex), + 124: makeSyscallInfo("getsid", 
Hex), + 125: makeSyscallInfo("capget", Hex, Hex), + 126: makeSyscallInfo("capset", Hex, Hex), + 127: makeSyscallInfo("rt_sigpending", Hex), + 128: makeSyscallInfo("rt_sigtimedwait", Hex, Hex, Timespec, Hex), + 129: makeSyscallInfo("rt_sigqueueinfo", Hex, Hex, Hex), + 130: makeSyscallInfo("rt_sigsuspend", Hex), + 131: makeSyscallInfo("sigaltstack", Hex, Hex), + 132: makeSyscallInfo("utime", Path, Utimbuf), + 133: makeSyscallInfo("mknod", Path, Mode, Hex), + 134: makeSyscallInfo("uselib", Hex), + 135: makeSyscallInfo("personality", Hex), + 136: makeSyscallInfo("ustat", Hex, Hex), + 137: makeSyscallInfo("statfs", Path, Hex), + 138: makeSyscallInfo("fstatfs", Hex, Hex), + 139: makeSyscallInfo("sysfs", Hex, Hex, Hex), + 140: makeSyscallInfo("getpriority", Hex, Hex), + 141: makeSyscallInfo("setpriority", Hex, Hex, Hex), + 142: makeSyscallInfo("sched_setparam", Hex, Hex), + 143: makeSyscallInfo("sched_getparam", Hex, Hex), + 144: makeSyscallInfo("sched_setscheduler", Hex, Hex, Hex), + 145: makeSyscallInfo("sched_getscheduler", Hex), + 146: makeSyscallInfo("sched_get_priority_max", Hex), + 147: makeSyscallInfo("sched_get_priority_min", Hex), + 148: makeSyscallInfo("sched_rr_get_interval", Hex, Hex), + 149: makeSyscallInfo("mlock", Hex, Hex), + 150: makeSyscallInfo("munlock", Hex, Hex), + 151: makeSyscallInfo("mlockall", Hex), + 152: makeSyscallInfo("munlockall"), + 153: makeSyscallInfo("vhangup"), + 154: makeSyscallInfo("modify_ldt", Hex, Hex, Hex), + 155: makeSyscallInfo("pivot_root", Hex, Hex), + 156: makeSyscallInfo("_sysctl", Hex), + 157: makeSyscallInfo("prctl", Hex, Hex, Hex, Hex, Hex), + 158: makeSyscallInfo("arch_prctl", Hex, Hex), + 159: makeSyscallInfo("adjtimex", Hex), + 160: makeSyscallInfo("setrlimit", Hex, Hex), + 161: makeSyscallInfo("chroot", Path), + 162: makeSyscallInfo("sync"), + 163: makeSyscallInfo("acct", Hex), + 164: makeSyscallInfo("settimeofday", Timeval, Hex), + 165: makeSyscallInfo("mount", Path, Path, Path, Hex, Path), + 166: 
makeSyscallInfo("umount2", Path, Hex), + 167: makeSyscallInfo("swapon", Hex, Hex), + 168: makeSyscallInfo("swapoff", Hex), + 169: makeSyscallInfo("reboot", Hex, Hex, Hex, Hex), + 170: makeSyscallInfo("sethostname", Hex, Hex), + 171: makeSyscallInfo("setdomainname", Hex, Hex), + 172: makeSyscallInfo("iopl", Hex), + 173: makeSyscallInfo("ioperm", Hex, Hex, Hex), + 174: makeSyscallInfo("create_module", Path, Hex), + 175: makeSyscallInfo("init_module", Hex, Hex, Hex), + 176: makeSyscallInfo("delete_module", Hex, Hex), + 177: makeSyscallInfo("get_kernel_syms", Hex), + // 178: query_module (only present in Linux < 2.6) + 179: makeSyscallInfo("quotactl", Hex, Hex, Hex, Hex), + 180: makeSyscallInfo("nfsservctl", Hex, Hex, Hex), + // 181: getpmsg (not implemented in the Linux kernel) + // 182: putpmsg (not implemented in the Linux kernel) + // 183: afs_syscall (not implemented in the Linux kernel) + // 184: tuxcall (not implemented in the Linux kernel) + // 185: security (not implemented in the Linux kernel) + 186: makeSyscallInfo("gettid"), + 187: makeSyscallInfo("readahead", Hex, Hex, Hex), + 188: makeSyscallInfo("setxattr", Path, Path, Hex, Hex, Hex), + 189: makeSyscallInfo("lsetxattr", Path, Path, Hex, Hex, Hex), + 190: makeSyscallInfo("fsetxattr", Hex, Path, Hex, Hex, Hex), + 191: makeSyscallInfo("getxattr", Path, Path, Hex, Hex), + 192: makeSyscallInfo("lgetxattr", Path, Path, Hex, Hex), + 193: makeSyscallInfo("fgetxattr", Hex, Path, Hex, Hex), + 194: makeSyscallInfo("listxattr", Path, Path, Hex), + 195: makeSyscallInfo("llistxattr", Path, Path, Hex), + 196: makeSyscallInfo("flistxattr", Hex, Path, Hex), + 197: makeSyscallInfo("removexattr", Path, Path), + 198: makeSyscallInfo("lremovexattr", Path, Path), + 199: makeSyscallInfo("fremovexattr", Hex, Path), + 200: makeSyscallInfo("tkill", Hex, Hex), + 201: makeSyscallInfo("time", Hex), + 202: makeSyscallInfo("futex", Hex, FutexOp, Hex, Timespec, Hex, Hex), + 203: makeSyscallInfo("sched_setaffinity", Hex, Hex, Hex), + 
204: makeSyscallInfo("sched_getaffinity", Hex, Hex, Hex), + 205: makeSyscallInfo("set_thread_area", Hex), + 206: makeSyscallInfo("io_setup", Hex, Hex), + 207: makeSyscallInfo("io_destroy", Hex), + 208: makeSyscallInfo("io_getevents", Hex, Hex, Hex, Hex, Timespec), + 209: makeSyscallInfo("io_submit", Hex, Hex, Hex), + 210: makeSyscallInfo("io_cancel", Hex, Hex, Hex), + 211: makeSyscallInfo("get_thread_area", Hex), + 212: makeSyscallInfo("lookup_dcookie", Hex, Hex, Hex), + 213: makeSyscallInfo("epoll_create", Hex), + // 214: epoll_ctl_old (not implemented in the Linux kernel) + // 215: epoll_wait_old (not implemented in the Linux kernel) + 216: makeSyscallInfo("remap_file_pages", Hex, Hex, Hex, Hex, Hex), + 217: makeSyscallInfo("getdents64", Hex, Hex, Hex), + 218: makeSyscallInfo("set_tid_address", Hex), + 219: makeSyscallInfo("restart_syscall"), + 220: makeSyscallInfo("semtimedop", Hex, Hex, Hex, Hex), + 221: makeSyscallInfo("fadvise64", Hex, Hex, Hex, Hex), + 222: makeSyscallInfo("timer_create", Hex, Hex, Hex), + 223: makeSyscallInfo("timer_settime", Hex, Hex, Hex, Hex), + 224: makeSyscallInfo("timer_gettime", Hex, Hex), + 225: makeSyscallInfo("timer_getoverrun", Hex), + 226: makeSyscallInfo("timer_delete", Hex), + 227: makeSyscallInfo("clock_settime", Hex, Timespec), + 228: makeSyscallInfo("clock_gettime", Hex, PostTimespec), + 229: makeSyscallInfo("clock_getres", Hex, PostTimespec), + 230: makeSyscallInfo("clock_nanosleep", Hex, Hex, Timespec, PostTimespec), + 231: makeSyscallInfo("exit_group", Hex), + 232: makeSyscallInfo("epoll_wait", Hex, Hex, Hex, Hex), + 233: makeSyscallInfo("epoll_ctl", Hex, Hex, Hex, Hex), + 234: makeSyscallInfo("tgkill", Hex, Hex, Hex), + 235: makeSyscallInfo("utimes", Path, Timeval), + // 236: vserver (not implemented in the Linux kernel) + 237: makeSyscallInfo("mbind", Hex, Hex, Hex, Hex, Hex, Hex), + 238: makeSyscallInfo("set_mempolicy", Hex, Hex, Hex), + 239: makeSyscallInfo("get_mempolicy", Hex, Hex, Hex, Hex, Hex), + 240: 
makeSyscallInfo("mq_open", Hex, Hex, Hex, Hex), + 241: makeSyscallInfo("mq_unlink", Hex), + 242: makeSyscallInfo("mq_timedsend", Hex, Hex, Hex, Hex, Hex), + 243: makeSyscallInfo("mq_timedreceive", Hex, Hex, Hex, Hex, Hex), + 244: makeSyscallInfo("mq_notify", Hex, Hex), + 245: makeSyscallInfo("mq_getsetattr", Hex, Hex, Hex), + 246: makeSyscallInfo("kexec_load", Hex, Hex, Hex, Hex), + 247: makeSyscallInfo("waitid", Hex, Hex, Hex, Hex, Rusage), + 248: makeSyscallInfo("add_key", Hex, Hex, Hex, Hex, Hex), + 249: makeSyscallInfo("request_key", Hex, Hex, Hex, Hex), + 250: makeSyscallInfo("keyctl", Hex, Hex, Hex, Hex, Hex), + 251: makeSyscallInfo("ioprio_set", Hex, Hex, Hex), + 252: makeSyscallInfo("ioprio_get", Hex, Hex), + 253: makeSyscallInfo("inotify_init"), + 254: makeSyscallInfo("inotify_add_watch", Hex, Hex, Hex), + 255: makeSyscallInfo("inotify_rm_watch", Hex, Hex), + 256: makeSyscallInfo("migrate_pages", Hex, Hex, Hex, Hex), + 257: makeSyscallInfo("openat", Hex, Path, Hex, Mode), + 258: makeSyscallInfo("mkdirat", Hex, Path, Hex), + 259: makeSyscallInfo("mknodat", Hex, Path, Mode, Hex), + 260: makeSyscallInfo("fchownat", Hex, Path, Hex, Hex, Hex), + 261: makeSyscallInfo("futimesat", Hex, Path, Hex), + 262: makeSyscallInfo("newfstatat", Hex, Path, Stat, Hex), + 263: makeSyscallInfo("unlinkat", Hex, Path, Hex), + 264: makeSyscallInfo("renameat", Hex, Path, Hex, Path), + 265: makeSyscallInfo("linkat", Hex, Path, Hex, Path, Hex), + 266: makeSyscallInfo("symlinkat", Path, Hex, Path), + 267: makeSyscallInfo("readlinkat", Hex, Path, ReadBuffer, Hex), + 268: makeSyscallInfo("fchmodat", Hex, Path, Mode), + 269: makeSyscallInfo("faccessat", Hex, Path, Oct, Hex), + 270: makeSyscallInfo("pselect6", Hex, Hex, Hex, Hex, Hex, Hex), + 271: makeSyscallInfo("ppoll", Hex, Hex, Timespec, Hex, Hex), + 272: makeSyscallInfo("unshare", Hex), + 273: makeSyscallInfo("set_robust_list", Hex, Hex), + 274: makeSyscallInfo("get_robust_list", Hex, Hex, Hex), + 275: makeSyscallInfo("splice", Hex, 
Hex, Hex, Hex, Hex, Hex), + 276: makeSyscallInfo("tee", Hex, Hex, Hex, Hex), + 277: makeSyscallInfo("sync_file_range", Hex, Hex, Hex, Hex), + 278: makeSyscallInfo("vmsplice", Hex, Hex, Hex, Hex), + 279: makeSyscallInfo("move_pages", Hex, Hex, Hex, Hex, Hex, Hex), + 280: makeSyscallInfo("utimensat", Hex, Path, UTimeTimespec, Hex), + 281: makeSyscallInfo("epoll_pwait", Hex, Hex, Hex, Hex, Hex, Hex), + 282: makeSyscallInfo("signalfd", Hex, Hex, Hex), + 283: makeSyscallInfo("timerfd_create", Hex, Hex), + 284: makeSyscallInfo("eventfd", Hex), + 285: makeSyscallInfo("fallocate", Hex, Hex, Hex, Hex), + 286: makeSyscallInfo("timerfd_settime", Hex, Hex, Hex, Hex), + 287: makeSyscallInfo("timerfd_gettime", Hex, Hex), + 288: makeSyscallInfo("accept4", Hex, PostSockAddr, SockLen, SockFlags), + 289: makeSyscallInfo("signalfd4", Hex, Hex, Hex, Hex), + 290: makeSyscallInfo("eventfd2", Hex, Hex), + 291: makeSyscallInfo("epoll_create1", Hex), + 292: makeSyscallInfo("dup3", Hex, Hex, Hex), + 293: makeSyscallInfo("pipe2", PipeFDs, Hex), + 294: makeSyscallInfo("inotify_init1", Hex), + 295: makeSyscallInfo("preadv", Hex, ReadIOVec, Hex, Hex), + 296: makeSyscallInfo("pwritev", Hex, WriteIOVec, Hex, Hex), + 297: makeSyscallInfo("rt_tgsigqueueinfo", Hex, Hex, Hex, Hex), + 298: makeSyscallInfo("perf_event_open", Hex, Hex, Hex, Hex, Hex), + 299: makeSyscallInfo("recvmmsg", Hex, Hex, Hex, Hex, Hex), + 300: makeSyscallInfo("fanotify_init", Hex, Hex), + 301: makeSyscallInfo("fanotify_mark", Hex, Hex, Hex, Hex, Hex), + 302: makeSyscallInfo("prlimit64", Hex, Hex, Hex, Hex), + 303: makeSyscallInfo("name_to_handle_at", Hex, Hex, Hex, Hex, Hex), + 304: makeSyscallInfo("open_by_handle_at", Hex, Hex, Hex), + 305: makeSyscallInfo("clock_adjtime", Hex, Hex), + 306: makeSyscallInfo("syncfs", Hex), + 307: makeSyscallInfo("sendmmsg", Hex, Hex, Hex, Hex), + 308: makeSyscallInfo("setns", Hex, Hex), + 309: makeSyscallInfo("getcpu", Hex, Hex, Hex), + 310: makeSyscallInfo("process_vm_readv", Hex, ReadIOVec, 
Hex, IOVec, Hex, Hex), + 311: makeSyscallInfo("process_vm_writev", Hex, IOVec, Hex, WriteIOVec, Hex, Hex), + 312: makeSyscallInfo("kcmp", Hex, Hex, Hex, Hex, Hex), + 313: makeSyscallInfo("finit_module", Hex, Hex, Hex), + 314: makeSyscallInfo("sched_setattr", Hex, Hex, Hex), + 315: makeSyscallInfo("sched_getattr", Hex, Hex, Hex), + 316: makeSyscallInfo("renameat2", Hex, Path, Hex, Path, Hex), + 317: makeSyscallInfo("seccomp", Hex, Hex, Hex), +} diff --git a/pkg/sentry/strace/open.go b/pkg/sentry/strace/open.go new file mode 100644 index 000000000..839d5eda7 --- /dev/null +++ b/pkg/sentry/strace/open.go @@ -0,0 +1,105 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strace + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi" +) + +// OpenMode represents the mode to open(2) a file. +var OpenMode = abi.ValueSet{ + { + Value: syscall.O_RDWR, + Name: "O_RDWR", + }, + { + Value: syscall.O_WRONLY, + Name: "O_WRONLY", + }, + { + Value: syscall.O_RDONLY, + Name: "O_RDONLY", + }, +} + +// OpenFlagSet is the set of open(2) flags. 
+var OpenFlagSet = abi.FlagSet{ + { + Flag: syscall.O_APPEND, + Name: "O_APPEND", + }, + { + Flag: syscall.O_ASYNC, + Name: "O_ASYNC", + }, + { + Flag: syscall.O_CLOEXEC, + Name: "O_CLOEXEC", + }, + { + Flag: syscall.O_CREAT, + Name: "O_CREAT", + }, + { + Flag: syscall.O_DIRECT, + Name: "O_DIRECT", + }, + { + Flag: syscall.O_DIRECTORY, + Name: "O_DIRECTORY", + }, + { + Flag: syscall.O_EXCL, + Name: "O_EXCL", + }, + { + Flag: syscall.O_NOATIME, + Name: "O_NOATIME", + }, + { + Flag: syscall.O_NOCTTY, + Name: "O_NOCTTY", + }, + { + Flag: syscall.O_NOFOLLOW, + Name: "O_NOFOLLOW", + }, + { + Flag: syscall.O_NONBLOCK, + Name: "O_NONBLOCK", + }, + { + Flag: 0x200000, // O_PATH + Name: "O_PATH", + }, + { + Flag: syscall.O_SYNC, + Name: "O_SYNC", + }, + { + Flag: syscall.O_TRUNC, + Name: "O_TRUNC", + }, +} + +func open(val uint64) string { + s := OpenMode.Parse(val & syscall.O_ACCMODE) + if flags := OpenFlagSet.Parse(val &^ syscall.O_ACCMODE); flags != "" { + s += "|" + flags + } + return s +} diff --git a/pkg/sentry/strace/ptrace.go b/pkg/sentry/strace/ptrace.go new file mode 100644 index 000000000..a0dabb27a --- /dev/null +++ b/pkg/sentry/strace/ptrace.go @@ -0,0 +1,178 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strace + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" +) + +// PtraceRequestSet are the possible ptrace(2) requests. 
+var PtraceRequestSet = abi.ValueSet{ + { + Value: syscall.PTRACE_TRACEME, + Name: "PTRACE_TRACEME", + }, + { + Value: syscall.PTRACE_PEEKTEXT, + Name: "PTRACE_PEEKTEXT", + }, + { + Value: syscall.PTRACE_PEEKDATA, + Name: "PTRACE_PEEKDATA", + }, + { + Value: syscall.PTRACE_PEEKUSR, + Name: "PTRACE_PEEKUSR", + }, + { + Value: syscall.PTRACE_POKETEXT, + Name: "PTRACE_POKETEXT", + }, + { + Value: syscall.PTRACE_POKEDATA, + Name: "PTRACE_POKEDATA", + }, + { + Value: syscall.PTRACE_POKEUSR, + Name: "PTRACE_POKEUSR", + }, + { + Value: syscall.PTRACE_CONT, + Name: "PTRACE_CONT", + }, + { + Value: syscall.PTRACE_KILL, + Name: "PTRACE_KILL", + }, + { + Value: syscall.PTRACE_SINGLESTEP, + Name: "PTRACE_SINGLESTEP", + }, + { + Value: syscall.PTRACE_ATTACH, + Name: "PTRACE_ATTACH", + }, + { + Value: syscall.PTRACE_DETACH, + Name: "PTRACE_DETACH", + }, + { + Value: syscall.PTRACE_SYSCALL, + Name: "PTRACE_SYSCALL", + }, + { + Value: syscall.PTRACE_SETOPTIONS, + Name: "PTRACE_SETOPTIONS", + }, + { + Value: syscall.PTRACE_GETEVENTMSG, + Name: "PTRACE_GETEVENTMSG", + }, + { + Value: syscall.PTRACE_GETSIGINFO, + Name: "PTRACE_GETSIGINFO", + }, + { + Value: syscall.PTRACE_SETSIGINFO, + Name: "PTRACE_SETSIGINFO", + }, + { + Value: syscall.PTRACE_GETREGSET, + Name: "PTRACE_GETREGSET", + }, + { + Value: syscall.PTRACE_SETREGSET, + Name: "PTRACE_SETREGSET", + }, + { + Value: kernel.PTRACE_SEIZE, + Name: "PTRACE_SEIZE", + }, + { + Value: kernel.PTRACE_INTERRUPT, + Name: "PTRACE_INTERRUPT", + }, + { + Value: kernel.PTRACE_LISTEN, + Name: "PTRACE_LISTEN", + }, + { + Value: kernel.PTRACE_PEEKSIGINFO, + Name: "PTRACE_PEEKSIGINFO", + }, + { + Value: kernel.PTRACE_GETSIGMASK, + Name: "PTRACE_GETSIGMASK", + }, + { + Value: kernel.PTRACE_SETSIGMASK, + Name: "PTRACE_SETSIGMASK", + }, + { + Value: syscall.PTRACE_GETREGS, + Name: "PTRACE_GETREGS", + }, + { + Value: syscall.PTRACE_SETREGS, + Name: "PTRACE_SETREGS", + }, + { + Value: syscall.PTRACE_GETFPREGS, + Name: "PTRACE_GETFPREGS", + }, + { + 
Value: syscall.PTRACE_SETFPREGS, + Name: "PTRACE_SETFPREGS", + }, + { + Value: syscall.PTRACE_GETFPXREGS, + Name: "PTRACE_GETFPXREGS", + }, + { + Value: syscall.PTRACE_SETFPXREGS, + Name: "PTRACE_SETFPXREGS", + }, + { + Value: syscall.PTRACE_OLDSETOPTIONS, + Name: "PTRACE_OLDSETOPTIONS", + }, + { + Value: syscall.PTRACE_GET_THREAD_AREA, + Name: "PTRACE_GET_THREAD_AREA", + }, + { + Value: syscall.PTRACE_SET_THREAD_AREA, + Name: "PTRACE_SET_THREAD_AREA", + }, + { + Value: syscall.PTRACE_ARCH_PRCTL, + Name: "PTRACE_ARCH_PRCTL", + }, + { + Value: syscall.PTRACE_SYSEMU, + Name: "PTRACE_SYSEMU", + }, + { + Value: syscall.PTRACE_SYSEMU_SINGLESTEP, + Name: "PTRACE_SYSEMU_SINGLESTEP", + }, + { + Value: syscall.PTRACE_SINGLEBLOCK, + Name: "PTRACE_SINGLEBLOCK", + }, +} diff --git a/pkg/sentry/strace/socket.go b/pkg/sentry/strace/socket.go new file mode 100644 index 000000000..48c072e96 --- /dev/null +++ b/pkg/sentry/strace/socket.go @@ -0,0 +1,674 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package strace + +import ( + "fmt" + "strings" + + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/control" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/epsocket" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/netlink" + slinux "gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// SocketFamily are the possible socket(2) families. +var SocketFamily = abi.ValueSet{ + { + Value: linux.AF_UNSPEC, + Name: "AF_UNSPEC", + }, + { + Value: linux.AF_UNIX, + Name: "AF_UNIX", + }, + { + Value: linux.AF_INET, + Name: "AF_INET", + }, + { + Value: linux.AF_AX25, + Name: "AF_AX25", + }, + { + Value: linux.AF_IPX, + Name: "AF_IPX", + }, + { + Value: linux.AF_APPLETALK, + Name: "AF_APPLETALK", + }, + { + Value: linux.AF_NETROM, + Name: "AF_NETROM", + }, + { + Value: linux.AF_BRIDGE, + Name: "AF_BRIDGE", + }, + { + Value: linux.AF_ATMPVC, + Name: "AF_ATMPVC", + }, + { + Value: linux.AF_X25, + Name: "AF_X25", + }, + { + Value: linux.AF_INET6, + Name: "AF_INET6", + }, + { + Value: linux.AF_ROSE, + Name: "AF_ROSE", + }, + { + Value: linux.AF_DECnet, + Name: "AF_DECnet", + }, + { + Value: linux.AF_NETBEUI, + Name: "AF_NETBEUI", + }, + { + Value: linux.AF_SECURITY, + Name: "AF_SECURITY", + }, + { + Value: linux.AF_KEY, + Name: "AF_KEY", + }, + { + Value: linux.AF_NETLINK, + Name: "AF_NETLINK", + }, + { + Value: linux.AF_PACKET, + Name: "AF_PACKET", + }, + { + Value: linux.AF_ASH, + Name: "AF_ASH", + }, + { + Value: linux.AF_ECONET, + Name: "AF_ECONET", + }, + { + Value: linux.AF_ATMSVC, + Name: "AF_ATMSVC", + }, + { + Value: linux.AF_RDS, + Name: "AF_RDS", + }, + { + Value: linux.AF_SNA, + Name: "AF_SNA", + }, + { + Value: linux.AF_IRDA, + Name: "AF_IRDA", + }, + { + Value: linux.AF_PPPOX, + Name: "AF_PPPOX", + }, + { 
+ Value: linux.AF_WANPIPE, + Name: "AF_WANPIPE", + }, + { + Value: linux.AF_LLC, + Name: "AF_LLC", + }, + { + Value: linux.AF_IB, + Name: "AF_IB", + }, + { + Value: linux.AF_MPLS, + Name: "AF_MPLS", + }, + { + Value: linux.AF_CAN, + Name: "AF_CAN", + }, + { + Value: linux.AF_TIPC, + Name: "AF_TIPC", + }, + { + Value: linux.AF_BLUETOOTH, + Name: "AF_BLUETOOTH", + }, + { + Value: linux.AF_IUCV, + Name: "AF_IUCV", + }, + { + Value: linux.AF_RXRPC, + Name: "AF_RXRPC", + }, + { + Value: linux.AF_ISDN, + Name: "AF_ISDN", + }, + { + Value: linux.AF_PHONET, + Name: "AF_PHONET", + }, + { + Value: linux.AF_IEEE802154, + Name: "AF_IEEE802154", + }, + { + Value: linux.AF_CAIF, + Name: "AF_CAIF", + }, + { + Value: linux.AF_ALG, + Name: "AF_ALG", + }, + { + Value: linux.AF_NFC, + Name: "AF_NFC", + }, + { + Value: linux.AF_VSOCK, + Name: "AF_VSOCK", + }, +} + +// SocketType are the possible socket(2) types. +var SocketType = abi.ValueSet{ + { + Value: linux.SOCK_STREAM, + Name: "SOCK_STREAM", + }, + { + Value: linux.SOCK_DGRAM, + Name: "SOCK_DGRAM", + }, + { + Value: linux.SOCK_RAW, + Name: "SOCK_RAW", + }, + { + Value: linux.SOCK_RDM, + Name: "SOCK_RDM", + }, + { + Value: linux.SOCK_SEQPACKET, + Name: "SOCK_SEQPACKET", + }, + { + Value: linux.SOCK_DCCP, + Name: "SOCK_DCCP", + }, + { + Value: linux.SOCK_PACKET, + Name: "SOCK_PACKET", + }, +} + +// SocketFlagSet are the possible socket(2) flags. +var SocketFlagSet = abi.FlagSet{ + { + Flag: linux.SOCK_CLOEXEC, + Name: "SOCK_CLOEXEC", + }, + { + Flag: linux.SOCK_NONBLOCK, + Name: "SOCK_NONBLOCK", + }, +} + +// ipProtocol are the possible socket(2) types for INET and INET6 sockets. 
+var ipProtocol = abi.ValueSet{ + { + Value: linux.IPPROTO_IP, + Name: "IPPROTO_IP", + }, + { + Value: linux.IPPROTO_ICMP, + Name: "IPPROTO_ICMP", + }, + { + Value: linux.IPPROTO_IGMP, + Name: "IPPROTO_IGMP", + }, + { + Value: linux.IPPROTO_IPIP, + Name: "IPPROTO_IPIP", + }, + { + Value: linux.IPPROTO_TCP, + Name: "IPPROTO_TCP", + }, + { + Value: linux.IPPROTO_EGP, + Name: "IPPROTO_EGP", + }, + { + Value: linux.IPPROTO_PUP, + Name: "IPPROTO_PUP", + }, + { + Value: linux.IPPROTO_UDP, + Name: "IPPROTO_UDP", + }, + { + Value: linux.IPPROTO_IDP, + Name: "IPPROTO_IDP", + }, + { + Value: linux.IPPROTO_TP, + Name: "IPPROTO_TP", + }, + { + Value: linux.IPPROTO_DCCP, + Name: "IPPROTO_DCCP", + }, + { + Value: linux.IPPROTO_IPV6, + Name: "IPPROTO_IPV6", + }, + { + Value: linux.IPPROTO_RSVP, + Name: "IPPROTO_RSVP", + }, + { + Value: linux.IPPROTO_GRE, + Name: "IPPROTO_GRE", + }, + { + Value: linux.IPPROTO_ESP, + Name: "IPPROTO_ESP", + }, + { + Value: linux.IPPROTO_AH, + Name: "IPPROTO_AH", + }, + { + Value: linux.IPPROTO_MTP, + Name: "IPPROTO_MTP", + }, + { + Value: linux.IPPROTO_BEETPH, + Name: "IPPROTO_BEETPH", + }, + { + Value: linux.IPPROTO_ENCAP, + Name: "IPPROTO_ENCAP", + }, + { + Value: linux.IPPROTO_PIM, + Name: "IPPROTO_PIM", + }, + { + Value: linux.IPPROTO_COMP, + Name: "IPPROTO_COMP", + }, + { + Value: linux.IPPROTO_SCTP, + Name: "IPPROTO_SCTP", + }, + { + Value: linux.IPPROTO_UDPLITE, + Name: "IPPROTO_UDPLITE", + }, + { + Value: linux.IPPROTO_MPLS, + Name: "IPPROTO_MPLS", + }, + { + Value: linux.IPPROTO_RAW, + Name: "IPPROTO_RAW", + }, +} + +// SocketProtocol are the possible socket(2) protocols for each protocol family. 
+var SocketProtocol = map[int32]abi.ValueSet{ + linux.AF_INET: ipProtocol, + linux.AF_INET6: ipProtocol, + linux.AF_NETLINK: { + { + Value: linux.NETLINK_ROUTE, + Name: "NETLINK_ROUTE", + }, + { + Value: linux.NETLINK_UNUSED, + Name: "NETLINK_UNUSED", + }, + { + Value: linux.NETLINK_USERSOCK, + Name: "NETLINK_USERSOCK", + }, + { + Value: linux.NETLINK_FIREWALL, + Name: "NETLINK_FIREWALL", + }, + { + Value: linux.NETLINK_SOCK_DIAG, + Name: "NETLINK_SOCK_DIAG", + }, + { + Value: linux.NETLINK_NFLOG, + Name: "NETLINK_NFLOG", + }, + { + Value: linux.NETLINK_XFRM, + Name: "NETLINK_XFRM", + }, + { + Value: linux.NETLINK_SELINUX, + Name: "NETLINK_SELINUX", + }, + { + Value: linux.NETLINK_ISCSI, + Name: "NETLINK_ISCSI", + }, + { + Value: linux.NETLINK_AUDIT, + Name: "NETLINK_AUDIT", + }, + { + Value: linux.NETLINK_FIB_LOOKUP, + Name: "NETLINK_FIB_LOOKUP", + }, + { + Value: linux.NETLINK_CONNECTOR, + Name: "NETLINK_CONNECTOR", + }, + { + Value: linux.NETLINK_NETFILTER, + Name: "NETLINK_NETFILTER", + }, + { + Value: linux.NETLINK_IP6_FW, + Name: "NETLINK_IP6_FW", + }, + { + Value: linux.NETLINK_DNRTMSG, + Name: "NETLINK_DNRTMSG", + }, + { + Value: linux.NETLINK_KOBJECT_UEVENT, + Name: "NETLINK_KOBJECT_UEVENT", + }, + { + Value: linux.NETLINK_GENERIC, + Name: "NETLINK_GENERIC", + }, + { + Value: linux.NETLINK_SCSITRANSPORT, + Name: "NETLINK_SCSITRANSPORT", + }, + { + Value: linux.NETLINK_ECRYPTFS, + Name: "NETLINK_ECRYPTFS", + }, + { + Value: linux.NETLINK_RDMA, + Name: "NETLINK_RDMA", + }, + { + Value: linux.NETLINK_CRYPTO, + Name: "NETLINK_CRYPTO", + }, + }, +} + +var controlMessageType = map[int32]string{ + linux.SCM_RIGHTS: "SCM_RIGHTS", + linux.SCM_CREDENTIALS: "SCM_CREDENTIALS", +} + +func cmsghdr(t *kernel.Task, addr usermem.Addr, length uint64, maxBytes uint64) string { + if length > maxBytes { + return fmt.Sprintf("%#x (error decoding control: invalid length (%d))", addr, length) + } + + buf := make([]byte, length) + if _, err := t.CopyIn(addr, &buf); err != nil { + 
return fmt.Sprintf("%#x (error decoding control: %v)", addr, err) + } + + var strs []string + + for i := 0; i < len(buf); { + if i+linux.SizeOfControlMessageHeader > len(buf) { + strs = append(strs, "{invalid control message (too short)}") + break + } + + var h linux.ControlMessageHeader + binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageHeader], usermem.ByteOrder, &h) + i += linux.SizeOfControlMessageHeader + + var skipData bool + level := "SOL_SOCKET" + if h.Level != linux.SOL_SOCKET { + skipData = true + level = fmt.Sprint(h.Level) + } + + typ, ok := controlMessageType[h.Type] + if !ok { + skipData = true + typ = fmt.Sprint(h.Type) + } + + if h.Length > uint64(len(buf)-i) { + strs = append(strs, fmt.Sprintf( + "{level=%s, type=%s, length=%d, content extends beyond buffer}", + level, + typ, + h.Length, + )) + break + } + + width := t.Arch().Width() + length := int(h.Length) - linux.SizeOfControlMessageHeader + + if skipData { + strs = append(strs, fmt.Sprintf("{level=%s, type=%s, length=%d}", level, typ, h.Length)) + i += control.AlignUp(i+length, width) + continue + } + + switch h.Type { + case linux.SCM_RIGHTS: + rightsSize := control.AlignDown(length, linux.SizeOfControlMessageRight) + + numRights := rightsSize / linux.SizeOfControlMessageRight + fds := make(linux.ControlMessageRights, numRights) + binary.Unmarshal(buf[i:i+rightsSize], usermem.ByteOrder, &fds) + + rights := make([]string, 0, len(fds)) + for _, fd := range fds { + rights = append(rights, fmt.Sprint(fd)) + } + + strs = append(strs, fmt.Sprintf( + "{level=%s, type=%s, length=%d, content: %s}", + level, + typ, + h.Length, + strings.Join(rights, ","), + )) + + i += control.AlignUp(length, width) + + case linux.SCM_CREDENTIALS: + if length < linux.SizeOfControlMessageCredentials { + strs = append(strs, fmt.Sprintf( + "{level=%s, type=%s, length=%d, content too short}", + level, + typ, + h.Length, + )) + i += control.AlignUp(length, width) + break + } + + var creds linux.ControlMessageCredentials + 
binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageCredentials], usermem.ByteOrder, &creds) + + strs = append(strs, fmt.Sprintf( + "{level=%s, type=%s, length=%d, pid: %d, uid: %d, gid: %d}", + level, + typ, + h.Length, + creds.PID, + creds.UID, + creds.GID, + )) + + i += control.AlignUp(length, width) + + default: + panic("unreachable") + } + } + + return fmt.Sprintf("%#x %s", addr, strings.Join(strs, ", ")) +} + +func msghdr(t *kernel.Task, addr usermem.Addr, printContent bool, maxBytes uint64) string { + var msg slinux.MessageHeader64 + if err := slinux.CopyInMessageHeader64(t, addr, &msg); err != nil { + return fmt.Sprintf("%#x (error decoding msghdr: %v)", addr, err) + } + s := fmt.Sprintf( + "%#x {name=%#x, namelen=%d, iovecs=%s", + addr, + msg.Name, + msg.NameLen, + iovecs(t, usermem.Addr(msg.Iov), int(msg.IovLen), printContent, maxBytes), + ) + if printContent { + s = fmt.Sprintf("%s, control={%s}", s, cmsghdr(t, usermem.Addr(msg.Control), msg.ControlLen, maxBytes)) + } else { + s = fmt.Sprintf("%s, control=%#x, control_len=%d", s, msg.Control, msg.ControlLen) + } + return fmt.Sprintf("%s, flags=%d}", s, msg.Flags) +} + +func sockAddr(t *kernel.Task, addr usermem.Addr, length uint32) string { + if addr == 0 { + return "null" + } + + b, err := slinux.CaptureAddress(t, addr, length) + if err != nil { + return fmt.Sprintf("%#x {error reading address: %v}", addr, err) + } + + // Extract address family. 
+ if len(b) < 2 { + return fmt.Sprintf("%#x {address too short: %d bytes}", addr, len(b)) + } + family := usermem.ByteOrder.Uint16(b) + + familyStr := SocketFamily.Parse(uint64(family)) + + switch family { + case linux.AF_INET, linux.AF_INET6, linux.AF_UNIX: + fa, err := epsocket.GetAddress(int(family), b) + if err != nil { + return fmt.Sprintf("%#x {Family: %s, error extracting address: %v}", addr, familyStr, err) + } + + if family == linux.AF_UNIX { + return fmt.Sprintf("%#x {Family: %s, Addr: %q}", addr, familyStr, string(fa.Addr)) + } + + return fmt.Sprintf("%#x {Family: %s, Addr: %v, Port: %d}", addr, familyStr, fa.Addr, fa.Port) + case linux.AF_NETLINK: + sa, err := netlink.ExtractSockAddr(b) + if err != nil { + return fmt.Sprintf("%#x {Family: %s, error extracting address: %v}", addr, familyStr, err) + } + return fmt.Sprintf("%#x {Family: %s, PortID: %d, Groups: %d}", addr, familyStr, sa.PortID, sa.Groups) + default: + return fmt.Sprintf("%#x {Family: %s, family addr format unknown}", addr, familyStr) + } +} + +func postSockAddr(t *kernel.Task, addr usermem.Addr, lengthPtr usermem.Addr) string { + if addr == 0 { + return "null" + } + + if lengthPtr == 0 { + return fmt.Sprintf("%#x {length null}", addr) + } + + l, err := copySockLen(t, lengthPtr) + if err != nil { + return fmt.Sprintf("%#x {error reading length: %v}", addr, err) + } + + return sockAddr(t, addr, l) +} + +func copySockLen(t *kernel.Task, addr usermem.Addr) (uint32, error) { + // socklen_t is 32-bits. 
+ var l uint32 + _, err := t.CopyIn(addr, &l) + return l, err +} + +func sockLenPointer(t *kernel.Task, addr usermem.Addr) string { + if addr == 0 { + return "null" + } + l, err := copySockLen(t, addr) + if err != nil { + return fmt.Sprintf("%#x {error reading length: %v}", addr, err) + } + return fmt.Sprintf("%#x {length=%v}", addr, l) +} + +func sockType(stype int32) string { + s := SocketType.Parse(uint64(stype & linux.SOCK_TYPE_MASK)) + if flags := SocketFlagSet.Parse(uint64(stype &^ linux.SOCK_TYPE_MASK)); flags != "" { + s += "|" + flags + } + return s +} + +func sockProtocol(family, protocol int32) string { + protocols, ok := SocketProtocol[family] + if !ok { + return fmt.Sprintf("%#x", protocol) + } + return protocols.Parse(uint64(protocol)) +} + +func sockFlags(flags int32) string { + if flags == 0 { + return "0" + } + return SocketFlagSet.Parse(uint64(flags)) +} diff --git a/pkg/sentry/strace/strace.go b/pkg/sentry/strace/strace.go new file mode 100644 index 000000000..4cd16d2f8 --- /dev/null +++ b/pkg/sentry/strace/strace.go @@ -0,0 +1,666 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package strace implements the logic to print out the input and the return value +// of each traced syscall. 
+package strace + +import ( + "encoding/binary" + "fmt" + "strconv" + "strings" + "syscall" + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/bits" + "gvisor.googlesource.com/gvisor/pkg/eventchannel" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + pb "gvisor.googlesource.com/gvisor/pkg/sentry/strace/strace_go_proto" + slinux "gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// DefaultLogMaximumSize is the default LogMaximumSize. +const DefaultLogMaximumSize = 1024 + +// LogMaximumSize determines the maximum display size for data blobs (read, +// write, etc.). +var LogMaximumSize uint = DefaultLogMaximumSize + +// EventMaximumSize determines the maximum size for data blobs (read, write, +// etc.) sent over the event channel. Default is 0 because most clients cannot +// do anything useful with binary text dump of byte array arguments. 
+var EventMaximumSize uint + +func iovecs(t *kernel.Task, addr usermem.Addr, iovcnt int, printContent bool, maxBytes uint64) string { + if iovcnt < 0 || iovcnt > linux.UIO_MAXIOV { + return fmt.Sprintf("%#x (error decoding iovecs: invalid iovcnt)", addr) + } + ars, err := t.CopyInIovecs(addr, iovcnt) + if err != nil { + return fmt.Sprintf("%#x (error decoding iovecs: %v)", addr, err) + } + + var totalBytes uint64 + var truncated bool + iovs := make([]string, iovcnt) + for i := 0; !ars.IsEmpty(); i, ars = i+1, ars.Tail() { + ar := ars.Head() + if ar.Length() == 0 || !printContent { + iovs[i] = fmt.Sprintf("{base=%#x, len=%d}", ar.Start, ar.Length()) + continue + } + + size := uint64(ar.Length()) + if truncated || totalBytes+size > maxBytes { + truncated = true + size = maxBytes - totalBytes + } else { + totalBytes += uint64(ar.Length()) + } + + b := make([]byte, size) + amt, err := t.CopyIn(ar.Start, b) + if err != nil { + iovs[i] = fmt.Sprintf("{base=%#x, len=%d, %q..., error decoding string: %v}", ar.Start, ar.Length(), b[:amt], err) + continue + } + + dot := "" + if truncated { + // Indicate truncation. + dot = "..." + } + iovs[i] = fmt.Sprintf("{base=%#x, len=%d, %q%s}", ar.Start, ar.Length(), b[:amt], dot) + } + + return fmt.Sprintf("%#x %s", addr, strings.Join(iovs, ", ")) +} + +func dump(t *kernel.Task, addr usermem.Addr, size uint, maximumBlobSize uint) string { + origSize := size + if size > maximumBlobSize { + size = maximumBlobSize + } + if size == 0 { + return "" + } + + b := make([]byte, size) + amt, err := t.CopyIn(addr, b) + if err != nil { + return fmt.Sprintf("%#x (error decoding string: %s)", addr, err) + } + + dot := "" + if uint(amt) < origSize { + // ... if we truncated the dump. + dot = "..." 
+ } + + return fmt.Sprintf("%#x %q%s", addr, b[:amt], dot) +} + +func path(t *kernel.Task, addr usermem.Addr) string { + path, err := t.CopyInString(addr, syscall.PathMax) + if err != nil { + return fmt.Sprintf("%#x (error decoding path: %s)", addr, err) + } + return fmt.Sprintf("%#x %s", addr, path) +} + +func fdpair(t *kernel.Task, addr usermem.Addr) string { + var fds [2]int32 + _, err := t.CopyIn(addr, &fds) + if err != nil { + return fmt.Sprintf("%#x (error decoding fds: %s)", addr, err) + } + + return fmt.Sprintf("%#x [%d %d]", addr, fds[0], fds[1]) +} + +func uname(t *kernel.Task, addr usermem.Addr) string { + var u linux.UtsName + if _, err := t.CopyIn(addr, &u); err != nil { + return fmt.Sprintf("%#x (error decoding utsname: %s)", addr, err) + } + + return fmt.Sprintf("%#x %s", addr, u) +} + +func utimensTimespec(t *kernel.Task, addr usermem.Addr) string { + if addr == 0 { + return "null" + } + + var tim linux.Timespec + if _, err := t.CopyIn(addr, &tim); err != nil { + return fmt.Sprintf("%#x (error decoding timespec: %s)", addr, err) + } + + var ns string + switch tim.Nsec { + case linux.UTIME_NOW: + ns = "UTIME_NOW" + case linux.UTIME_OMIT: + ns = "UTIME_OMIT" + default: + ns = fmt.Sprintf("%v", tim.Nsec) + } + return fmt.Sprintf("%#x {sec=%v nsec=%s}", addr, tim.Sec, ns) +} + +func timespec(t *kernel.Task, addr usermem.Addr) string { + if addr == 0 { + return "null" + } + + var tim linux.Timespec + if _, err := t.CopyIn(addr, &tim); err != nil { + return fmt.Sprintf("%#x (error decoding timespec: %s)", addr, err) + } + return fmt.Sprintf("%#x {sec=%v nsec=%v}", addr, tim.Sec, tim.Nsec) +} + +func timeval(t *kernel.Task, addr usermem.Addr) string { + if addr == 0 { + return "null" + } + + var tim linux.Timeval + if _, err := t.CopyIn(addr, &tim); err != nil { + return fmt.Sprintf("%#x (error decoding timeval: %s)", addr, err) + } + + return fmt.Sprintf("%#x {sec=%v usec=%v}", addr, tim.Sec, tim.Usec) +} + +func utimbuf(t *kernel.Task, addr usermem.Addr) 
string { + if addr == 0 { + return "null" + } + + var utim syscall.Utimbuf + if _, err := t.CopyIn(addr, &utim); err != nil { + return fmt.Sprintf("%#x (error decoding utimbuf: %s)", addr, err) + } + + return fmt.Sprintf("%#x {actime=%v, modtime=%v}", addr, utim.Actime, utim.Modtime) +} + +func stat(t *kernel.Task, addr usermem.Addr) string { + if addr == 0 { + return "null" + } + + var stat linux.Stat + if _, err := t.CopyIn(addr, &stat); err != nil { + return fmt.Sprintf("%#x (error decoding stat: %s)", addr, err) + } + return fmt.Sprintf("%#x {dev=%d, ino=%d, mode=%s, nlink=%d, uid=%d, gid=%d, rdev=%d, size=%d, blksize=%d, blocks=%d, atime=%s, mtime=%s, ctime=%s}", addr, stat.Dev, stat.Ino, linux.FileMode(stat.Mode), stat.Nlink, stat.UID, stat.GID, stat.Rdev, stat.Size, stat.Blksize, stat.Blocks, time.Unix(stat.ATime.Sec, stat.ATime.Nsec), time.Unix(stat.MTime.Sec, stat.MTime.Nsec), time.Unix(stat.CTime.Sec, stat.CTime.Nsec)) +} + +func itimerval(t *kernel.Task, addr usermem.Addr) string { + if addr == 0 { + return "null" + } + + interval := timeval(t, addr) + value := timeval(t, addr+usermem.Addr(binary.Size(linux.Timeval{}))) + return fmt.Sprintf("%#x {interval=%s, value=%s}", addr, interval, value) +} + +func stringVector(t *kernel.Task, addr usermem.Addr) string { + vec, err := t.CopyInVector(addr, slinux.ExecMaxElemSize, slinux.ExecMaxTotalSize) + if err != nil { + return fmt.Sprintf("%#x {error copying vector: %v}", addr, err) + } + s := fmt.Sprintf("%#x [", addr) + for i, v := range vec { + if i != 0 { + s += ", " + } + s += fmt.Sprintf("%q", v) + } + s += "]" + return s +} + +func rusage(t *kernel.Task, addr usermem.Addr) string { + if addr == 0 { + return "null" + } + + var ru linux.Rusage + if _, err := t.CopyIn(addr, &ru); err != nil { + return fmt.Sprintf("%#x (error decoding rusage: %s)", addr, err) + } + return fmt.Sprintf("%#x %+v", addr, ru) +} + +// pre fills in the pre-execution arguments for a system call. 
If an argument +// cannot be interpreted before the system call is executed, then a hex value +// will be used. Note that a full output slice will always be provided, that is +// len(return) == len(args). +func (i *SyscallInfo) pre(t *kernel.Task, args arch.SyscallArguments, maximumBlobSize uint) []string { + var output []string + + for arg := range args { + if arg >= len(i.format) { + break + } + switch i.format[arg] { + case WriteBuffer: + output = append(output, dump(t, args[arg].Pointer(), args[arg+1].SizeT(), maximumBlobSize)) + case WriteIOVec: + output = append(output, iovecs(t, args[arg].Pointer(), int(args[arg+1].Int()), true /* content */, uint64(maximumBlobSize))) + case IOVec: + output = append(output, iovecs(t, args[arg].Pointer(), int(args[arg+1].Int()), false /* content */, uint64(maximumBlobSize))) + case SendMsgHdr: + output = append(output, msghdr(t, args[arg].Pointer(), true /* content */, uint64(maximumBlobSize))) + case RecvMsgHdr: + output = append(output, msghdr(t, args[arg].Pointer(), false /* content */, uint64(maximumBlobSize))) + case Path: + output = append(output, path(t, args[arg].Pointer())) + case ExecveStringVector: + output = append(output, stringVector(t, args[arg].Pointer())) + case SockAddr: + output = append(output, sockAddr(t, args[arg].Pointer(), uint32(args[arg+1].Uint64()))) + case SockLen: + output = append(output, sockLenPointer(t, args[arg].Pointer())) + case SockFamily: + output = append(output, SocketFamily.Parse(uint64(args[arg].Int()))) + case SockType: + output = append(output, sockType(args[arg].Int())) + case SockProtocol: + output = append(output, sockProtocol(args[arg-2].Int(), args[arg].Int())) + case SockFlags: + output = append(output, sockFlags(args[arg].Int())) + case Timespec: + output = append(output, timespec(t, args[arg].Pointer())) + case UTimeTimespec: + output = append(output, utimensTimespec(t, args[arg].Pointer())) + case ItimerVal: + output = append(output, itimerval(t, args[arg].Pointer())) + 
case Timeval: + output = append(output, timeval(t, args[arg].Pointer())) + case Utimbuf: + output = append(output, utimbuf(t, args[arg].Pointer())) + case CloneFlags: + output = append(output, CloneFlagSet.Parse(uint64(args[arg].Uint()))) + case OpenFlags: + output = append(output, open(uint64(args[arg].Uint()))) + case Mode: + output = append(output, linux.FileMode(args[arg].ModeT()).String()) + case FutexOp: + output = append(output, futex(uint64(args[arg].Uint()))) + case PtraceRequest: + output = append(output, PtraceRequestSet.Parse(args[arg].Uint64())) + case Oct: + output = append(output, "0o"+strconv.FormatUint(args[arg].Uint64(), 8)) + case Hex: + fallthrough + default: + output = append(output, "0x"+strconv.FormatUint(args[arg].Uint64(), 16)) + } + } + + return output +} + +// post fills in the post-execution arguments for a system call. This modifies +// the given output slice in place with arguments that may only be interpreted +// after the system call has been executed. +func (i *SyscallInfo) post(t *kernel.Task, args arch.SyscallArguments, rval uintptr, output []string, maximumBlobSize uint) { + for arg := range output { + if arg >= len(i.format) { + break + } + switch i.format[arg] { + case ReadBuffer: + output[arg] = dump(t, args[arg].Pointer(), uint(rval), maximumBlobSize) + case ReadIOVec: + printLength := uint64(rval) + if printLength > uint64(maximumBlobSize) { + printLength = uint64(maximumBlobSize) + } + output[arg] = iovecs(t, args[arg].Pointer(), int(args[arg+1].Int()), true /* content */, printLength) + case WriteIOVec, IOVec, WriteBuffer: + // We already have a big blast from write. + output[arg] = "..." 
+ case SendMsgHdr:
+ output[arg] = msghdr(t, args[arg].Pointer(), false /* content */, uint64(maximumBlobSize))
+ case RecvMsgHdr:
+ output[arg] = msghdr(t, args[arg].Pointer(), true /* content */, uint64(maximumBlobSize))
+ case PipeFDs:
+ output[arg] = fdpair(t, args[arg].Pointer())
+ case Uname:
+ output[arg] = uname(t, args[arg].Pointer())
+ case Stat:
+ output[arg] = stat(t, args[arg].Pointer())
+ case PostSockAddr:
+ output[arg] = postSockAddr(t, args[arg].Pointer(), args[arg+1].Pointer())
+ case SockLen:
+ output[arg] = sockLenPointer(t, args[arg].Pointer())
+ case PostTimespec:
+ output[arg] = timespec(t, args[arg].Pointer())
+ case PostItimerVal:
+ output[arg] = itimerval(t, args[arg].Pointer())
+ case Timeval:
+ output[arg] = timeval(t, args[arg].Pointer())
+ case Rusage:
+ output[arg] = rusage(t, args[arg].Pointer())
+ }
+ }
+}
+
+// printEnter prints the given system call entry.
+func (i *SyscallInfo) printEnter(t *kernel.Task, args arch.SyscallArguments) []string {
+ output := i.pre(t, args, LogMaximumSize)
+
+ switch len(output) {
+ case 0:
+ t.Infof("%s E %s()", t.Name(), i.name)
+ case 1:
+ t.Infof("%s E %s(%s)", t.Name(), i.name,
+ output[0])
+ case 2:
+ t.Infof("%s E %s(%s, %s)", t.Name(), i.name,
+ output[0], output[1])
+ case 3:
+ t.Infof("%s E %s(%s, %s, %s)", t.Name(), i.name,
+ output[0], output[1], output[2])
+ case 4:
+ t.Infof("%s E %s(%s, %s, %s, %s)", t.Name(), i.name,
+ output[0], output[1], output[2], output[3])
+ case 5:
+ t.Infof("%s E %s(%s, %s, %s, %s, %s)", t.Name(), i.name,
+ output[0], output[1], output[2], output[3], output[4])
+ case 6:
+ t.Infof("%s E %s(%s, %s, %s, %s, %s, %s)", t.Name(), i.name,
+ output[0], output[1], output[2], output[3], output[4], output[5])
+ }
+
+ return output
+}
+
+// printExit prints the given system call exit. 
+func (i *SyscallInfo) printExit(t *kernel.Task, elapsed time.Duration, output []string, args arch.SyscallArguments, retval uintptr, err error, errno int) { + var rval string + if err == nil { + // Fill in the output after successful execution. + i.post(t, args, retval, output, LogMaximumSize) + rval = fmt.Sprintf("%#x (%v)", retval, elapsed) + } else { + rval = fmt.Sprintf("%#x errno=%d (%s) (%v)", retval, errno, err, elapsed) + } + + switch len(output) { + case 0: + t.Infof("%s X %s() = %s", t.Name(), i.name, + rval) + case 1: + t.Infof("%s X %s(%s) = %s", t.Name(), i.name, + output[0], rval) + case 2: + t.Infof("%s X %s(%s, %s) = %s", t.Name(), i.name, + output[0], output[1], rval) + case 3: + t.Infof("%s X %s(%s, %s, %s) = %s", t.Name(), i.name, + output[0], output[1], output[2], rval) + case 4: + t.Infof("%s X %s(%s, %s, %s, %s) = %s", t.Name(), i.name, + output[0], output[1], output[2], output[3], rval) + case 5: + t.Infof("%s X %s(%s, %s, %s, %s, %s) = %s", t.Name(), i.name, + output[0], output[1], output[2], output[3], output[4], rval) + case 6: + t.Infof("%s X %s(%s, %s, %s, %s, %s, %s) = %s", t.Name(), i.name, + output[0], output[1], output[2], output[3], output[4], output[5], rval) + } +} + +// sendEnter sends the syscall enter to event log. +func (i *SyscallInfo) sendEnter(t *kernel.Task, args arch.SyscallArguments) []string { + output := i.pre(t, args, EventMaximumSize) + + event := pb.Strace{ + Process: t.Name(), + Function: i.name, + Info: &pb.Strace_Enter{ + Enter: &pb.StraceEnter{}, + }, + } + for _, arg := range output { + event.Args = append(event.Args, arg) + } + eventchannel.Emit(&event) + + return output +} + +// sendExit sends the syscall exit to event log. +func (i *SyscallInfo) sendExit(t *kernel.Task, elapsed time.Duration, output []string, args arch.SyscallArguments, rval uintptr, err error, errno int) { + if err == nil { + // Fill in the output after successful execution. 
+ i.post(t, args, rval, output, EventMaximumSize) + } + + exit := &pb.StraceExit{ + Return: fmt.Sprintf("%#x", rval), + ElapsedNs: elapsed.Nanoseconds(), + } + if err != nil { + exit.Error = err.Error() + exit.ErrNo = int64(errno) + } + event := pb.Strace{ + Process: t.Name(), + Function: i.name, + Info: &pb.Strace_Exit{Exit: exit}, + } + for _, arg := range output { + event.Args = append(event.Args, arg) + } + eventchannel.Emit(&event) +} + +type syscallContext struct { + info SyscallInfo + args arch.SyscallArguments + start time.Time + logOutput []string + eventOutput []string + flags uint32 +} + +// SyscallEnter implements kernel.Stracer.SyscallEnter. It logs the syscall +// entry trace. +func (s SyscallMap) SyscallEnter(t *kernel.Task, sysno uintptr, args arch.SyscallArguments, flags uint32) interface{} { + info, ok := s[sysno] + if !ok { + info = SyscallInfo{ + name: fmt.Sprintf("sys_%d", sysno), + format: defaultFormat, + } + } + + var output, eventOutput []string + if bits.IsOn32(flags, kernel.StraceEnableLog) { + output = info.printEnter(t, args) + } + if bits.IsOn32(flags, kernel.StraceEnableEvent) { + eventOutput = info.sendEnter(t, args) + } + + return &syscallContext{ + info: info, + args: args, + start: time.Now(), + logOutput: output, + eventOutput: eventOutput, + flags: flags, + } +} + +// SyscallExit implements kernel.Stracer.SyscallExit. It logs the syscall +// exit trace. +func (s SyscallMap) SyscallExit(context interface{}, t *kernel.Task, sysno, rval uintptr, err error) { + errno := t.ExtractErrno(err, int(sysno)) + c := context.(*syscallContext) + + elapsed := time.Since(c.start) + if bits.IsOn32(c.flags, kernel.StraceEnableLog) { + c.info.printExit(t, elapsed, c.logOutput, c.args, rval, err, errno) + } + if bits.IsOn32(c.flags, kernel.StraceEnableEvent) { + c.info.sendExit(t, elapsed, c.eventOutput, c.args, rval, err, errno) + } +} + +// ConvertToSysnoMap converts the names to a map keyed on the syscall number and value set to true. 
+// The map is in a convenient format to call SyscallFlagsTable.Enable(). +func (s SyscallMap) ConvertToSysnoMap(syscalls []string) (map[uintptr]bool, error) { + if syscalls == nil { + // Sentinel: no list. + return nil, nil + } + + l := make(map[uintptr]bool) + for _, sc := range syscalls { + // Try to match this system call. + sysno, ok := s.ConvertToSysno(sc) + if !ok { + return nil, fmt.Errorf("syscall %q not found", sc) + } + l[sysno] = true + } + + // Success. + return l, nil +} + +// ConvertToSysno converts the name to system call number. Returns false +// if syscall with same name is not found. +func (s SyscallMap) ConvertToSysno(syscall string) (uintptr, bool) { + for sysno, info := range s { + if info.name != "" && info.name == syscall { + return sysno, true + } + } + return 0, false +} + +// Name returns the syscall name. +func (s SyscallMap) Name(sysno uintptr) string { + if info, ok := s[sysno]; ok { + return info.name + } + return fmt.Sprintf("sys_%d", sysno) +} + +// Initialize prepares all syscall tables for use by this package. +// +// N.B. This is not in an init function because we can't be sure all syscall +// tables are registered with the kernel when init runs. +// +// TODO: remove kernel package dependencies from this package and +// have the kernel package self-initialize all syscall tables. +func Initialize() { + for _, table := range kernel.SyscallTables() { + // Is this known? + sys, ok := Lookup(table.OS, table.Arch) + if !ok { + continue + } + + table.Stracer = sys + } +} + +// SinkType defines where to send straces to. 
+type SinkType uint32 + +const ( + // SinkTypeLog sends straces to text log + SinkTypeLog SinkType = 1 << iota + + // SinkTypeEvent sends strace to event log + SinkTypeEvent +) + +func convertToSyscallFlag(sinks SinkType) uint32 { + ret := uint32(0) + if bits.IsOn32(uint32(sinks), uint32(SinkTypeLog)) { + ret |= kernel.StraceEnableLog + } + if bits.IsOn32(uint32(sinks), uint32(SinkTypeEvent)) { + ret |= kernel.StraceEnableEvent + } + return ret +} + +// Enable enables the syscalls in whitelist in all syscall tables. +// +// Preconditions: Initialize has been called. +func Enable(whitelist []string, sinks SinkType) error { + flags := convertToSyscallFlag(sinks) + for _, table := range kernel.SyscallTables() { + // Is this known? + sys, ok := Lookup(table.OS, table.Arch) + if !ok { + continue + } + + // Convert to a set of system calls numbers. + wl, err := sys.ConvertToSysnoMap(whitelist) + if err != nil { + return err + } + + table.FeatureEnable.Enable(flags, wl, true) + } + + // Done. + return nil +} + +// Disable will disable Strace for all system calls and missing syscalls. +// +// Preconditions: Initialize has been called. +func Disable(sinks SinkType) { + flags := convertToSyscallFlag(sinks) + for _, table := range kernel.SyscallTables() { + // Strace will be disabled for all syscalls including missing. + table.FeatureEnable.Enable(flags, nil, false) + } +} + +// EnableAll enables all syscalls in all syscall tables. +// +// Preconditions: Initialize has been called. +func EnableAll(sinks SinkType) { + flags := convertToSyscallFlag(sinks) + for _, table := range kernel.SyscallTables() { + // Is this known? + if _, ok := Lookup(table.OS, table.Arch); !ok { + continue + } + + table.FeatureEnable.EnableAll(flags) + } +} diff --git a/pkg/sentry/strace/strace.proto b/pkg/sentry/strace/strace.proto new file mode 100644 index 000000000..914e8c7b0 --- /dev/null +++ b/pkg/sentry/strace/strace.proto @@ -0,0 +1,50 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package gvisor; + +message Strace { + // Process name that made the syscall. + string process = 1; + + // Syscall function name. + string function = 2; + + // List of syscall arguments formatted as strings. + repeated string args = 3; + + oneof info { + StraceEnter enter = 4; + StraceExit exit = 5; + } +} + +message StraceEnter { +} + +message StraceExit { + // Return value formatted as string. + string return = 1; + + // Formatted error string in case syscall failed. + string error = 2; + + // Value of errno upon syscall exit. + int64 err_no = 3; // errno is a macro and gets expanded :-( + + // Time elapsed between syscall enter and exit. + int64 elapsed_ns = 4; +} diff --git a/pkg/sentry/strace/syscalls.go b/pkg/sentry/strace/syscalls.go new file mode 100644 index 000000000..d0e661706 --- /dev/null +++ b/pkg/sentry/strace/syscalls.go @@ -0,0 +1,217 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package strace + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" +) + +// FormatSpecifier values describe how an individual syscall argument should be +// formatted. +type FormatSpecifier int + +// Valid FormatSpecifiers. +// +// Unless otherwise specified, values are formatted before syscall execution +// and not updated after syscall execution (the same value is output). +const ( + // Hex is just a hexadecimal number. + Hex FormatSpecifier = iota + + // Oct is just an octal number. + Oct + + // ReadBuffer is a buffer for a read-style call. The syscall return + // value is used for the length. + // + // Formatted after syscall execution. + ReadBuffer + + // WriteBuffer is a buffer for a write-style call. The following arg is + // used for the length. + // + // Contents omitted after syscall execution. + WriteBuffer + + // ReadIOVec is a pointer to a struct iovec for a writev-style call. + // The following arg is used for the length. The return value is used + // for the total length. + // + // Complete contents only formatted after syscall execution. + ReadIOVec + + // WriteIOVec is a pointer to a struct iovec for a writev-style call. + // The following arg is used for the length. + // + // Complete contents only formatted before syscall execution, omitted + // after. + WriteIOVec + + // IOVec is a generic pointer to a struct iovec. Contents are not dumped. + IOVec + + // SendMsgHdr is a pointer to a struct msghdr for a sendmsg-style call. + // Contents formatted only before syscall execution, omitted after. + SendMsgHdr + + // RecvMsgHdr is a pointer to a struct msghdr for a recvmsg-style call. + // Contents formatted only after syscall execution. + RecvMsgHdr + + // Path is a pointer to a char* path. 
+ Path
+
+ // ExecveStringVector is a NULL-terminated array of strings. Enforces
+ // the maximum execve array length.
+ ExecveStringVector
+
+ // PipeFDs is an array of two FDs, formatted after syscall execution.
+ PipeFDs
+
+ // Uname is a pointer to a struct uname, formatted after syscall execution.
+ Uname
+
+ // Stat is a pointer to a struct stat, formatted after syscall execution.
+ Stat
+
+ // SockAddr is a pointer to a struct sockaddr. The following arg is
+ // used for length.
+ SockAddr
+
+ // PostSockAddr is a pointer to a struct sockaddr, formatted after
+ // syscall execution. The following arg is a pointer to the socklen_t
+ // length.
+ PostSockAddr
+
+ // SockLen is a pointer to a socklen_t, formatted before and after
+ // syscall execution.
+ SockLen
+
+ // SockFamily is a socket protocol family value.
+ SockFamily
+
+ // SockType is a socket type and flags value.
+ SockType
+
+ // SockProtocol is a socket protocol value. Argument n-2 is the socket
+ // protocol family.
+ SockProtocol
+
+ // SockFlags are socket flags.
+ SockFlags
+
+ // Timespec is a pointer to a struct timespec.
+ Timespec
+
+ // PostTimespec is a pointer to a struct timespec, formatted after
+ // syscall execution.
+ PostTimespec
+
+ // UTimeTimespec is a pointer to a struct timespec. Formatting includes
+ // UTIME_NOW and UTIME_OMIT.
+ UTimeTimespec
+
+ // ItimerVal is a pointer to a struct itimerval.
+ ItimerVal
+
+ // PostItimerVal is a pointer to a struct itimerval, formatted after
+ // syscall execution.
+ PostItimerVal
+
+ // Timeval is a pointer to a struct timeval, formatted before and after
+ // syscall execution.
+ Timeval
+
+ // Utimbuf is a pointer to a struct utimbuf.
+ Utimbuf
+
+ // CloneFlags are clone(2) flags.
+ CloneFlags
+
+ // OpenFlags are open(2) flags.
+ OpenFlags
+
+ // Mode is a mode_t.
+ Mode
+
+ // FutexOp is the futex(2) operation.
+ FutexOp
+
+ // PtraceRequest is the ptrace(2) request. 
+ PtraceRequest + + // Rusage is a struct rusage, formatted after syscall execution. + Rusage +) + +// defaultFormat is the syscall argument format to use if the actual format is +// not known. It formats all six arguments as hex. +var defaultFormat = []FormatSpecifier{Hex, Hex, Hex, Hex, Hex, Hex} + +// SyscallInfo captures the name and printing format of a syscall. +type SyscallInfo struct { + // name is the name of the syscall. + name string + + // format contains the format specifiers for each argument. + // + // Syscall calls can have up to six arguments. Arguments without a + // corresponding entry in format will not be printed. + format []FormatSpecifier +} + +// makeSyscallInfo returns a SyscallInfo for a syscall. +func makeSyscallInfo(name string, f ...FormatSpecifier) SyscallInfo { + return SyscallInfo{name: name, format: f} +} + +// SyscallMap maps syscalls into names and printing formats. +type SyscallMap map[uintptr]SyscallInfo + +var _ kernel.Stracer = (SyscallMap)(nil) + +// syscallTable contains the syscalls for a specific OS/Arch. +type syscallTable struct { + // os is the operating system this table targets. + os abi.OS + + // arch is the architecture this table targets. + arch arch.Arch + + // syscalls contains the syscall mappings. + syscalls SyscallMap +} + +// syscallTables contains all syscall tables. +var syscallTables = []syscallTable{ + { + os: abi.Linux, + arch: arch.AMD64, + syscalls: linuxAMD64, + }, +} + +// Lookup returns the SyscallMap for the OS/Arch combination. The returned map +// must not be changed. 
+func Lookup(os abi.OS, a arch.Arch) (SyscallMap, bool) { + for _, s := range syscallTables { + if s.os == os && s.arch == a { + return s.syscalls, true + } + } + return nil, false +} diff --git a/pkg/sentry/syscalls/BUILD b/pkg/sentry/syscalls/BUILD new file mode 100644 index 000000000..d667b42c8 --- /dev/null +++ b/pkg/sentry/syscalls/BUILD @@ -0,0 +1,43 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "syscalls", + srcs = [ + "epoll.go", + "polling.go", + "syscalls.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/syscalls", + visibility = ["//:sandbox"], + deps = [ + ":unimplemented_syscall_go_proto", + "//pkg/abi/linux", + "//pkg/eventchannel", + "//pkg/sentry/arch", + "//pkg/sentry/fs", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/epoll", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/time", + "//pkg/syserror", + "//pkg/waiter", + ], +) + +proto_library( + name = "unimplemented_syscall_proto", + srcs = ["unimplemented_syscall.proto"], + visibility = ["//visibility:public"], + deps = ["//pkg/sentry/arch:registers_proto"], +) + +go_proto_library( + name = "unimplemented_syscall_go_proto", + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/unimplemented_syscall_go_proto", + proto = ":unimplemented_syscall_proto", + visibility = ["//visibility:public"], + deps = ["//pkg/sentry/arch:registers_go_proto"], +) diff --git a/pkg/sentry/syscalls/epoll.go b/pkg/sentry/syscalls/epoll.go new file mode 100644 index 000000000..01dd6fa71 --- /dev/null +++ b/pkg/sentry/syscalls/epoll.go @@ -0,0 +1,174 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package syscalls + +import ( + "syscall" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/epoll" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// CreateEpoll implements the epoll_create(2) linux syscall. +func CreateEpoll(t *kernel.Task, closeOnExec bool) (kdefs.FD, error) { + file := epoll.NewEventPoll(t) + defer file.DecRef() + + flags := kernel.FDFlags{ + CloseOnExec: closeOnExec, + } + fd, err := t.FDMap().NewFDFrom(0, file, flags, t.ThreadGroup().Limits()) + if err != nil { + return 0, err + } + + return fd, nil +} + +// AddEpoll implements the epoll_ctl(2) linux syscall when op is EPOLL_CTL_ADD. +func AddEpoll(t *kernel.Task, epfd kdefs.FD, fd kdefs.FD, flags epoll.EntryFlags, mask waiter.EventMask, userData [2]int32) error { + // Get epoll from the file descriptor. + epollfile := t.FDMap().GetFile(epfd) + if epollfile == nil { + return syscall.EBADF + } + defer epollfile.DecRef() + + // Get the target file id. + file := t.FDMap().GetFile(fd) + if file == nil { + return syscall.EBADF + } + defer file.DecRef() + + // Extract the epollPoll operations. + e, ok := epollfile.FileOperations.(*epoll.EventPoll) + if !ok { + return syscall.EBADF + } + + // Try to add the entry. + return e.AddEntry(epoll.FileIdentifier{file, fd}, flags, mask, userData) +} + +// UpdateEpoll implements the epoll_ctl(2) linux syscall when op is EPOLL_CTL_MOD. 
+func UpdateEpoll(t *kernel.Task, epfd kdefs.FD, fd kdefs.FD, flags epoll.EntryFlags, mask waiter.EventMask, userData [2]int32) error { + // Get epoll from the file descriptor. + epollfile := t.FDMap().GetFile(epfd) + if epollfile == nil { + return syscall.EBADF + } + defer epollfile.DecRef() + + // Get the target file id. + file := t.FDMap().GetFile(fd) + if file == nil { + return syscall.EBADF + } + defer file.DecRef() + + // Extract the epollPoll operations. + e, ok := epollfile.FileOperations.(*epoll.EventPoll) + if !ok { + return syscall.EBADF + } + + // Try to update the entry. + return e.UpdateEntry(epoll.FileIdentifier{file, fd}, flags, mask, userData) +} + +// RemoveEpoll implements the epoll_ctl(2) linux syscall when op is EPOLL_CTL_DEL. +func RemoveEpoll(t *kernel.Task, epfd kdefs.FD, fd kdefs.FD) error { + // Get epoll from the file descriptor. + epollfile := t.FDMap().GetFile(epfd) + if epollfile == nil { + return syscall.EBADF + } + defer epollfile.DecRef() + + // Get the target file id. + file := t.FDMap().GetFile(fd) + if file == nil { + return syscall.EBADF + } + defer file.DecRef() + + // Extract the epollPoll operations. + e, ok := epollfile.FileOperations.(*epoll.EventPoll) + if !ok { + return syscall.EBADF + } + + // Try to remove the entry. + return e.RemoveEntry(epoll.FileIdentifier{file, fd}) +} + +// WaitEpoll implements the epoll_wait(2) linux syscall. +func WaitEpoll(t *kernel.Task, fd kdefs.FD, max int, timeout int) ([]epoll.Event, error) { + // Get epoll from the file descriptor. + epollfile := t.FDMap().GetFile(fd) + if epollfile == nil { + return nil, syscall.EBADF + } + defer epollfile.DecRef() + + // Extract the epollPoll operations. + e, ok := epollfile.FileOperations.(*epoll.EventPoll) + if !ok { + return nil, syscall.EBADF + } + + // Try to read events and return right away if we got them or if the + // caller requested a non-blocking "wait". 
+ r := e.ReadEvents(max) + if len(r) != 0 || timeout == 0 { + return r, nil + } + + // We'll have to wait. Set up the timer if a timeout was specified and + // and register with the epoll object for readability events. + var haveDeadline bool + var deadline ktime.Time + if timeout > 0 { + timeoutDur := time.Duration(timeout) * time.Millisecond + deadline = t.Kernel().MonotonicClock().Now().Add(timeoutDur) + haveDeadline = true + } + + w, ch := waiter.NewChannelEntry(nil) + e.EventRegister(&w, waiter.EventIn) + defer e.EventUnregister(&w) + + // Try to read the events again until we succeed, timeout or get + // interrupted. + for { + r = e.ReadEvents(max) + if len(r) != 0 { + return r, nil + } + + if err := t.BlockWithDeadline(ch, haveDeadline, deadline); err != nil { + if err == syscall.ETIMEDOUT { + return nil, nil + } + + return nil, err + } + } +} diff --git a/pkg/sentry/syscalls/linux/BUILD b/pkg/sentry/syscalls/linux/BUILD new file mode 100644 index 000000000..bc67ebf30 --- /dev/null +++ b/pkg/sentry/syscalls/linux/BUILD @@ -0,0 +1,103 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "linux_state", + srcs = [ + "sys_aio.go", + "sys_futex.go", + "sys_poll.go", + "sys_time.go", + ], + out = "linux_state.go", + package = "linux", +) + +go_library( + name = "linux", + srcs = [ + "error.go", + "flags.go", + "linux64.go", + "linux_state.go", + "sigset.go", + "sys_aio.go", + "sys_capability.go", + "sys_epoll.go", + "sys_eventfd.go", + "sys_file.go", + "sys_futex.go", + "sys_getdents.go", + "sys_identity.go", + "sys_inotify.go", + "sys_lseek.go", + "sys_mmap.go", + "sys_mount.go", + "sys_pipe.go", + "sys_poll.go", + "sys_prctl.go", + "sys_random.go", + "sys_read.go", + "sys_rlimit.go", + "sys_rusage.go", + "sys_sched.go", + "sys_sem.go", + "sys_signal.go", + "sys_socket.go", + "sys_stat.go", + "sys_sync.go", + "sys_sysinfo.go", + 
"sys_syslog.go", + "sys_thread.go", + "sys_time.go", + "sys_timer.go", + "sys_timerfd.go", + "sys_tls.go", + "sys_utsname.go", + "sys_write.go", + "timespec.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/linux", + visibility = ["//:sandbox"], + deps = [ + "//pkg/abi", + "//pkg/abi/linux", + "//pkg/binary", + "//pkg/bpf", + "//pkg/eventchannel", + "//pkg/log", + "//pkg/metric", + "//pkg/sentry/arch", + "//pkg/sentry/context", + "//pkg/sentry/fs", + "//pkg/sentry/fs/anon", + "//pkg/sentry/fs/fsutil", + "//pkg/sentry/fs/lock", + "//pkg/sentry/fs/timerfd", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/auth", + "//pkg/sentry/kernel/epoll", + "//pkg/sentry/kernel/eventfd", + "//pkg/sentry/kernel/kdefs", + "//pkg/sentry/kernel/pipe", + "//pkg/sentry/kernel/sched", + "//pkg/sentry/kernel/semaphore", + "//pkg/sentry/kernel/time", + "//pkg/sentry/limits", + "//pkg/sentry/memmap", + "//pkg/sentry/mm", + "//pkg/sentry/safemem", + "//pkg/sentry/socket", + "//pkg/sentry/socket/control", + "//pkg/sentry/syscalls", + "//pkg/sentry/usage", + "//pkg/sentry/usermem", + "//pkg/state", + "//pkg/syserr", + "//pkg/syserror", + "//pkg/tcpip/transport/unix", + "//pkg/waiter", + ], +) diff --git a/pkg/sentry/syscalls/linux/error.go b/pkg/sentry/syscalls/linux/error.go new file mode 100644 index 000000000..013b385bc --- /dev/null +++ b/pkg/sentry/syscalls/linux/error.go @@ -0,0 +1,117 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "io" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/metric" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +var ( + partialResultMetric = metric.MustCreateNewUint64Metric("/syscalls/partial_result", true /* sync */, "Whether or not a partial result has occurred for this sandbox.") + partialResultOnce sync.Once +) + +// handleIOError handles special error cases for partial results. For some +// errors, we may consume the error and return only the partial read/write. +// +// op and f are used only for panics. +func handleIOError(t *kernel.Task, partialResult bool, err, intr error, op string, f *fs.File) error { + switch err { + case nil: + // Typical successful syscall. + return nil + case io.EOF: + // EOF is always consumed. If this is a partial read/write + // (result != 0), the application will see that, otherwise + // they will see 0. + return nil + case syserror.ErrExceedsFileSizeLimit: + // Ignore partialResult because this error only applies to + // normal files, and for those files we cannot accumulate + // write results. + // + // Do not consume the error and return it as EFBIG. + // Simultaneously send a SIGXFSZ per setrlimit(2). + t.SendSignal(&arch.SignalInfo{ + Signo: int32(syscall.SIGXFSZ), + Code: arch.SignalInfoKernel, + }) + return syscall.EFBIG + case syserror.ErrInterrupted: + // The syscall was interrupted. Return nil if it completed + // partially, otherwise return the error code that the syscall + // needs (to indicate to the kernel what it should do). + if partialResult { + return nil + } + return intr + } + + if !partialResult { + // Typical syscall error. 
+ return err + } + + switch err { + case syserror.EINTR: + // Syscall interrupted, but completed a partial + // read/write. Like ErrWouldBlock, since we have a + // partial read/write, we consume the error and return + // the partial result. + return nil + case syserror.EFAULT: + // EFAULT is only shown the user if nothing was + // read/written. If we read something (this case), they see + // a partial read/write. They will then presumably try again + // with an incremented buffer, which will EFAULT with + // result == 0. + return nil + case syserror.EPIPE: + // Writes to a pipe or socket will return EPIPE if the other + // side is gone. The partial write is returned. EPIPE will be + // returned on the next call. + // + // TODO: In some cases SIGPIPE should also be sent + // to the application. + return nil + case syserror.ErrWouldBlock: + // Syscall would block, but completed a partial read/write. + // This case should only be returned by IssueIO for nonblocking + // files. Since we have a partial read/write, we consume + // ErrWouldBlock, returning the partial result. + return nil + } + + switch err.(type) { + case kernel.SyscallRestartErrno: + // Identical to the EINTR case. + return nil + } + + // An unknown error is encountered with a partial read/write. + name, _ := f.Dirent.FullName(nil /* ignore chroot */) + log.Traceback("Invalid request partialResult %v and err (type %T) %v for %s operation on %q, %T", partialResult, err, err, op, name, f.FileOperations) + partialResultOnce.Do(partialResultMetric.Increment) + return nil +} diff --git a/pkg/sentry/syscalls/linux/flags.go b/pkg/sentry/syscalls/linux/flags.go new file mode 100644 index 000000000..82bfd7c2a --- /dev/null +++ b/pkg/sentry/syscalls/linux/flags.go @@ -0,0 +1,95 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" +) + +// flagsToPermissions returns a Permissions object from Linux flags. +// This includes truncate permission if O_TRUNC is set in the mask. +func flagsToPermissions(mask uint) (p fs.PermMask) { + switch mask & syscall.O_ACCMODE { + case syscall.O_WRONLY: + p.Write = true + case syscall.O_RDWR: + p.Write = true + p.Read = true + case syscall.O_RDONLY: + p.Read = true + } + return +} + +// fdFlagsToLinux converts a kernel.FDFlags object to a Linux representation. +func fdFlagsToLinux(flags kernel.FDFlags) (mask uint) { + if flags.CloseOnExec { + mask |= syscall.FD_CLOEXEC + } + return +} + +// flagsToLinux converts a FileFlags object to a Linux representation. +func flagsToLinux(flags fs.FileFlags) (mask uint) { + if flags.Direct { + mask |= syscall.O_DIRECT + } + if flags.NonBlocking { + mask |= syscall.O_NONBLOCK + } + if flags.Sync { + mask |= syscall.O_SYNC + } + if flags.Append { + mask |= syscall.O_APPEND + } + if flags.Directory { + mask |= syscall.O_DIRECTORY + } + switch { + case flags.Read && flags.Write: + mask |= syscall.O_RDWR + case flags.Write: + mask |= syscall.O_WRONLY + case flags.Read: + mask |= syscall.O_RDONLY + } + return +} + +// linuxToFlags converts linux file flags to a FileFlags object. 
+func linuxToFlags(mask uint) (flags fs.FileFlags) { + return fs.FileFlags{ + Direct: mask&syscall.O_DIRECT != 0, + Sync: mask&syscall.O_SYNC != 0, + NonBlocking: mask&syscall.O_NONBLOCK != 0, + Read: (mask & syscall.O_ACCMODE) != syscall.O_WRONLY, + Write: (mask & syscall.O_ACCMODE) != syscall.O_RDONLY, + Append: mask&syscall.O_APPEND != 0, + Directory: mask&syscall.O_DIRECTORY != 0, + } +} + +// linuxToSettableFlags converts linux file flags to a SettableFileFlags object. +func linuxToSettableFlags(mask uint) fs.SettableFileFlags { + return fs.SettableFileFlags{ + Direct: mask&syscall.O_DIRECT != 0, + NonBlocking: mask&syscall.O_NONBLOCK != 0, + Append: mask&syscall.O_APPEND != 0, + } +} diff --git a/pkg/sentry/syscalls/linux/linux64.go b/pkg/sentry/syscalls/linux/linux64.go new file mode 100644 index 000000000..44db2d582 --- /dev/null +++ b/pkg/sentry/syscalls/linux/linux64.go @@ -0,0 +1,376 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package linux provides syscall tables for amd64 Linux. +// +// NOTE: Linux i386 support has been removed. 
+package linux + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi" + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/syscalls" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// AUDIT_ARCH_X86_64 identifies the Linux syscall API on AMD64, and is taken +// from <linux/audit.h>. +const _AUDIT_ARCH_X86_64 = 0xc000003e + +// AMD64 is a table of Linux amd64 syscall API with the corresponding syscall +// numbers from Linux 3.11. The entries commented out are those syscalls we +// don't currently support. +var AMD64 = &kernel.SyscallTable{ + OS: abi.Linux, + Arch: arch.AMD64, + Version: kernel.Version{ + Sysname: "Linux", + Release: "3.11.10", + Version: "#1 SMP Fri Nov 29 10:47:50 PST 2013", + }, + AuditNumber: _AUDIT_ARCH_X86_64, + Table: map[uintptr]kernel.SyscallFn{ + 0: Read, + 1: Write, + 2: Open, + 3: Close, + 4: Stat, + 5: Fstat, + 6: Lstat, + 7: Poll, + 8: Lseek, + 9: Mmap, + 10: Mprotect, + 11: Munmap, + 12: Brk, + 13: RtSigaction, + 14: RtSigprocmask, + 15: RtSigreturn, + 16: Ioctl, + 17: Pread64, + 18: Pwrite64, + 19: Readv, + 20: Writev, + 21: Access, + 22: Pipe, + 23: Select, + 24: SchedYield, + 25: Mremap, + 26: Msync, + 27: Mincore, + 28: Madvise, + // 29: Shmget, TODO + // 30: Shmat, TODO + // 31: Shmctl, TODO + 32: Dup, + 33: Dup2, + 34: Pause, + 35: Nanosleep, + 36: Getitimer, + 37: Alarm, + 38: Setitimer, + 39: Getpid, + 40: Sendfile, + 41: Socket, + 42: Connect, + 43: Accept, + 44: SendTo, + 45: RecvFrom, + 46: SendMsg, + 47: RecvMsg, + 48: Shutdown, + 49: Bind, + 50: Listen, + 51: GetSockName, + 52: GetPeerName, + 53: SocketPair, + 54: SetSockOpt, + 55: GetSockOpt, + 56: Clone, + 57: Fork, + 58: Vfork, + 59: Execve, + 60: Exit, + 61: Wait4, + 62: Kill, + 63: Uname, + 64: Semget, + 65: Semop, + 66: Semctl, + // 67: Shmdt, TODO + 
// 68: Msgget, TODO + // 69: Msgsnd, TODO + // 70: Msgrcv, TODO + // 71: Msgctl, TODO + 72: Fcntl, + 73: Flock, + 74: Fsync, + 75: Fdatasync, + 76: Truncate, + 77: Ftruncate, + 78: Getdents, + 79: Getcwd, + 80: Chdir, + 81: Fchdir, + 82: Rename, + 83: Mkdir, + 84: Rmdir, + 85: Creat, + 86: Link, + 87: Unlink, + 88: Symlink, + 89: Readlink, + 90: Chmod, + 91: Fchmod, + 92: Chown, + 93: Fchown, + 94: Lchown, + 95: Umask, + 96: Gettimeofday, + 97: Getrlimit, + 98: Getrusage, + 99: Sysinfo, + 100: Times, + 101: Ptrace, + 102: Getuid, + 103: Syslog, + 104: Getgid, + 105: Setuid, + 106: Setgid, + 107: Geteuid, + 108: Getegid, + 109: Setpgid, + 110: Getppid, + 111: Getpgrp, + 112: Setsid, + 113: Setreuid, + 114: Setregid, + 115: Getgroups, + 116: Setgroups, + 117: Setresuid, + 118: Getresuid, + 119: Setresgid, + 120: Getresgid, + 121: Getpgid, + // 122: Setfsuid, TODO + // 123: Setfsgid, TODO + 124: Getsid, + 125: Capget, + 126: Capset, + 127: RtSigpending, + 128: RtSigtimedwait, + 129: RtSigqueueinfo, + 130: RtSigsuspend, + 131: Sigaltstack, + 132: Utime, + 133: Mknod, + 134: syscalls.Error(syscall.ENOSYS), // Uselib, obsolete + 135: syscalls.ErrorWithEvent(syscall.EINVAL), // SetPersonality, unable to change personality + 136: syscalls.ErrorWithEvent(syscall.ENOSYS), // Ustat, needs filesystem support + 137: Statfs, + 138: Fstatfs, + // 139: Sysfs, TODO + 140: Getpriority, + 141: Setpriority, + 142: syscalls.CapError(linux.CAP_SYS_NICE), // SchedSetparam, requires cap_sys_nice + 143: SchedGetparam, + 144: SchedSetscheduler, + 145: SchedGetscheduler, + 146: SchedGetPriorityMax, + 147: SchedGetPriorityMin, + 148: syscalls.ErrorWithEvent(syscall.EPERM), // SchedRrGetInterval, + 149: syscalls.Error(nil), // Mlock, TODO + 150: syscalls.Error(nil), // Munlock, TODO + 151: syscalls.Error(nil), // Mlockall, TODO + 152: syscalls.Error(nil), // Munlockall, TODO + 153: syscalls.CapError(linux.CAP_SYS_TTY_CONFIG), // Vhangup, + 154: syscalls.Error(syscall.EPERM), // ModifyLdt, + 
155: syscalls.Error(syscall.EPERM), // PivotRoot, + 156: syscalls.Error(syscall.EPERM), // Sysctl, syscall is "worthless" + 157: Prctl, + 158: ArchPrctl, + 159: syscalls.CapError(linux.CAP_SYS_TIME), // Adjtimex, requires cap_sys_time + 160: Setrlimit, + 161: Chroot, + 162: Sync, + 163: syscalls.CapError(linux.CAP_SYS_PACCT), // Acct, requires cap_sys_pacct + 164: syscalls.CapError(linux.CAP_SYS_TIME), // Settimeofday, requires cap_sys_time + 165: Mount, + 166: Umount2, + 167: syscalls.CapError(linux.CAP_SYS_ADMIN), // Swapon, requires cap_sys_admin + 168: syscalls.CapError(linux.CAP_SYS_ADMIN), // Swapoff, requires cap_sys_admin + 169: syscalls.CapError(linux.CAP_SYS_BOOT), // Reboot, requires cap_sys_boot + 170: Sethostname, + 171: Setdomainname, + 172: syscalls.CapError(linux.CAP_SYS_RAWIO), // Iopl, requires cap_sys_rawio + 173: syscalls.CapError(linux.CAP_SYS_RAWIO), // Ioperm, requires cap_sys_rawio + 174: syscalls.CapError(linux.CAP_SYS_MODULE), // CreateModule, requires cap_sys_module + 175: syscalls.CapError(linux.CAP_SYS_MODULE), // InitModule, requires cap_sys_module + 176: syscalls.CapError(linux.CAP_SYS_MODULE), // DeleteModule, requires cap_sys_module + 177: syscalls.Error(syscall.ENOSYS), // GetKernelSyms, not supported in > 2.6 + 178: syscalls.Error(syscall.ENOSYS), // QueryModule, not supported in > 2.6 + 179: syscalls.CapError(linux.CAP_SYS_ADMIN), // Quotactl, requires cap_sys_admin (most operations) + 180: syscalls.Error(syscall.ENOSYS), // Nfsservctl, does not exist > 3.1 + 181: syscalls.Error(syscall.ENOSYS), // Getpmsg, not implemented in Linux + 182: syscalls.Error(syscall.ENOSYS), // Putpmsg, not implemented in Linux + 183: syscalls.Error(syscall.ENOSYS), // AfsSyscall, not implemented in Linux + 184: syscalls.Error(syscall.ENOSYS), // Tuxcall, not implemented in Linux + 185: syscalls.Error(syscall.ENOSYS), // Security, not implemented in Linux + 186: Gettid, + 187: nil, // Readahead, TODO + 188: syscalls.ErrorWithEvent(syscall.ENOTSUP), // 
Setxattr, requires filesystem support + 189: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Lsetxattr, requires filesystem support + 190: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Fsetxattr, requires filesystem support + 191: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Getxattr, requires filesystem support + 192: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Lgetxattr, requires filesystem support + 193: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Fgetxattr, requires filesystem support + 194: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Listxattr, requires filesystem support + 195: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Llistxattr, requires filesystem support + 196: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Flistxattr, requires filesystem support + 197: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Removexattr, requires filesystem support + 198: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Lremovexattr, requires filesystem support + 199: syscalls.ErrorWithEvent(syscall.ENOTSUP), // Fremovexattr, requires filesystem support + 200: Tkill, + 201: Time, + 202: Futex, + 203: SchedSetaffinity, + 204: SchedGetaffinity, + 205: syscalls.Error(syscall.ENOSYS), // SetThreadArea, expected to return ENOSYS on 64-bit + 206: IoSetup, + 207: IoDestroy, + 208: IoGetevents, + 209: IoSubmit, + 210: IoCancel, + 211: syscalls.Error(syscall.ENOSYS), // GetThreadArea, expected to return ENOSYS on 64-bit + 212: syscalls.CapError(linux.CAP_SYS_ADMIN), // LookupDcookie, requires cap_sys_admin + 213: EpollCreate, + 214: syscalls.ErrorWithEvent(syscall.ENOSYS), // EpollCtlOld, deprecated (afaik, unused) + 215: syscalls.ErrorWithEvent(syscall.ENOSYS), // EpollWaitOld, deprecated (afaik, unused) + 216: syscalls.ErrorWithEvent(syscall.ENOSYS), // RemapFilePages, deprecated since 3.16 + 217: Getdents64, + 218: SetTidAddress, + 219: RestartSyscall, + // 220: Semtimedop, TODO + 221: Fadvise64, + // 222: TimerCreate, TODO + // 223: TimerSettime, TODO + // 224: TimerGettime, TODO + // 225: 
TimerGetoverrun, TODO + // 226: TimerDelete, TODO + 227: ClockSettime, + 228: ClockGettime, + 229: ClockGetres, + 230: ClockNanosleep, + 231: ExitGroup, + 232: EpollWait, + 233: EpollCtl, + 234: Tgkill, + 235: Utimes, + 236: syscalls.Error(syscall.ENOSYS), // Vserver, not implemented by Linux + 237: syscalls.CapError(linux.CAP_SYS_NICE), // Mbind, may require cap_sys_nice TODO + 238: SetMempolicy, + 239: GetMempolicy, + // 240: MqOpen, TODO + // 241: MqUnlink, TODO + // 242: MqTimedsend, TODO + // 243: MqTimedreceive, TODO + // 244: MqNotify, TODO + // 245: MqGetsetattr, TODO + 246: syscalls.CapError(linux.CAP_SYS_BOOT), // kexec_load, requires cap_sys_boot + 247: Waitid, + 248: syscalls.Error(syscall.EACCES), // AddKey, not available to user + 249: syscalls.Error(syscall.EACCES), // RequestKey, not available to user + 250: syscalls.Error(syscall.EACCES), // Keyctl, not available to user + 251: syscalls.CapError(linux.CAP_SYS_ADMIN), // IoprioSet, requires cap_sys_nice or cap_sys_admin (depending) + 252: syscalls.CapError(linux.CAP_SYS_ADMIN), // IoprioGet, requires cap_sys_nice or cap_sys_admin (depending) + 253: InotifyInit, + 254: InotifyAddWatch, + 255: InotifyRmWatch, + 256: syscalls.CapError(linux.CAP_SYS_NICE), // MigratePages, requires cap_sys_nice + 257: Openat, + 258: Mkdirat, + 259: Mknodat, + 260: Fchownat, + 261: Futimesat, + 262: Fstatat, + 263: Unlinkat, + 264: Renameat, + 265: Linkat, + 266: Symlinkat, + 267: Readlinkat, + 268: Fchmodat, + 269: Faccessat, + 270: Pselect, + 271: Ppoll, + 272: Unshare, + 273: syscalls.Error(syscall.ENOSYS), // SetRobustList, obsolete + 274: syscalls.Error(syscall.ENOSYS), // GetRobustList, obsolete + // 275: Splice, TODO + // 276: Tee, TODO + // 277: SyncFileRange, TODO + // 278: Vmsplice, TODO + 279: syscalls.CapError(linux.CAP_SYS_NICE), // MovePages, requires cap_sys_nice (mostly) + 280: Utimensat, + 281: EpollPwait, + // 282: Signalfd, TODO + 283: TimerfdCreate, + 284: Eventfd, + 285: Fallocate, + 286: 
TimerfdSettime, + 287: TimerfdGettime, + 288: Accept4, + // 289: Signalfd4, TODO + 290: Eventfd2, + 291: EpollCreate1, + 292: Dup3, + 293: Pipe2, + 294: InotifyInit1, + 295: Preadv, + 296: Pwritev, + 297: RtTgsigqueueinfo, + 298: syscalls.ErrorWithEvent(syscall.ENODEV), // PerfEventOpen, no support for perf counters + 299: RecvMMsg, + 300: syscalls.ErrorWithEvent(syscall.ENOSYS), // FanotifyInit, needs CONFIG_FANOTIFY + 301: syscalls.ErrorWithEvent(syscall.ENOSYS), // FanotifyMark, needs CONFIG_FANOTIFY + 302: Prlimit64, + 303: syscalls.ErrorWithEvent(syscall.EOPNOTSUPP), // NameToHandleAt, needs filesystem support + 304: syscalls.ErrorWithEvent(syscall.EOPNOTSUPP), // OpenByHandleAt, needs filesystem support + 305: syscalls.CapError(linux.CAP_SYS_TIME), // ClockAdjtime, requires cap_sys_time + 306: Syncfs, + 307: SendMMsg, + // 308: Setns, TODO + 309: Getcpu, + // 310: ProcessVmReadv, TODO may require cap_sys_ptrace + // 311: ProcessVmWritev, TODO may require cap_sys_ptrace + 312: syscalls.CapError(linux.CAP_SYS_PTRACE), // Kcmp, requires cap_sys_ptrace + 313: syscalls.CapError(linux.CAP_SYS_MODULE), // FinitModule, requires cap_sys_module + // "Backports." + 318: GetRandom, + }, + + Emulate: map[usermem.Addr]uintptr{ + 0xffffffffff600000: 96, // vsyscall gettimeofday(2) + 0xffffffffff600400: 201, // vsyscall time(2) + 0xffffffffff600800: 309, // vsyscall getcpu(2) + }, + Missing: func(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, error) { + syscalls.UnimplementedEvent(t) + return 0, syserror.ENOSYS + }, +} diff --git a/pkg/sentry/syscalls/linux/sigset.go b/pkg/sentry/syscalls/linux/sigset.go new file mode 100644 index 000000000..bfb541634 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sigset.go @@ -0,0 +1,69 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// copyInSigSet copies in a sigset_t, checks its size, and ensures that KILL and +// STOP are clear. +func copyInSigSet(t *kernel.Task, sigSetAddr usermem.Addr, size uint) (linux.SignalSet, error) { + if size != linux.SignalSetSize { + return 0, syscall.EINVAL + } + b := t.CopyScratchBuffer(8) + if _, err := t.CopyInBytes(sigSetAddr, b); err != nil { + return 0, err + } + mask := usermem.ByteOrder.Uint64(b[:]) + return linux.SignalSet(mask) &^ kernel.UnblockableSignals, nil +} + +// copyOutSigSet copies out a sigset_t. +func copyOutSigSet(t *kernel.Task, sigSetAddr usermem.Addr, mask linux.SignalSet) error { + b := t.CopyScratchBuffer(8) + usermem.ByteOrder.PutUint64(b, uint64(mask)) + _, err := t.CopyOutBytes(sigSetAddr, b) + return err +} + +// copyInSigSetWithSize copies in a structure as below +// +// struct { +// sigset_t* sigset_addr; +// size_t sizeof_sigset; +// }; +// +// and returns sigset_addr and size. 
+func copyInSigSetWithSize(t *kernel.Task, addr usermem.Addr) (usermem.Addr, uint, error) { + switch t.Arch().Width() { + case 8: + in := t.CopyScratchBuffer(16) + if _, err := t.CopyInBytes(addr, in); err != nil { + return 0, 0, err + } + maskAddr := usermem.Addr(usermem.ByteOrder.Uint64(in[0:])) + maskSize := uint(usermem.ByteOrder.Uint64(in[8:])) + return maskAddr, maskSize, nil + default: + return 0, 0, syserror.ENOSYS + } +} diff --git a/pkg/sentry/syscalls/linux/sys_aio.go b/pkg/sentry/syscalls/linux/sys_aio.go new file mode 100644 index 000000000..80407a082 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_aio.go @@ -0,0 +1,402 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/eventfd" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// I/O commands. +const ( + _IOCB_CMD_PREAD = 0 + _IOCB_CMD_PWRITE = 1 + _IOCB_CMD_FSYNC = 2 + _IOCB_CMD_FDSYNC = 3 + _IOCB_CMD_NOOP = 6 + _IOCB_CMD_PREADV = 7 + _IOCB_CMD_PWRITEV = 8 +) + +// I/O flags. 
+const ( + _IOCB_FLAG_RESFD = 1 +) + +// ioCallback describes an I/O request. +// +// The priority field is currently ignored in the implementation below. Also +// note that the IOCB_FLAG_RESFD feature is not supported. +type ioCallback struct { + Data uint64 + Key uint32 + Reserved1 uint32 + + OpCode uint16 + ReqPrio int16 + FD uint32 + + Buf uint64 + Bytes uint64 + Offset int64 + + Reserved2 uint64 + Flags uint32 + + // eventfd to signal if IOCB_FLAG_RESFD is set in flags. + ResFD uint32 +} + +// ioEvent describes an I/O result. +type ioEvent struct { + Data uint64 + Obj uint64 + Result int64 + Result2 int64 +} + +// ioEventSize is the size of an ioEvent encoded. +var ioEventSize = binary.Size(ioEvent{}) + +// IoSetup implements linux syscall io_setup(2). +func IoSetup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + nrEvents := args[0].Int() + idAddr := args[1].Pointer() + + // Linux uses the native long as the aio ID. + // + // The context pointer _must_ be zero initially. + var idIn uint64 + if _, err := t.CopyIn(idAddr, &idIn); err != nil { + return 0, nil, err + } + if idIn != 0 { + return 0, nil, syserror.EINVAL + } + + id, err := t.MemoryManager().NewAIOContext(t, uint32(nrEvents)) + if err != nil { + return 0, nil, err + } + + // Copy out the new ID. + if _, err := t.CopyOut(idAddr, &id); err != nil { + t.MemoryManager().DestroyAIOContext(t, id) + return 0, nil, err + } + + return 0, nil, nil +} + +// IoDestroy implements linux syscall io_destroy(2). +func IoDestroy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + id := args[0].Uint64() + + // Destroy the given context. + if !t.MemoryManager().DestroyAIOContext(t, id) { + // Does not exist. + return 0, nil, syserror.EINVAL + } + // FIXME: Linux blocks until all AIO to the destroyed context is + // done. + return 0, nil, nil +} + +// IoGetevents implements linux syscall io_getevents(2). 
+func IoGetevents(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	id := args[0].Uint64()
+	minEvents := args[1].Int()
+	events := args[2].Int()
+	eventsAddr := args[3].Pointer()
+	timespecAddr := args[4].Pointer()
+
+	// Sanity check arguments.
+	if minEvents > events {
+		return 0, nil, syserror.EINVAL
+	}
+
+	ctx, ok := t.MemoryManager().LookupAIOContext(t, id)
+	if !ok {
+		return 0, nil, syserror.EINVAL
+	}
+
+	// Setup the timeout.
+	var haveDeadline bool
+	var deadline ktime.Time
+	if timespecAddr != 0 {
+		d, err := copyTimespecIn(t, timespecAddr)
+		if err != nil {
+			return 0, nil, err
+		}
+		if !d.Valid() {
+			return 0, nil, syserror.EINVAL
+		}
+		deadline = t.Kernel().MonotonicClock().Now().Add(d.ToDuration())
+		haveDeadline = true
+	}
+
+	// Loop over all requests.
+	for count := int32(0); count < events; count++ {
+		// Get a request, per semantics: the first minEvents requests may
+		// block; any beyond that are collected only if already complete.
+		var v interface{}
+		if count >= minEvents {
+			var ok bool
+			v, ok = ctx.PopRequest()
+			if !ok {
+				return uintptr(count), nil, nil
+			}
+		} else {
+			var err error
+			v, err = waitForRequest(ctx, t, haveDeadline, deadline)
+			if err != nil {
+				if count > 0 || err == syserror.ETIMEDOUT {
+					return uintptr(count), nil, nil
+				}
+				return 0, nil, syserror.ConvertIntr(err, syserror.EINTR)
+			}
+		}
+
+		ev := v.(*ioEvent)
+
+		// Copy out the result.
+		if _, err := t.CopyOut(eventsAddr, ev); err != nil {
+			if count > 0 {
+				return uintptr(count), nil, nil
+			}
+			// Nothing done.
+			return 0, nil, err
+		}
+
+		// Keep rolling.
+		eventsAddr += usermem.Addr(ioEventSize)
+	}
+
+	// Everything finished.
+	return uintptr(events), nil, nil
+}
+
+// waitForRequest blocks until a completed request can be popped from ctx,
+// honoring the optional deadline. It returns EINVAL if the context is
+// destroyed while waiting.
+func waitForRequest(ctx *mm.AIOContext, t *kernel.Task, haveDeadline bool, deadline ktime.Time) (interface{}, error) {
+	for {
+		if v, ok := ctx.PopRequest(); ok {
+			// Request was readily available. Just return it.
+			return v, nil
+		}
+
+		// Need to wait for request completion. 
+ done, active := ctx.WaitChannel() + if !active { + // Context has been destroyed. + return nil, syserror.EINVAL + } + if err := t.BlockWithDeadline(done, haveDeadline, deadline); err != nil { + return nil, err + } + } +} + +// memoryFor returns appropriate memory for the given callback. +func memoryFor(t *kernel.Task, cb *ioCallback) (usermem.IOSequence, error) { + bytes := int(cb.Bytes) + if bytes < 0 { + // Linux also requires that this field fit in ssize_t. + return usermem.IOSequence{}, syserror.EINVAL + } + + // Since this I/O will be asynchronous with respect to t's task goroutine, + // we have no guarantee that t's AddressSpace will be active during the + // I/O. + switch cb.OpCode { + case _IOCB_CMD_PREAD, _IOCB_CMD_PWRITE: + return t.SingleIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{ + AddressSpaceActive: false, + }) + + case _IOCB_CMD_PREADV, _IOCB_CMD_PWRITEV: + return t.IovecsIOSequence(usermem.Addr(cb.Buf), bytes, usermem.IOOpts{ + AddressSpaceActive: false, + }) + + case _IOCB_CMD_FSYNC, _IOCB_CMD_FDSYNC, _IOCB_CMD_NOOP: + return usermem.IOSequence{}, nil + + default: + // Not a supported command. + return usermem.IOSequence{}, syserror.EINVAL + } +} + +func performCallback(t *kernel.Task, file *fs.File, cbAddr usermem.Addr, cb *ioCallback, ioseq usermem.IOSequence, ctx *mm.AIOContext, eventFile *fs.File) { + ev := &ioEvent{ + Data: cb.Data, + Obj: uint64(cbAddr), + } + + // Construct a context.Context that will not be interrupted if t is + // interrupted. + c := t.AsyncContext() + + var err error + switch cb.OpCode { + case _IOCB_CMD_PREAD, _IOCB_CMD_PREADV: + ev.Result, err = file.Preadv(c, ioseq, cb.Offset) + case _IOCB_CMD_PWRITE, _IOCB_CMD_PWRITEV: + ev.Result, err = file.Pwritev(c, ioseq, cb.Offset) + case _IOCB_CMD_FSYNC: + err = file.Fsync(c, 0, fs.FileMaxOffset, fs.SyncAll) + case _IOCB_CMD_FDSYNC: + err = file.Fsync(c, 0, fs.FileMaxOffset, fs.SyncData) + } + + // Update the result. 
+ if err != nil { + err = handleIOError(t, ev.Result != 0 /* partial */, err, nil /* never interrupted */, "aio", file) + ev.Result = -int64(t.ExtractErrno(err, 0)) + } + + file.DecRef() + + // Queue the result for delivery. + ctx.FinishRequest(ev) + + // Notify the event file if one was specified. This needs to happen + // *after* queueing the result to avoid racing with the thread we may + // wake up. + if eventFile != nil { + eventFile.FileOperations.(*eventfd.EventOperations).Signal(1) + eventFile.DecRef() + } +} + +// submitCallback processes a single callback. +func submitCallback(t *kernel.Task, id uint64, cb *ioCallback, cbAddr usermem.Addr) error { + file := t.FDMap().GetFile(kdefs.FD(cb.FD)) + if file == nil { + // File not found. + return syserror.EBADF + } + defer file.DecRef() + + // Was there an eventFD? Extract it. + var eventFile *fs.File + if cb.Flags&_IOCB_FLAG_RESFD != 0 { + eventFile := t.FDMap().GetFile(kdefs.FD(cb.ResFD)) + if eventFile == nil { + // Bad FD. + return syserror.EBADF + } + defer eventFile.DecRef() + + // Check that it is an eventfd. + if _, ok := eventFile.FileOperations.(*eventfd.EventOperations); !ok { + // Not an event FD. + return syserror.EINVAL + } + } + + ioseq, err := memoryFor(t, cb) + if err != nil { + return err + } + + // Prepare the request. + ctx, ok := t.MemoryManager().LookupAIOContext(t, id) + if !ok { + return syserror.EINVAL + } + if ready := ctx.Prepare(); !ready { + // Context is busy. + return syserror.EAGAIN + } + + if eventFile != nil { + // The request is set. Make sure there's a ref on the file. + // + // This is necessary when the callback executes on completion, + // which is also what will release this reference. + eventFile.IncRef() + } + + // Perform the request asynchronously. + file.IncRef() + fs.Async(func() { performCallback(t, file, cbAddr, cb, ioseq, ctx, eventFile) }) + + // All set. + return nil +} + +// IoSubmit implements linux syscall io_submit(2). 
+func IoSubmit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + id := args[0].Uint64() + nrEvents := args[1].Int() + addr := args[2].Pointer() + + for i := int32(0); i < nrEvents; i++ { + // Copy in the address. + cbAddrNative := t.Arch().Native(0) + if _, err := t.CopyIn(addr, cbAddrNative); err != nil { + if i > 0 { + // Some successful. + return uintptr(i), nil, nil + } + // Nothing done. + return 0, nil, err + } + + // Copy in this callback. + var cb ioCallback + cbAddr := usermem.Addr(t.Arch().Value(cbAddrNative)) + if _, err := t.CopyIn(cbAddr, &cb); err != nil { + + if i > 0 { + // Some have been successful. + return uintptr(i), nil, nil + } + // Nothing done. + return 0, nil, err + } + + // Process this callback. + if err := submitCallback(t, id, &cb, cbAddr); err != nil { + if i > 0 { + // Partial success. + return uintptr(i), nil, nil + } + // Nothing done. + return 0, nil, err + } + + // Advance to the next one. + addr += usermem.Addr(t.Arch().Width()) + } + + return uintptr(nrEvents), nil, nil +} + +// IoCancel implements linux syscall io_cancel(2). +// +// It is not presently supported (ENOSYS indicates no support on this +// architecture). +func IoCancel(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + return 0, nil, syserror.ENOSYS +} diff --git a/pkg/sentry/syscalls/linux/sys_capability.go b/pkg/sentry/syscalls/linux/sys_capability.go new file mode 100644 index 000000000..89c81ac90 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_capability.go @@ -0,0 +1,149 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +func lookupCaps(t *kernel.Task, tid kernel.ThreadID) (permitted, inheritable, effective auth.CapabilitySet, err error) { + if tid < 0 { + err = syserror.EINVAL + return + } + if tid > 0 { + t = t.PIDNamespace().TaskWithID(tid) + } + if t == nil { + err = syserror.ESRCH + return + } + creds := t.Credentials() + permitted, inheritable, effective = creds.PermittedCaps, creds.InheritableCaps, creds.EffectiveCaps + return +} + +// Capget implements Linux syscall capget. +func Capget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + hdrAddr := args[0].Pointer() + dataAddr := args[1].Pointer() + + var hdr linux.CapUserHeader + if _, err := t.CopyIn(hdrAddr, &hdr); err != nil { + return 0, nil, err + } + // hdr.Pid doesn't need to be valid if this capget() is a "version probe" + // (hdr.Version is unrecognized and dataAddr is null), so we can't do the + // lookup yet. 
+ switch hdr.Version { + case linux.LINUX_CAPABILITY_VERSION_1: + if dataAddr == 0 { + return 0, nil, nil + } + p, i, e, err := lookupCaps(t, kernel.ThreadID(hdr.Pid)) + if err != nil { + return 0, nil, err + } + data := linux.CapUserData{ + Effective: uint32(e), + Permitted: uint32(p), + Inheritable: uint32(i), + } + _, err = t.CopyOut(dataAddr, &data) + return 0, nil, err + + case linux.LINUX_CAPABILITY_VERSION_2, linux.LINUX_CAPABILITY_VERSION_3: + if dataAddr == 0 { + return 0, nil, nil + } + p, i, e, err := lookupCaps(t, kernel.ThreadID(hdr.Pid)) + if err != nil { + return 0, nil, err + } + data := [2]linux.CapUserData{ + { + Effective: uint32(e), + Permitted: uint32(p), + Inheritable: uint32(i), + }, + { + Effective: uint32(e >> 32), + Permitted: uint32(p >> 32), + Inheritable: uint32(i >> 32), + }, + } + _, err = t.CopyOut(dataAddr, &data) + return 0, nil, err + + default: + hdr.Version = linux.HighestCapabilityVersion + if _, err := t.CopyOut(hdrAddr, &hdr); err != nil { + return 0, nil, err + } + if dataAddr != 0 { + return 0, nil, syserror.EINVAL + } + return 0, nil, nil + } +} + +// Capset implements Linux syscall capset. 
+func Capset(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + hdrAddr := args[0].Pointer() + dataAddr := args[1].Pointer() + + var hdr linux.CapUserHeader + if _, err := t.CopyIn(hdrAddr, &hdr); err != nil { + return 0, nil, err + } + switch hdr.Version { + case linux.LINUX_CAPABILITY_VERSION_1: + if tid := kernel.ThreadID(hdr.Pid); tid != 0 && tid != t.ThreadID() { + return 0, nil, syserror.EPERM + } + var data linux.CapUserData + if _, err := t.CopyIn(dataAddr, &data); err != nil { + return 0, nil, err + } + p := auth.CapabilitySet(data.Permitted) & auth.AllCapabilities + i := auth.CapabilitySet(data.Inheritable) & auth.AllCapabilities + e := auth.CapabilitySet(data.Effective) & auth.AllCapabilities + return 0, nil, t.SetCapabilitySets(p, i, e) + + case linux.LINUX_CAPABILITY_VERSION_2, linux.LINUX_CAPABILITY_VERSION_3: + if tid := kernel.ThreadID(hdr.Pid); tid != 0 && tid != t.ThreadID() { + return 0, nil, syserror.EPERM + } + var data [2]linux.CapUserData + if _, err := t.CopyIn(dataAddr, &data); err != nil { + return 0, nil, err + } + p := (auth.CapabilitySet(data[0].Permitted) | (auth.CapabilitySet(data[1].Permitted) << 32)) & auth.AllCapabilities + i := (auth.CapabilitySet(data[0].Inheritable) | (auth.CapabilitySet(data[1].Inheritable) << 32)) & auth.AllCapabilities + e := (auth.CapabilitySet(data[0].Effective) | (auth.CapabilitySet(data[1].Effective) << 32)) & auth.AllCapabilities + return 0, nil, t.SetCapabilitySets(p, i, e) + + default: + hdr.Version = linux.HighestCapabilityVersion + if _, err := t.CopyOut(hdrAddr, &hdr); err != nil { + return 0, nil, err + } + return 0, nil, syserror.EINVAL + } +} diff --git a/pkg/sentry/syscalls/linux/sys_epoll.go b/pkg/sentry/syscalls/linux/sys_epoll.go new file mode 100644 index 000000000..e69dfc77a --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_epoll.go @@ -0,0 +1,171 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/epoll" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/sentry/syscalls" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// EpollCreate1 implements the epoll_create1(2) linux syscall. +func EpollCreate1(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + flags := args[0].Int() + if flags & ^syscall.EPOLL_CLOEXEC != 0 { + return 0, nil, syserror.EINVAL + } + + closeOnExec := flags&syscall.EPOLL_CLOEXEC != 0 + fd, err := syscalls.CreateEpoll(t, closeOnExec) + if err != nil { + return 0, nil, err + } + + return uintptr(fd), nil, nil +} + +// EpollCreate implements the epoll_create(2) linux syscall. +func EpollCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + size := args[0].Int() + + if size <= 0 { + return 0, nil, syserror.EINVAL + } + + fd, err := syscalls.CreateEpoll(t, false) + if err != nil { + return 0, nil, err + } + + return uintptr(fd), nil, nil +} + +// EpollCtl implements the epoll_ctl(2) linux syscall. 
// EpollCtl implements the epoll_ctl(2) linux syscall.
//
// op selects ADD/MOD/DEL of fd on the epoll instance epfd. For ADD and MOD,
// the epoll_event at eventAddr supplies the interest mask, entry flags
// (one-shot, edge-triggered), and 64 bits of user data that epoll_wait will
// echo back.
func EpollCtl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	epfd := kdefs.FD(args[0].Int())
	op := args[1].Int()
	fd := kdefs.FD(args[2].Int())
	eventAddr := args[3].Pointer()

	// Capture the event state if needed. EPOLL_CTL_DEL takes no event
	// argument, so the struct is only read for ADD/MOD.
	flags := epoll.EntryFlags(0)
	mask := waiter.EventMask(0)
	var data [2]int32
	if op != syscall.EPOLL_CTL_DEL {
		var e syscall.EpollEvent
		if _, err := t.CopyIn(eventAddr, &e); err != nil {
			return 0, nil, err
		}

		if e.Events&syscall.EPOLLONESHOT != 0 {
			flags |= epoll.OneShot
		}

		// syscall.EPOLLET is incorrectly generated as a negative number
		// in Go, see https://github.com/golang/go/issues/5328 for
		// details. Negating it yields the intended 0x80000000 bit.
		if e.Events&-syscall.EPOLLET != 0 {
			flags |= epoll.EdgeTriggered
		}

		mask = waiter.EventMask(e.Events)
		// Preserve the caller's user data (the fd and pad fields) so it
		// can be copied back verbatim when events are delivered.
		data[0] = e.Fd
		data[1] = e.Pad
	}

	// Perform the requested operations.
	switch op {
	case syscall.EPOLL_CTL_ADD:
		// See fs/eventpoll.c: HUP and ERR are always reported.
		mask |= waiter.EventHUp | waiter.EventErr
		return 0, nil, syscalls.AddEpoll(t, epfd, fd, flags, mask, data)
	case syscall.EPOLL_CTL_DEL:
		return 0, nil, syscalls.RemoveEpoll(t, epfd, fd)
	case syscall.EPOLL_CTL_MOD:
		// Same as EPOLL_CTL_ADD.
		mask |= waiter.EventHUp | waiter.EventErr
		return 0, nil, syscalls.UpdateEpoll(t, epfd, fd, flags, mask, data)
	default:
		return 0, nil, syserror.EINVAL
	}
}

// copyOutEvents copies epoll events from the kernel to user memory.
+func copyOutEvents(t *kernel.Task, addr usermem.Addr, e []epoll.Event) error { + const itemLen = 12 + if _, ok := addr.AddLength(uint64(len(e)) * itemLen); !ok { + return syserror.EFAULT + } + + b := t.CopyScratchBuffer(itemLen) + for i := range e { + usermem.ByteOrder.PutUint32(b[0:], e[i].Events) + usermem.ByteOrder.PutUint32(b[4:], uint32(e[i].Data[0])) + usermem.ByteOrder.PutUint32(b[8:], uint32(e[i].Data[1])) + if _, err := t.CopyOutBytes(addr, b); err != nil { + return err + } + addr += itemLen + } + + return nil +} + +// EpollWait implements the epoll_wait(2) linux syscall. +func EpollWait(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + epfd := kdefs.FD(args[0].Int()) + eventsAddr := args[1].Pointer() + maxEvents := int(args[2].Int()) + timeout := int(args[3].Int()) + + r, err := syscalls.WaitEpoll(t, epfd, maxEvents, timeout) + if err != nil { + return 0, nil, syserror.ConvertIntr(err, syserror.EINTR) + } + + if len(r) != 0 { + if err := copyOutEvents(t, eventsAddr, r); err != nil { + return 0, nil, err + } + } + + return uintptr(len(r)), nil, nil +} + +// EpollPwait implements the epoll_pwait(2) linux syscall. +func EpollPwait(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + maskAddr := args[4].Pointer() + maskSize := uint(args[5].Uint()) + + if maskAddr != 0 { + mask, err := copyInSigSet(t, maskAddr, maskSize) + if err != nil { + return 0, nil, err + } + + oldmask := t.SignalMask() + t.SetSignalMask(mask) + t.SetSavedSignalMask(oldmask) + } + + return EpollWait(t, args) +} diff --git a/pkg/sentry/syscalls/linux/sys_eventfd.go b/pkg/sentry/syscalls/linux/sys_eventfd.go new file mode 100644 index 000000000..60fe5a133 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_eventfd.go @@ -0,0 +1,65 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/eventfd" +) + +const ( + // EFD_SEMAPHORE is a flag used in syscall eventfd(2) and eventfd2(2). Please + // see its man page for more information. + EFD_SEMAPHORE = 1 + EFD_NONBLOCK = 0x800 + EFD_CLOEXEC = 0x80000 +) + +// Eventfd2 implements linux syscall eventfd2(2). +func Eventfd2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + initVal := args[0].Int() + flags := uint(args[1].Uint()) + allOps := uint(EFD_SEMAPHORE | EFD_NONBLOCK | EFD_CLOEXEC) + + if flags & ^allOps != 0 { + return 0, nil, syscall.EINVAL + } + + event := eventfd.New(t, uint64(initVal), flags&EFD_SEMAPHORE != 0) + event.SetFlags(fs.SettableFileFlags{ + NonBlocking: flags&EFD_NONBLOCK != 0, + }) + defer event.DecRef() + + fd, err := t.FDMap().NewFDFrom(0, event, kernel.FDFlags{ + CloseOnExec: flags&EFD_CLOEXEC != 0, + }, + t.ThreadGroup().Limits()) + if err != nil { + return 0, nil, err + } + + return uintptr(fd), nil, nil +} + +// Eventfd implements linux syscall eventfd(2). 
+func Eventfd(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + args[1].Value = 0 + return Eventfd2(t, args) +} diff --git a/pkg/sentry/syscalls/linux/sys_file.go b/pkg/sentry/syscalls/linux/sys_file.go new file mode 100644 index 000000000..a2dbba7e0 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_file.go @@ -0,0 +1,1942 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "io" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/lock" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// fileOpAt performs an operation on the second last component in the path. +func fileOpAt(t *kernel.Task, dirFD kdefs.FD, path string, fn func(root *fs.Dirent, d *fs.Dirent, name string) error) error { + // Extract the last component. 
+ dir, name := fs.SplitLast(path) + if dir == "/" { + // Common case: we are accessing a file in the root. + root := t.FSContext().RootDirectory() + err := fn(root, root, name) + root.DecRef() + return err + } else if dir == "." && dirFD == linux.AT_FDCWD { + // Common case: we are accessing a file relative to the current + // working directory; skip the look-up. + wd := t.FSContext().WorkingDirectory() + root := t.FSContext().RootDirectory() + err := fn(root, wd, name) + wd.DecRef() + root.DecRef() + return err + } + + return fileOpOn(t, dirFD, dir, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error { + return fn(root, d, name) + }) +} + +// fileOpOn performs an operation on the last entry of the path. +func fileOpOn(t *kernel.Task, dirFD kdefs.FD, path string, resolve bool, fn func(root *fs.Dirent, d *fs.Dirent) error) error { + var ( + d *fs.Dirent // The file. + wd *fs.Dirent // The working directory (if required.) + rel *fs.Dirent // The relative directory for search (if required.) + f *fs.File // The file corresponding to dirFD (if required.) + err error + ) + + // Extract the working directory (maybe). + if len(path) > 0 && path[0] == '/' { + // Absolute path; rel can be nil. + } else if dirFD == linux.AT_FDCWD { + // Need to reference the working directory. + wd = t.FSContext().WorkingDirectory() + rel = wd + } else { + // Need to extract the given FD. + f = t.FDMap().GetFile(dirFD) + if f == nil { + return syserror.EBADF + } + rel = f.Dirent + if !fs.IsDir(rel.Inode.StableAttr) { + return syserror.ENOTDIR + } + } + + // Grab the root (always required.) + root := t.FSContext().RootDirectory() + + // Lookup the node. 
+ if resolve { + d, err = t.MountNamespace().FindInode(t, root, rel, path, linux.MaxSymlinkTraversals) + } else { + d, err = t.MountNamespace().FindLink(t, root, rel, path, linux.MaxSymlinkTraversals) + } + root.DecRef() + if wd != nil { + wd.DecRef() + } + if f != nil { + f.DecRef() + } + if err != nil { + return err + } + + err = fn(root, d) + d.DecRef() + return err +} + +// copyInPath copies a path in. +func copyInPath(t *kernel.Task, addr usermem.Addr, allowEmpty bool) (path string, dirPath bool, err error) { + path, err = t.CopyInString(addr, syscall.PathMax) + if err != nil { + return "", false, err + } + if path == "" && !allowEmpty { + return "", false, syserror.ENOENT + } + + // If the path ends with a /, then checks must be enforced in various + // ways in the different callers. We pass this back to the caller. + path, dirPath = fs.TrimTrailingSlashes(path) + + return path, dirPath, nil +} + +func openAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint) (fd uintptr, err error) { + path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return 0, err + } + + err = fileOpOn(t, dirFD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error { + // First check a few things about the filesystem before trying to get the file + // reference. + // + // It's required that Check does not try to open files not that aren't backed by + // this dirent (e.g. pipes and sockets) because this would result in opening these + // files an extra time just to check permissions. + if err := d.Inode.CheckPermission(t, flagsToPermissions(flags)); err != nil { + return err + } + + fileFlags := linuxToFlags(flags) + isDir := fs.IsDir(d.Inode.StableAttr) + + // If O_DIRECTORY is set, but the file is not a directory, then fail. + if fileFlags.Directory && !isDir { + return syserror.ENOTDIR + } + + // If it's a directory, then make sure. 
+ if dirPath && !isDir { + return syserror.ENOTDIR + } + + // Don't allow directories to be opened writable. + if isDir && fileFlags.Write { + return syserror.EISDIR + } + + file, err := d.Inode.GetFile(t, d, fileFlags) + if err != nil { + return syserror.ConvertIntr(err, kernel.ERESTARTSYS) + } + defer file.DecRef() + + // Success. + fdFlags := kernel.FDFlags{CloseOnExec: flags&syscall.O_CLOEXEC != 0} + newFD, err := t.FDMap().NewFDFrom(0, file, fdFlags, t.ThreadGroup().Limits()) + if err != nil { + return err + } + + // Set return result in frame. + fd = uintptr(newFD) + + // Generate notification for opened file. + d.InotifyEvent(linux.IN_OPEN, 0) + + return nil + }) + return fd, err // Use result in frame. +} + +func mknodAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, mode linux.FileMode) error { + path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return err + } + if dirPath { + return syserror.ENOENT + } + + return fileOpAt(t, dirFD, path, func(root *fs.Dirent, d *fs.Dirent, name string) error { + if !fs.IsDir(d.Inode.StableAttr) { + return syserror.ENOTDIR + } + + // Do we have the appropriate permissions on the parent? + if err := d.Inode.CheckPermission(t, fs.PermMask{Write: true, Execute: true}); err != nil { + return err + } + + // Attempt a creation. + perms := fs.FilePermsFromMode(mode &^ linux.FileMode(t.FSContext().Umask())) + + switch mode.FileType() { + case 0: + // "Zero file type is equivalent to type S_IFREG." - mknod(2) + fallthrough + case linux.ModeRegular: + // We are not going to return the file, so the actual + // flags used don't matter, but they cannot be empty or + // Create will complain. 
+ flags := fs.FileFlags{Read: true, Write: true} + file, err := d.Create(t, root, name, flags, perms) + if err != nil { + return err + } + file.DecRef() + return nil + + case linux.ModeNamedPipe: + return d.CreateFifo(t, root, name, perms) + + case linux.ModeSocket: + // While it is possible create a unix domain socket file on linux + // using mknod(2), in practice this is pretty useless from an + // application. Linux internally uses mknod() to create the socket + // node during bind(2), but we implement bind(2) independently. If + // an application explicitly creates a socket node using mknod(), + // you can't seem to bind() or connect() to the resulting socket. + // + // Instead of emulating this seemingly useless behaviour, we'll + // indicate that the filesystem doesn't support the creation of + // sockets. + return syserror.EOPNOTSUPP + + case linux.ModeCharacterDevice: + fallthrough + case linux.ModeBlockDevice: + // TODO: We don't support creating block or character + // devices at the moment. + // + // When we start supporting block and character devices, we'll + // need to check for CAP_MKNOD here. + return syserror.EPERM + + default: + // "EINVAL - mode requested creation of something other than a + // regular file, device special file, FIFO or socket." - mknod(2) + return syserror.EINVAL + } + }) +} + +// Mknod implements the linux syscall mknod(2). +func Mknod(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + path := args[0].Pointer() + mode := linux.FileMode(args[1].ModeT()) + // We don't need this argument until we support creation of device nodes. + _ = args[2].Uint() // dev + + return 0, nil, mknodAt(t, linux.AT_FDCWD, path, mode) +} + +// Mknodat implements the linux syscall mknodat(2). 
+func Mknodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + dirFD := kdefs.FD(args[0].Int()) + path := args[1].Pointer() + mode := linux.FileMode(args[2].ModeT()) + // We don't need this argument until we support creation of device nodes. + _ = args[3].Uint() // dev + + return 0, nil, mknodAt(t, dirFD, path, mode) +} + +func createAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, flags uint, mode linux.FileMode) (fd uintptr, err error) { + path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return 0, err + } + if dirPath { + return 0, syserror.ENOENT + } + + err = fileOpAt(t, dirFD, path, func(root *fs.Dirent, d *fs.Dirent, name string) error { + if !fs.IsDir(d.Inode.StableAttr) { + return syserror.ENOTDIR + } + + // Does this file exist already? + targetDirent, err := t.MountNamespace().FindInode(t, root, d, name, linux.MaxSymlinkTraversals) + var newFile *fs.File + switch err { + case nil: + // The file existed. + defer targetDirent.DecRef() + + // Check if we wanted to create. + if flags&syscall.O_EXCL != 0 { + return syserror.EEXIST + } + + // Like sys_open, check for a few things about the + // filesystem before trying to get a reference to the + // fs.File. The same constraints on Check apply. + if err := targetDirent.Inode.CheckPermission(t, flagsToPermissions(flags)); err != nil { + return err + } + + // Should we truncate the file? + if flags&syscall.O_TRUNC != 0 { + if err := targetDirent.Inode.Truncate(t, targetDirent, 0); err != nil { + return err + } + } + + // Create a new fs.File. + newFile, err = targetDirent.Inode.GetFile(t, targetDirent, linuxToFlags(flags)) + if err != nil { + return syserror.ConvertIntr(err, kernel.ERESTARTSYS) + } + defer newFile.DecRef() + case syserror.EACCES: + // Permission denied while walking to the file. + return err + default: + // Do we have write permissions on the parent? 
+ if err := d.Inode.CheckPermission(t, fs.PermMask{Write: true, Execute: true}); err != nil { + return err + } + + // Attempt a creation. + perms := fs.FilePermsFromMode(mode &^ linux.FileMode(t.FSContext().Umask())) + newFile, err = d.Create(t, root, name, linuxToFlags(flags), perms) + if err != nil { + // No luck, bail. + return err + } + defer newFile.DecRef() + targetDirent = newFile.Dirent + } + + // Success. + fdFlags := kernel.FDFlags{CloseOnExec: flags&syscall.O_CLOEXEC != 0} + newFD, err := t.FDMap().NewFDFrom(0, newFile, fdFlags, t.ThreadGroup().Limits()) + if err != nil { + return err + } + + // Set result in frame. + fd = uintptr(newFD) + + // Queue the open inotify event. The creation event is + // automatically queued when the dirent is targetDirent. The + // open events are implemented at the syscall layer so we need + // to manually queue one here. + targetDirent.InotifyEvent(linux.IN_OPEN, 0) + + return nil + }) + return fd, err // Use result in frame. +} + +// Open implements linux syscall open(2). +func Open(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + flags := uint(args[1].Uint()) + if flags&syscall.O_CREAT != 0 { + mode := linux.FileMode(args[2].ModeT()) + n, err := createAt(t, linux.AT_FDCWD, addr, flags, mode) + return n, nil, err + } + n, err := openAt(t, linux.AT_FDCWD, addr, flags) + return n, nil, err +} + +// Openat implements linux syscall openat(2). +func Openat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + dirFD := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + flags := uint(args[2].Uint()) + if flags&syscall.O_CREAT != 0 { + mode := linux.FileMode(args[3].ModeT()) + n, err := createAt(t, dirFD, addr, flags, mode) + return n, nil, err + } + n, err := openAt(t, dirFD, addr, flags) + return n, nil, err +} + +// Creat implements linux syscall creat(2). 
func Creat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	mode := linux.FileMode(args[1].ModeT())
	// creat(2) is open(2) with O_WRONLY|O_TRUNC plus creation; the O_CREAT
	// behavior is presumably supplied by createAt — TODO(review): confirm.
	n, err := createAt(t, linux.AT_FDCWD, addr, syscall.O_WRONLY|syscall.O_TRUNC, mode)
	return n, nil, err
}

// accessContext is a context that overrides the credentials used, but
// otherwise carries the same values as the embedded context.
//
// accessContext should only be used for access(2).
type accessContext struct {
	context.Context
	creds auth.Credentials
}

// Value implements context.Context. It intercepts only the credentials key,
// returning the overridden credentials; all other keys fall through to the
// embedded context.
func (ac accessContext) Value(key interface{}) interface{} {
	switch key {
	case auth.CtxCredentials:
		return &ac.creds
	default:
		return ac.Context.Value(key)
	}
}

// accessAt implements the common logic of access(2) and faccessat(2): it
// checks mode (a mask of R_OK/W_OK/X_OK) against the file named by
// (dirFD, addr), using the task's *real* UID/GID rather than its effective
// ones.
func accessAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, resolve bool, mode uint) error {
	// Local aliases for the R_OK/W_OK/X_OK mode bits.
	const rOK = 4
	const wOK = 2
	const xOK = 1

	path, _, err := copyInPath(t, addr, false /* allowEmpty */)
	if err != nil {
		return err
	}

	// Sanity check the mode.
	if mode&^(rOK|wOK|xOK) != 0 {
		return syserror.EINVAL
	}

	return fileOpOn(t, dirFD, path, resolve, func(root *fs.Dirent, d *fs.Dirent) error {
		// access(2) and faccessat(2) check permissions using real
		// UID/GID, not effective UID/GID.
		//
		// "access() needs to use the real uid/gid, not the effective
		// uid/gid. We do this by temporarily clearing all FS-related
		// capabilities and switching the fsuid/fsgid around to the
		// real ones." -fs/open.c:faccessat
		creds := t.Credentials()
		creds.EffectiveKUID = creds.RealKUID
		creds.EffectiveKGID = creds.RealKGID
		if creds.EffectiveKUID.In(creds.UserNamespace) == auth.RootUID {
			creds.EffectiveCaps = creds.PermittedCaps
		} else {
			creds.EffectiveCaps = 0
		}

		// Wrap the task context so that the permission check below sees
		// the swapped-in credentials.
		ctx := &accessContext{
			Context: t,
			creds:   creds,
		}

		if err := d.Inode.CheckPermission(ctx, fs.PermMask{
			Read:    mode&rOK != 0,
			Write:   mode&wOK != 0,
			Execute: mode&xOK != 0,
		}); err != nil {
			return err
		}
		return nil
	})
}

// Access implements linux syscall access(2).
func Access(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	mode := args[1].ModeT()

	return 0, nil, accessAt(t, linux.AT_FDCWD, addr, true, mode)
}

// Faccessat implements linux syscall faccessat(2).
func Faccessat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	dirFD := kdefs.FD(args[0].Int())
	addr := args[1].Pointer()
	mode := args[2].ModeT()
	flags := args[3].Int()

	// NOTE(review): only AT_SYMLINK_NOFOLLOW is honored here; other flag
	// bits (e.g. AT_EACCESS) are silently ignored rather than rejected.
	return 0, nil, accessAt(t, dirFD, addr, flags&linux.AT_SYMLINK_NOFOLLOW == 0, mode)
}

// Ioctl implements linux syscall ioctl(2).
func Ioctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	request := int(args[1].Int())

	file := t.FDMap().GetFile(fd)
	if file == nil {
		return 0, nil, syserror.EBADF
	}
	defer file.DecRef()

	// Shared flags between file and socket.
	switch request {
	case linux.FIONCLEX:
		t.FDMap().SetFlags(fd, kernel.FDFlags{
			CloseOnExec: false,
		})
		return 0, nil, nil
	case linux.FIOCLEX:
		t.FDMap().SetFlags(fd, kernel.FDFlags{
			CloseOnExec: true,
		})
		return 0, nil, nil

	case linux.FIONBIO:
		// Toggle O_NONBLOCK based on the int argument.
		var set int32
		if _, err := t.CopyIn(args[2].Pointer(), &set); err != nil {
			return 0, nil, err
		}
		flags := file.Flags()
		if set != 0 {
			flags.NonBlocking = true
		} else {
			flags.NonBlocking = false
		}
		file.SetFlags(flags.Settable())
		return 0, nil, nil

	default:
		// Everything else is delegated to the file implementation.
		ret, err := file.FileOperations.Ioctl(t, t.MemoryManager(), args)
		if err != nil {
			return 0, nil, err
		}

		return ret, nil, nil
	}
}

// Getcwd implements the linux syscall getcwd(2).
func Getcwd(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	size := args[1].SizeT()
	cwd := t.FSContext().WorkingDirectory()
	defer cwd.DecRef()
	root := t.FSContext().RootDirectory()
	defer root.DecRef()

	// Get our full name from the root, and prepend "(unreachable)" if the
	// root was unreachable from our current dirent; this is the same
	// behavior as on Linux.
	s, reachable := cwd.FullName(root)
	if !reachable {
		s = "(unreachable)" + s
	}

	// Note this is >= because we need a terminator.
	if uint(len(s)) >= size {
		return 0, nil, syserror.ERANGE
	}

	// Copy out the path name for the node.
	bytes, err := t.CopyOutBytes(addr, []byte(s))
	if err != nil {
		return 0, nil, err
	}

	// Top it off with a terminator.
	_, err = t.CopyOut(addr+usermem.Addr(bytes), []byte("\x00"))
	return uintptr(bytes + 1), nil, err
}

// Chroot implements the linux syscall chroot(2).
func Chroot(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()

	// chroot(2) requires CAP_SYS_CHROOT.
	if !t.HasCapability(linux.CAP_SYS_CHROOT) {
		return 0, nil, syserror.EPERM
	}

	path, _, err := copyInPath(t, addr, false /* allowEmpty */)
	if err != nil {
		return 0, nil, err
	}

	return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
		// Is it a directory?
		if !fs.IsDir(d.Inode.StableAttr) {
			return syserror.ENOTDIR
		}

		// Does it have execute permissions?
		if err := d.Inode.CheckPermission(t, fs.PermMask{Execute: true}); err != nil {
			return err
		}

		t.FSContext().SetRootDirectory(d)
		return nil
	})
}

// Chdir implements the linux syscall chdir(2).
func Chdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()

	path, _, err := copyInPath(t, addr, false /* allowEmpty */)
	if err != nil {
		return 0, nil, err
	}

	return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
		// Is it a directory?
		if !fs.IsDir(d.Inode.StableAttr) {
			return syserror.ENOTDIR
		}

		// Does it have execute permissions?
		if err := d.Inode.CheckPermission(t, fs.PermMask{Execute: true}); err != nil {
			return err
		}

		t.FSContext().SetWorkingDirectory(d)
		return nil
	})
}

// Fchdir implements the linux syscall fchdir(2).
func Fchdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())

	file := t.FDMap().GetFile(fd)
	if file == nil {
		return 0, nil, syserror.EBADF
	}
	defer file.DecRef()

	// Is it a directory?
	if !fs.IsDir(file.Dirent.Inode.StableAttr) {
		return 0, nil, syserror.ENOTDIR
	}

	// Does it have execute permissions?
	if err := file.Dirent.Inode.CheckPermission(t, fs.PermMask{Execute: true}); err != nil {
		return 0, nil, err
	}

	t.FSContext().SetWorkingDirectory(file.Dirent)
	return 0, nil, nil
}

// Close implements linux syscall close(2).
func Close(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())

	// The fd is removed from the map even if Flush below fails, matching
	// Linux: close(2) always releases the descriptor.
	file, ok := t.FDMap().Remove(fd)
	if !ok {
		return 0, nil, syserror.EBADF
	}
	defer file.DecRef()

	err := file.Flush(t)
	return 0, nil, handleIOError(t, false /* partial */, err, syscall.EINTR, "close", file)
}

// Dup implements linux syscall dup(2).
func Dup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())

	file := t.FDMap().GetFile(fd)
	if file == nil {
		return 0, nil, syserror.EBADF
	}
	defer file.DecRef()

	// Any allocation failure is reported as EMFILE, the errno dup(2)
	// documents for hitting the descriptor limit.
	newfd, err := t.FDMap().NewFDFrom(0, file, kernel.FDFlags{}, t.ThreadGroup().Limits())
	if err != nil {
		return 0, nil, syserror.EMFILE
	}
	return uintptr(newfd), nil, nil
}

// Dup2 implements linux syscall dup2(2).
func Dup2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	oldfd := kdefs.FD(args[0].Int())
	newfd := kdefs.FD(args[1].Int())

	// If oldfd is a valid file descriptor, and newfd has the same value as oldfd,
	// then dup2() does nothing, and returns newfd.
	if oldfd == newfd {
		oldFile := t.FDMap().GetFile(oldfd)
		if oldFile == nil {
			return 0, nil, syserror.EBADF
		}
		defer oldFile.DecRef()

		return uintptr(newfd), nil, nil
	}

	// Zero out flags arg to be used by Dup3.
	args[2].Value = 0
	return Dup3(t, args)
}

// Dup3 implements linux syscall dup3(2).
+func Dup3(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + oldfd := kdefs.FD(args[0].Int()) + newfd := kdefs.FD(args[1].Int()) + flags := args[2].Uint() + + if oldfd == newfd { + return 0, nil, syserror.EINVAL + } + + oldFile := t.FDMap().GetFile(oldfd) + if oldFile == nil { + return 0, nil, syserror.EBADF + } + defer oldFile.DecRef() + + err := t.FDMap().NewFDAt(newfd, oldFile, kernel.FDFlags{CloseOnExec: flags&syscall.O_CLOEXEC != 0}, t.ThreadGroup().Limits()) + if err != nil { + return 0, nil, err + } + + return uintptr(newfd), nil, nil +} + +// Fcntl implements linux syscall fcntl(2). +func Fcntl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + cmd := args[1].Int() + + file, flags := t.FDMap().GetDescriptor(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + switch cmd { + case syscall.F_DUPFD, syscall.F_DUPFD_CLOEXEC: + from := kdefs.FD(args[2].Int()) + fdFlags := kernel.FDFlags{CloseOnExec: cmd == syscall.F_DUPFD_CLOEXEC} + fd, err := t.FDMap().NewFDFrom(from, file, fdFlags, t.ThreadGroup().Limits()) + if err != nil { + return 0, nil, err + } + return uintptr(fd), nil, nil + case syscall.F_GETFD: + return uintptr(fdFlagsToLinux(flags)), nil, nil + case syscall.F_SETFD: + flags := args[2].Uint() + t.FDMap().SetFlags(fd, kernel.FDFlags{ + CloseOnExec: flags&syscall.FD_CLOEXEC != 0, + }) + case syscall.F_GETFL: + return uintptr(flagsToLinux(file.Flags())), nil, nil + case syscall.F_SETFL: + flags := uint(args[2].Uint()) + file.SetFlags(linuxToSettableFlags(flags)) + case syscall.F_SETLK, syscall.F_SETLKW: + // In Linux the file system can choose to provide lock operations for an inode. + // Normally pipe and socket types lack lock operations. We diverge and use a heavy + // hammer by only allowing locks on files and directories. 
+ if !fs.IsFile(file.Dirent.Inode.StableAttr) && !fs.IsDir(file.Dirent.Inode.StableAttr) { + return 0, nil, syserror.EBADF + } + + // Copy in the lock request. + flockAddr := args[2].Pointer() + var flock syscall.Flock_t + if _, err := t.CopyIn(flockAddr, &flock); err != nil { + return 0, nil, err + } + + // Compute the lock whence. + var sw fs.SeekWhence + switch flock.Whence { + case 0: + sw = fs.SeekSet + case 1: + sw = fs.SeekCurrent + case 2: + sw = fs.SeekEnd + default: + return 0, nil, syserror.EINVAL + } + + // Compute the lock offset. + var off int64 + switch sw { + case fs.SeekSet: + off = 0 + case fs.SeekCurrent: + // Note that Linux does not hold any mutexes while retrieving the file offset, + // see fs/locks.c:flock_to_posix_lock and fs/locks.c:fcntl_setlk. + off = file.Offset() + case fs.SeekEnd: + uattr, err := file.Dirent.Inode.UnstableAttr(t) + if err != nil { + return 0, nil, err + } + off = uattr.Size + default: + return 0, nil, syserror.EINVAL + } + + // Compute the lock range. + rng, err := lock.ComputeRange(flock.Start, flock.Len, off) + if err != nil { + return 0, nil, err + } + + // The lock uid is that of the Task's FDMap. + lockUniqueID := lock.UniqueID(t.FDMap().ID()) + + // These locks don't block; execute the non-blocking operation using the inode's lock + // context directly. + switch flock.Type { + case syscall.F_RDLCK: + if !file.Flags().Read { + return 0, nil, syserror.EBADF + } + if cmd == syscall.F_SETLK { + // Non-blocking lock, provide a nil lock.Blocker. + if !file.Dirent.Inode.LockCtx.Posix.LockRegion(lockUniqueID, lock.ReadLock, rng, nil) { + return 0, nil, syserror.EAGAIN + } + } else { + // Blocking lock, pass in the task to satisfy the lock.Blocker interface. 
+ if !file.Dirent.Inode.LockCtx.Posix.LockRegion(lockUniqueID, lock.ReadLock, rng, t) { + return 0, nil, syserror.EINTR + } + } + return 0, nil, nil + case syscall.F_WRLCK: + if !file.Flags().Write { + return 0, nil, syserror.EBADF + } + if cmd == syscall.F_SETLK { + // Non-blocking lock, provide a nil lock.Blocker. + if !file.Dirent.Inode.LockCtx.Posix.LockRegion(lockUniqueID, lock.WriteLock, rng, nil) { + return 0, nil, syserror.EAGAIN + } + } else { + // Blocking lock, pass in the task to satisfy the lock.Blocker interface. + if !file.Dirent.Inode.LockCtx.Posix.LockRegion(lockUniqueID, lock.WriteLock, rng, t) { + return 0, nil, syserror.EINTR + } + } + return 0, nil, nil + case syscall.F_UNLCK: + file.Dirent.Inode.LockCtx.Posix.UnlockRegion(lockUniqueID, rng) + return 0, nil, nil + default: + return 0, nil, syserror.EINVAL + } + default: + // Everything else is not yet supported. + return 0, nil, syserror.EINVAL + } + return 0, nil, nil +} + +const ( + _FADV_NORMAL = 0 + _FADV_RANDOM = 1 + _FADV_SEQUENTIAL = 2 + _FADV_WILLNEED = 3 + _FADV_DONTNEED = 4 + _FADV_NOREUSE = 5 +) + +// Fadvise64 implements linux syscall fadvise64(2). +// This implementation currently ignores the provided advice. +func Fadvise64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + offset := args[1].Int64() + length := args[2].Uint() + advice := args[3].Int() + + if offset < 0 || length < 0 { + return 0, nil, syserror.EINVAL + } + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + switch advice { + case _FADV_NORMAL: + case _FADV_RANDOM: + case _FADV_SEQUENTIAL: + case _FADV_WILLNEED: + case _FADV_DONTNEED: + case _FADV_NOREUSE: + default: + return 0, nil, syserror.EINVAL + } + + // Sure, whatever. 
+ return 0, nil, nil +} + +func mkdirAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, mode linux.FileMode) error { + path, _, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return err + } + + return fileOpAt(t, dirFD, path, func(root *fs.Dirent, d *fs.Dirent, name string) error { + if !fs.IsDir(d.Inode.StableAttr) { + return syserror.ENOTDIR + } + + // Does this directory exist already? + f, err := t.MountNamespace().FindInode(t, root, d, name, linux.MaxSymlinkTraversals) + switch err { + case nil: + // The directory existed. + defer f.DecRef() + return syserror.EEXIST + case syserror.EACCES: + // Permission denied while walking to the directory. + return err + default: + // Do we have write permissions on the parent? + if err := d.Inode.CheckPermission(t, fs.PermMask{Write: true, Execute: true}); err != nil { + return err + } + + // Create the directory. + perms := fs.FilePermsFromMode(mode &^ linux.FileMode(t.FSContext().Umask())) + return d.CreateDirectory(t, root, name, perms) + } + }) +} + +// Mkdir implements linux syscall mkdir(2). +func Mkdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + mode := linux.FileMode(args[1].ModeT()) + + return 0, nil, mkdirAt(t, linux.AT_FDCWD, addr, mode) +} + +// Mkdirat implements linux syscall mkdirat(2). +func Mkdirat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + dirFD := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + mode := linux.FileMode(args[2].ModeT()) + + return 0, nil, mkdirAt(t, dirFD, addr, mode) +} + +func rmdirAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr) error { + path, _, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return err + } + + // Special case: rmdir rejects anything with '.' as last component. + // This would be handled by the busy check for the current working + // directory, but this is how it's done. 
+ if (len(path) == 1 && path == ".") || (len(path) > 1 && path[len(path)-2:] == "/.") { + return syserror.EINVAL + } + + return fileOpAt(t, dirFD, path, func(root *fs.Dirent, d *fs.Dirent, name string) error { + if !fs.IsDir(d.Inode.StableAttr) { + return syserror.ENOTDIR + } + + if err := fs.MayDelete(t, root, d, name); err != nil { + return err + } + + return d.RemoveDirectory(t, root, name) + }) +} + +// Rmdir implements linux syscall rmdir(2). +func Rmdir(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + + return 0, nil, rmdirAt(t, linux.AT_FDCWD, addr) +} + +func symlinkAt(t *kernel.Task, dirFD kdefs.FD, newAddr usermem.Addr, oldAddr usermem.Addr) error { + newPath, dirPath, err := copyInPath(t, newAddr, false /* allowEmpty */) + if err != nil { + return err + } + if dirPath { + return syserror.ENOENT + } + + // The oldPath is copied in verbatim. This is because the symlink + // will include all details, including trailing slashes. + oldPath, err := t.CopyInString(oldAddr, syscall.PathMax) + if err != nil { + return err + } + if oldPath == "" { + return syserror.ENOENT + } + + return fileOpAt(t, dirFD, newPath, func(root *fs.Dirent, d *fs.Dirent, name string) error { + if !fs.IsDir(d.Inode.StableAttr) { + return syserror.ENOTDIR + } + + // Make sure we have write permissions on the parent directory. + if err := d.Inode.CheckPermission(t, fs.PermMask{Write: true, Execute: true}); err != nil { + return err + } + return d.CreateLink(t, root, oldPath, name) + }) +} + +// Symlink implements linux syscall symlink(2). +func Symlink(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + oldAddr := args[0].Pointer() + newAddr := args[1].Pointer() + + return 0, nil, symlinkAt(t, linux.AT_FDCWD, newAddr, oldAddr) +} + +// Symlinkat implements linux syscall symlinkat(2). 
func Symlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	// Note the argument order: symlinkat(target, newdirfd, linkpath).
	oldAddr := args[0].Pointer()
	dirFD := kdefs.FD(args[1].Int())
	newAddr := args[2].Pointer()

	return 0, nil, symlinkAt(t, dirFD, newAddr, oldAddr)
}

// mayLinkAt determines whether t can create a hard link to target.
//
// This corresponds to Linux's fs/namei.c:may_linkat.
func mayLinkAt(t *kernel.Task, target *fs.Inode) error {
	// Technically Linux is more restrictive in 3.11.10 (requires CAP_FOWNER in
	// root user namespace); this is from the later f2ca379642d7 "namei: permit
	// linking with CAP_FOWNER in userns".
	if !target.CheckOwnership(t) {
		return syserror.EPERM
	}

	// Check that the target is not a directory and that permissions are okay.
	if fs.IsDir(target.StableAttr) || target.CheckPermission(t, fs.PermMask{Read: true, Write: true}) != nil {
		return syserror.EPERM
	}

	return nil
}

// linkAt creates a hard link to the target specified by oldDirFD and oldAddr,
// specified by newDirFD and newAddr. If resolve is true, then the symlinks
// will be followed when evaluating the target.
func linkAt(t *kernel.Task, oldDirFD kdefs.FD, oldAddr usermem.Addr, newDirFD kdefs.FD, newAddr usermem.Addr, resolve, allowEmpty bool) error {
	oldPath, _, err := copyInPath(t, oldAddr, allowEmpty)
	if err != nil {
		return err
	}
	newPath, dirPath, err := copyInPath(t, newAddr, false /* allowEmpty */)
	if err != nil {
		return err
	}
	if dirPath {
		// The new path may not name a directory (trailing slash).
		return syserror.ENOENT
	}

	// AT_EMPTY_PATH case: link the file referred to by oldDirFD itself.
	if allowEmpty && oldPath == "" {
		target := t.FDMap().GetFile(oldDirFD)
		if target == nil {
			return syserror.EBADF
		}
		defer target.DecRef()
		if err := mayLinkAt(t, target.Dirent.Inode); err != nil {
			return err
		}

		// Resolve the target directory.
		return fileOpAt(t, newDirFD, newPath, func(root *fs.Dirent, newParent *fs.Dirent, newName string) error {
			if !fs.IsDir(newParent.Inode.StableAttr) {
				return syserror.ENOTDIR
			}

			// Make sure we have write permissions on the parent directory.
			if err := newParent.Inode.CheckPermission(t, fs.PermMask{Write: true, Execute: true}); err != nil {
				return err
			}
			return newParent.CreateHardLink(t, root, target.Dirent, newName)
		})
	}

	// Resolve oldDirFD and oldAddr to a dirent. The "resolve" argument
	// only applies to this name.
	return fileOpOn(t, oldDirFD, oldPath, resolve, func(root *fs.Dirent, target *fs.Dirent) error {
		if err := mayLinkAt(t, target.Inode); err != nil {
			return err
		}

		// Next resolve newDirFD and newAddr to the parent dirent and name.
		return fileOpAt(t, newDirFD, newPath, func(root *fs.Dirent, newParent *fs.Dirent, newName string) error {
			if !fs.IsDir(newParent.Inode.StableAttr) {
				return syserror.ENOTDIR
			}

			// Make sure we have write permissions on the parent directory.
			if err := newParent.Inode.CheckPermission(t, fs.PermMask{Write: true, Execute: true}); err != nil {
				return err
			}
			return newParent.CreateHardLink(t, root, target, newName)
		})
	})
}

// Link implements linux syscall link(2).
func Link(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	oldAddr := args[0].Pointer()
	newAddr := args[1].Pointer()

	// man link(2):
	// POSIX.1-2001 says that link() should dereference oldpath if it is a
	// symbolic link. However, since kernel 2.0, Linux does not do so: if
	// oldpath is a symbolic link, then newpath is created as a (hard) link
	// to the same symbolic link file (i.e., newpath becomes a symbolic
	// link to the same file that oldpath refers to).
	resolve := false
	return 0, nil, linkAt(t, linux.AT_FDCWD, oldAddr, linux.AT_FDCWD, newAddr, resolve, false /* allowEmpty */)
}

// Linkat implements linux syscall linkat(2).
+func Linkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + oldDirFD := kdefs.FD(args[0].Int()) + oldAddr := args[1].Pointer() + newDirFD := kdefs.FD(args[2].Int()) + newAddr := args[3].Pointer() + + // man linkat(2): + // By default, linkat(), does not dereference oldpath if it is a + // symbolic link (like link(2)). Since Linux 2.6.18, the flag + // AT_SYMLINK_FOLLOW can be specified in flags to cause oldpath to be + // dereferenced if it is a symbolic link. + flags := args[4].Int() + resolve := flags&linux.AT_SYMLINK_FOLLOW == linux.AT_SYMLINK_FOLLOW + allowEmpty := flags&linux.AT_EMPTY_PATH == linux.AT_EMPTY_PATH + + if allowEmpty && !t.HasCapabilityIn(linux.CAP_DAC_READ_SEARCH, t.UserNamespace().Root()) { + return 0, nil, syserror.ENOENT + } + + return 0, nil, linkAt(t, oldDirFD, oldAddr, newDirFD, newAddr, resolve, allowEmpty) +} + +func readlinkAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, bufAddr usermem.Addr, size uint) (copied uintptr, err error) { + path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return 0, err + } + if dirPath { + return 0, syserror.ENOENT + } + + err = fileOpOn(t, dirFD, path, false /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error { + // Check for Read permission. + if err := d.Inode.CheckPermission(t, fs.PermMask{Read: true}); err != nil { + return err + } + + s, err := d.Inode.Readlink(t) + if err == syserror.ENOLINK { + return syserror.EINVAL + } + if err != nil { + return err + } + + buffer := []byte(s) + if uint(len(buffer)) > size { + buffer = buffer[:size] + } + + n, err := t.CopyOutBytes(bufAddr, buffer) + + // Update frame return value. + copied = uintptr(n) + + return err + }) + return copied, err // Return frame value. +} + +// Readlink implements linux syscall readlink(2). 
+func Readlink(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + bufAddr := args[1].Pointer() + size := args[2].SizeT() + + n, err := readlinkAt(t, linux.AT_FDCWD, addr, bufAddr, size) + return n, nil, err +} + +// Readlinkat implements linux syscall readlinkat(2). +func Readlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + dirFD := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + bufAddr := args[2].Pointer() + size := args[3].SizeT() + + n, err := readlinkAt(t, dirFD, addr, bufAddr, size) + return n, nil, err +} + +func unlinkAt(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr) error { + path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return err + } + if dirPath { + return syserror.ENOENT + } + + return fileOpAt(t, dirFD, path, func(root *fs.Dirent, d *fs.Dirent, name string) error { + if !fs.IsDir(d.Inode.StableAttr) { + return syserror.ENOTDIR + } + + if err := fs.MayDelete(t, root, d, name); err != nil { + return err + } + + return d.Remove(t, root, name) + }) +} + +// Unlink implements linux syscall unlink(2). +func Unlink(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + return 0, nil, unlinkAt(t, linux.AT_FDCWD, addr) +} + +// Unlinkat implements linux syscall unlinkat(2). +func Unlinkat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + dirFD := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + flags := args[2].Uint() + if flags&linux.AT_REMOVEDIR != 0 { + return 0, nil, rmdirAt(t, dirFD, addr) + } + return 0, nil, unlinkAt(t, dirFD, addr) +} + +// Truncate implements linux syscall truncate(2). 
func Truncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	length := args[1].Int64()

	if length < 0 {
		return 0, nil, syserror.EINVAL
	}

	path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */)
	if err != nil {
		return 0, nil, err
	}
	if dirPath {
		return 0, nil, syserror.EINVAL
	}

	// Exceeding RLIMIT_FSIZE raises SIGXFSZ and fails with EFBIG.
	if uint64(length) >= t.ThreadGroup().Limits().Get(limits.FileSize).Cur {
		t.SendSignal(&arch.SignalInfo{
			Signo: int32(syscall.SIGXFSZ),
			Code:  arch.SignalInfoUser,
		})
		return 0, nil, syserror.EFBIG
	}

	return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
		if fs.IsDir(d.Inode.StableAttr) {
			return syserror.EISDIR
		}
		if !fs.IsFile(d.Inode.StableAttr) {
			return syserror.EINVAL
		}

		// Reject truncation if the access permissions do not allow truncation.
		// This is different from the behavior of sys_ftruncate, see below.
		if err := d.Inode.CheckPermission(t, fs.PermMask{Write: true}); err != nil {
			return err
		}

		if err := d.Inode.Truncate(t, d, length); err != nil {
			return err
		}

		// File length modified, generate notification.
		d.InotifyEvent(linux.IN_MODIFY, 0)

		return nil
	})
}

// Ftruncate implements linux syscall ftruncate(2).
func Ftruncate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	length := args[1].Int64()

	file := t.FDMap().GetFile(fd)
	if file == nil {
		return 0, nil, syserror.EBADF
	}
	defer file.DecRef()

	// Reject truncation if the file flags do not permit this operation.
	// This is different from truncate(2) above.
	if !file.Flags().Write {
		return 0, nil, syserror.EINVAL
	}

	// Note that this is different from truncate(2) above, where a
	// directory returns EISDIR.
	if !fs.IsFile(file.Dirent.Inode.StableAttr) {
		return 0, nil, syserror.EINVAL
	}

	if length < 0 {
		return 0, nil, syserror.EINVAL
	}

	// Exceeding RLIMIT_FSIZE raises SIGXFSZ and fails with EFBIG.
	if uint64(length) >= t.ThreadGroup().Limits().Get(limits.FileSize).Cur {
		t.SendSignal(&arch.SignalInfo{
			Signo: int32(syscall.SIGXFSZ),
			Code:  arch.SignalInfoUser,
		})
		return 0, nil, syserror.EFBIG
	}

	if err := file.Dirent.Inode.Truncate(t, file.Dirent, length); err != nil {
		return 0, nil, err
	}

	// File length modified, generate notification.
	file.Dirent.InotifyEvent(linux.IN_MODIFY, 0)

	return 0, nil, nil
}

// Umask implements linux syscall umask(2).
func Umask(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	mask := args[0].ModeT()
	// umask(2) returns the previous mask value.
	mask = t.FSContext().SwapUmask(mask & 0777)
	return uintptr(mask), nil, nil
}

// Change ownership of a file.
//
// uid and gid may be -1, in which case they will not be changed.
func chown(t *kernel.Task, d *fs.Dirent, uid auth.UID, gid auth.GID) error {
	owner := fs.FileOwner{
		UID: auth.NoID,
		GID: auth.NoID,
	}

	uattr, err := d.Inode.UnstableAttr(t)
	if err != nil {
		return err
	}
	c := t.Credentials()
	hasCap := d.Inode.CheckCapability(t, linux.CAP_CHOWN)
	isOwner := uattr.Owner.UID == c.EffectiveKUID
	if uid.Ok() {
		kuid := c.UserNamespace.MapToKUID(uid)
		// Valid UID must be supplied if UID is to be changed.
		if !kuid.Ok() {
			return syserror.EINVAL
		}

		// "Only a privileged process (CAP_CHOWN) may change the owner
		// of a file." -chown(2)
		//
		// Linux also allows chown if you own the file and are
		// explicitly not changing its UID.
		isNoop := uattr.Owner.UID == kuid
		if !(hasCap || (isOwner && isNoop)) {
			return syserror.EPERM
		}

		owner.UID = kuid
	}
	if gid.Ok() {
		kgid := c.UserNamespace.MapToKGID(gid)
		// Valid GID must be supplied if GID is to be changed.
		if !kgid.Ok() {
			return syserror.EINVAL
		}

		// "The owner of a file may change the group of the file to any
		// group of which that owner is a member. A privileged process
		// (CAP_CHOWN) may change the group arbitrarily." -chown(2)
		isNoop := uattr.Owner.GID == kgid
		isMemberGroup := c.InGroup(kgid)
		if !(hasCap || (isOwner && (isNoop || isMemberGroup))) {
			return syserror.EPERM
		}

		owner.GID = kgid
	}

	// FIXME: This is racy; the inode's owner may have changed in
	// the meantime. (Linux holds i_mutex while calling
	// fs/attr.c:notify_change() => inode_operations::setattr =>
	// inode_change_ok().)
	if err := d.Inode.SetOwner(t, d, owner); err != nil {
		return err
	}

	// When the owner or group are changed by an unprivileged user,
	// chown(2) also clears the set-user-ID and set-group-ID bits, but
	// we do not support them.
	return nil
}

// chownAt changes ownership of the file named by (fd, addr); an empty path
// with allowEmpty operates on fd itself (AT_EMPTY_PATH).
func chownAt(t *kernel.Task, fd kdefs.FD, addr usermem.Addr, resolve, allowEmpty bool, uid auth.UID, gid auth.GID) error {
	path, _, err := copyInPath(t, addr, allowEmpty)
	if err != nil {
		return err
	}

	if path == "" {
		// Annoying. What's wrong with fchown?
		file := t.FDMap().GetFile(fd)
		if file == nil {
			return syserror.EBADF
		}
		defer file.DecRef()

		return chown(t, file.Dirent, uid, gid)
	}

	return fileOpOn(t, fd, path, resolve, func(root *fs.Dirent, d *fs.Dirent) error {
		return chown(t, d, uid, gid)
	})
}

// Chown implements linux syscall chown(2).
func Chown(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	uid := auth.UID(args[1].Uint())
	gid := auth.GID(args[2].Uint())

	return 0, nil, chownAt(t, linux.AT_FDCWD, addr, true /* resolve */, false /* allowEmpty */, uid, gid)
}

// Lchown implements linux syscall lchown(2).
+func Lchown(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + uid := auth.UID(args[1].Uint()) + gid := auth.GID(args[2].Uint()) + + return 0, nil, chownAt(t, linux.AT_FDCWD, addr, false /* resolve */, false /* allowEmpty */, uid, gid) +} + +// Fchown implements linux syscall fchown(2). +func Fchown(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + uid := auth.UID(args[1].Uint()) + gid := auth.GID(args[2].Uint()) + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + return 0, nil, chown(t, file.Dirent, uid, gid) +} + +// Fchownat implements Linux syscall fchownat(2). +func Fchownat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + dirFD := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + uid := auth.UID(args[2].Uint()) + gid := auth.GID(args[3].Uint()) + flags := args[4].Int() + + if flags&^(linux.AT_EMPTY_PATH|linux.AT_SYMLINK_NOFOLLOW) != 0 { + return 0, nil, syserror.EINVAL + } + + return 0, nil, chownAt(t, dirFD, addr, flags&linux.AT_SYMLINK_NOFOLLOW == 0, flags&linux.AT_EMPTY_PATH != 0, uid, gid) +} + +func chmod(t *kernel.Task, d *fs.Dirent, mode linux.FileMode) error { + // Must own file to change mode. + if !d.Inode.CheckOwnership(t) { + return syserror.EPERM + } + + p := fs.FilePermsFromMode(mode) + if !d.Inode.SetPermissions(t, d, p) { + return syserror.EPERM + } + + // File attribute changed, generate notification. 
+ d.InotifyEvent(linux.IN_ATTRIB, 0) + + return nil +} + +func chmodAt(t *kernel.Task, fd kdefs.FD, addr usermem.Addr, mode linux.FileMode) error { + path, _, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return err + } + + return fileOpOn(t, fd, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error { + return chmod(t, d, mode) + }) +} + +// Chmod implements linux syscall chmod(2). +func Chmod(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + mode := linux.FileMode(args[1].ModeT()) + + return 0, nil, chmodAt(t, linux.AT_FDCWD, addr, mode) +} + +// Fchmod implements linux syscall fchmod(2). +func Fchmod(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + mode := linux.FileMode(args[1].ModeT()) + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + return 0, nil, chmod(t, file.Dirent, mode) +} + +// Fchmodat implements linux syscall fchmodat(2). +func Fchmodat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + mode := linux.FileMode(args[2].ModeT()) + + return 0, nil, chmodAt(t, fd, addr, mode) +} + +// defaultSetToSystemTimeSpec returns a TimeSpec that will set ATime and MTime +// to the system time. +func defaultSetToSystemTimeSpec() fs.TimeSpec { + return fs.TimeSpec{ + ATimeSetSystemTime: true, + MTimeSetSystemTime: true, + } +} + +func utimes(t *kernel.Task, dirFD kdefs.FD, addr usermem.Addr, ts fs.TimeSpec, resolve bool) error { + setTimestamp := func(root *fs.Dirent, d *fs.Dirent) error { + // Does the task own the file? + if !d.Inode.CheckOwnership(t) { + // Trying to set a specific time? Must be owner. 
+ if (ts.ATimeOmit || !ts.ATimeSetSystemTime) && (ts.MTimeOmit || !ts.MTimeSetSystemTime) { + return syserror.EPERM + } + + // Trying to set to current system time? Must have write access. + if err := d.Inode.CheckPermission(t, fs.PermMask{Write: true}); err != nil { + return err + } + } + + return d.Inode.SetTimestamps(t, d, ts) + } + + // From utimes.c: + // "If filename is NULL and dfd refers to an open file, then operate on + // the file. Otherwise look up filename, possibly using dfd as a + // starting point." + if addr == 0 && dirFD != linux.AT_FDCWD { + if !resolve { + // Linux returns EINVAL in this case. See utimes.c. + return syserror.EINVAL + } + f := t.FDMap().GetFile(dirFD) + if f == nil { + return syserror.EBADF + } + defer f.DecRef() + + root := t.FSContext().RootDirectory() + defer root.DecRef() + + return setTimestamp(root, f.Dirent) + } + + path, _, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return err + } + + return fileOpOn(t, dirFD, path, resolve, setTimestamp) +} + +// Utime implements linux syscall utime(2). +func Utime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + filenameAddr := args[0].Pointer() + timesAddr := args[1].Pointer() + + // No timesAddr argument will be interpreted as current system time. + ts := defaultSetToSystemTimeSpec() + if timesAddr != 0 { + var times syscall.Utimbuf + if _, err := t.CopyIn(timesAddr, ×); err != nil { + return 0, nil, err + } + ts = fs.TimeSpec{ + ATime: ktime.FromSeconds(times.Actime), + MTime: ktime.FromSeconds(times.Modtime), + } + } + return 0, nil, utimes(t, linux.AT_FDCWD, filenameAddr, ts, true) +} + +// Utimes implements linux syscall utimes(2). +func Utimes(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + filenameAddr := args[0].Pointer() + timesAddr := args[1].Pointer() + + // No timesAddr argument will be interpreted as current system time. 
+ ts := defaultSetToSystemTimeSpec() + if timesAddr != 0 { + var times [2]linux.Timeval + if _, err := t.CopyIn(timesAddr, ×); err != nil { + return 0, nil, err + } + ts = fs.TimeSpec{ + ATime: ktime.FromTimeval(times[0]), + MTime: ktime.FromTimeval(times[1]), + } + } + return 0, nil, utimes(t, linux.AT_FDCWD, filenameAddr, ts, true) +} + +// timespecIsValid checks that the timespec is valid for use in utimensat. +func timespecIsValid(ts linux.Timespec) bool { + // Nsec must be UTIME_OMIT, UTIME_NOW, or less than 10^9. + return ts.Nsec == linux.UTIME_OMIT || ts.Nsec == linux.UTIME_NOW || ts.Nsec < 1e9 +} + +// Utimensat implements linux syscall utimensat(2). +func Utimensat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + dirFD := kdefs.FD(args[0].Int()) + pathnameAddr := args[1].Pointer() + timesAddr := args[2].Pointer() + flags := args[3].Int() + + // No timesAddr argument will be interpreted as current system time. + ts := defaultSetToSystemTimeSpec() + if timesAddr != 0 { + var times [2]linux.Timespec + if _, err := t.CopyIn(timesAddr, ×); err != nil { + return 0, nil, err + } + if !timespecIsValid(times[0]) || !timespecIsValid(times[1]) { + return 0, nil, syserror.EINVAL + } + + // If both are UTIME_OMIT, this is a noop. + if times[0].Nsec == linux.UTIME_OMIT && times[1].Nsec == linux.UTIME_OMIT { + return 0, nil, nil + } + + ts = fs.TimeSpec{ + ATime: ktime.FromTimespec(times[0]), + ATimeOmit: times[0].Nsec == linux.UTIME_OMIT, + ATimeSetSystemTime: times[0].Nsec == linux.UTIME_NOW, + MTime: ktime.FromTimespec(times[1]), + MTimeOmit: times[1].Nsec == linux.UTIME_OMIT, + MTimeSetSystemTime: times[0].Nsec == linux.UTIME_NOW, + } + } + return 0, nil, utimes(t, dirFD, pathnameAddr, ts, flags&linux.AT_SYMLINK_NOFOLLOW == 0) +} + +// Futimesat implements linux syscall futimesat(2). 
+func Futimesat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + dirFD := kdefs.FD(args[0].Int()) + pathnameAddr := args[1].Pointer() + timesAddr := args[2].Pointer() + + // No timesAddr argument will be interpreted as current system time. + ts := defaultSetToSystemTimeSpec() + if timesAddr != 0 { + var times [2]linux.Timeval + if _, err := t.CopyIn(timesAddr, ×); err != nil { + return 0, nil, err + } + if times[0].Usec >= 1e6 || times[0].Usec < 0 || + times[1].Usec >= 1e6 || times[1].Usec < 0 { + return 0, nil, syserror.EINVAL + } + + ts = fs.TimeSpec{ + ATime: ktime.FromTimeval(times[0]), + MTime: ktime.FromTimeval(times[1]), + } + } + return 0, nil, utimes(t, dirFD, pathnameAddr, ts, true) +} + +func renameAt(t *kernel.Task, oldDirFD kdefs.FD, oldAddr usermem.Addr, newDirFD kdefs.FD, newAddr usermem.Addr) error { + newPath, _, err := copyInPath(t, newAddr, false /* allowEmpty */) + if err != nil { + return err + } + oldPath, _, err := copyInPath(t, oldAddr, false /* allowEmpty */) + if err != nil { + return err + } + + return fileOpAt(t, oldDirFD, oldPath, func(root *fs.Dirent, oldParent *fs.Dirent, oldName string) error { + if !fs.IsDir(oldParent.Inode.StableAttr) { + return syserror.ENOTDIR + } + + // Root cannot be renamed to anything. + // + // TODO: This catches the case when the rename + // argument is exactly "/", but we should return EBUSY when + // renaming any mount point, or when the argument is not + // exactly "/" but still resolves to the root, like "/.." or + // "/bin/..". + if oldParent == root && oldName == "." { + return syscall.EBUSY + } + return fileOpAt(t, newDirFD, newPath, func(root *fs.Dirent, newParent *fs.Dirent, newName string) error { + if !fs.IsDir(newParent.Inode.StableAttr) { + return syserror.ENOTDIR + } + + // Nothing can be renamed to root. + // + // TODO: Same as above. + if newParent == root && newName == "." 
{ + return syscall.EBUSY + } + return fs.Rename(t, root, oldParent, oldName, newParent, newName) + }) + }) +} + +// Rename implements linux syscall rename(2). +func Rename(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + oldPathAddr := args[0].Pointer() + newPathAddr := args[1].Pointer() + return 0, nil, renameAt(t, linux.AT_FDCWD, oldPathAddr, linux.AT_FDCWD, newPathAddr) +} + +// Renameat implements linux syscall renameat(2). +func Renameat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + oldDirFD := kdefs.FD(args[0].Int()) + oldPathAddr := args[1].Pointer() + newDirFD := kdefs.FD(args[2].Int()) + newPathAddr := args[3].Pointer() + return 0, nil, renameAt(t, oldDirFD, oldPathAddr, newDirFD, newPathAddr) +} + +// Fallocate implements linux system call fallocate(2). +// (well, not really, but at least we return the expected error codes) +func Fallocate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + offset := args[2].Int64() + length := args[3].Int64() + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + if offset < 0 || length <= 0 { + return 0, nil, syserror.EINVAL + } + + return 0, nil, syserror.EOPNOTSUPP +} + +// Flock implements linux syscall flock(2). +func Flock(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + operation := args[1].Int() + + file := t.FDMap().GetFile(fd) + if file == nil { + // flock(2): EBADF fd is not an open file descriptor. + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + nonblocking := operation&linux.LOCK_NB != 0 + operation &^= linux.LOCK_NB + + // flock(2): + // Locks created by flock() are associated with an open file table entry. 
	// This means that
	// duplicate file descriptors (created by, for example, fork(2) or dup(2)) refer to the
	// same lock, and this lock may be modified or released using any of these descriptors. Furthermore,
	// the lock is released either by an explicit LOCK_UN operation on any of these duplicate
	// descriptors, or when all such descriptors have been closed.
	//
	// If a process uses open(2) (or similar) to obtain more than one descriptor for the same file,
	// these descriptors are treated independently by flock(). An attempt to lock the file using
	// one of these file descriptors may be denied by a lock that the calling process has already placed via
	// another descriptor.
	//
	// We use the File UniqueID as the lock UniqueID because it needs to reference the same lock across dup(2)
	// and fork(2).
	lockUniqueID := lock.UniqueID(file.UniqueID)

	// A BSD style lock spans the entire file.
	rng := lock.LockRange{
		Start: 0,
		End:   lock.LockEOF,
	}

	switch operation {
	case linux.LOCK_EX:
		// Exclusive lock: maps to a write lock over the whole file.
		if nonblocking {
			// Since we're nonblocking we pass a nil lock.Blocker implementation.
			if !file.Dirent.Inode.LockCtx.BSD.LockRegion(lockUniqueID, lock.WriteLock, rng, nil) {
				return 0, nil, syserror.EWOULDBLOCK
			}
		} else {
			// Because we're blocking we will pass the task to satisfy the lock.Blocker interface.
			if !file.Dirent.Inode.LockCtx.BSD.LockRegion(lockUniqueID, lock.WriteLock, rng, t) {
				return 0, nil, syserror.EINTR
			}
		}
	case linux.LOCK_SH:
		// Shared lock: maps to a read lock over the whole file.
		if nonblocking {
			// Since we're nonblocking we pass a nil lock.Blocker implementation.
			if !file.Dirent.Inode.LockCtx.BSD.LockRegion(lockUniqueID, lock.ReadLock, rng, nil) {
				return 0, nil, syserror.EWOULDBLOCK
			}
		} else {
			// Because we're blocking we will pass the task to satisfy the lock.Blocker interface.
			if !file.Dirent.Inode.LockCtx.BSD.LockRegion(lockUniqueID, lock.ReadLock, rng, t) {
				return 0, nil, syserror.EINTR
			}
		}
	case linux.LOCK_UN:
		file.Dirent.Inode.LockCtx.BSD.UnlockRegion(lockUniqueID, rng)
	default:
		// flock(2): EINVAL operation is invalid.
		return 0, nil, syserror.EINVAL
	}

	return 0, nil, nil
}

// Sendfile implements linux system call sendfile(2).
func Sendfile(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	outFD := kdefs.FD(args[0].Int())
	inFD := kdefs.FD(args[1].Int())
	offsetAddr := args[2].Pointer()
	count := int64(args[3].SizeT())

	// Don't send a negative number of bytes.
	if count < 0 {
		return 0, nil, syserror.EINVAL
	}

	// Get files.
	outFile := t.FDMap().GetFile(outFD)
	if outFile == nil {
		return 0, nil, syserror.EBADF
	}
	defer outFile.DecRef()

	inFile := t.FDMap().GetFile(inFD)
	if inFile == nil {
		return 0, nil, syserror.EBADF
	}
	defer inFile.DecRef()

	// Verify that the outfile is writable.
	outFlags := outFile.Flags()
	if !outFlags.Write {
		return 0, nil, syserror.EBADF
	}

	// Verify that the outfile Append flag is not set.
	if outFlags.Append {
		return 0, nil, syserror.EINVAL
	}

	// Verify that we have a regular infile.
	// http://elixir.free-electrons.com/linux/latest/source/fs/splice.c#L933
	if !fs.IsRegular(inFile.Dirent.Inode.StableAttr) {
		return 0, nil, syserror.EINVAL
	}

	// Verify that the infile is readable.
	if !inFile.Flags().Read {
		return 0, nil, syserror.EBADF
	}

	// Setup for sending data.
	var offset uint64
	var n int64
	var err error
	w := &fs.FileWriter{t, outFile}
	// hasOffset records whether the caller supplied an offset pointer;
	// with one, reads must not advance the in-file's own offset.
	hasOffset := offsetAddr != 0
	// If we have a provided offset.
	if hasOffset {
		// Copy in the offset.
		if _, err := t.CopyIn(offsetAddr, &offset); err != nil {
			return 0, nil, err
		}
		// Send data using Preadv.
		r := io.NewSectionReader(&fs.FileReader{t, inFile}, int64(offset), count)
		n, err = io.Copy(w, r)
		// Copy out the new offset.
		if _, err := t.CopyOut(offsetAddr, n+int64(offset)); err != nil {
			return 0, nil, err
		}
		// If we don't have a provided offset.
	} else {
		// Send data using readv.
		r := &io.LimitedReader{R: &fs.FileReader{t, inFile}, N: count}
		n, err = io.Copy(w, r)
	}

	// We can only pass a single file to handleIOError, so pick inFile
	// arbitrarily.
	return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, "sendfile", inFile)
}
diff --git a/pkg/sentry/syscalls/linux/sys_futex.go b/pkg/sentry/syscalls/linux/sys_futex.go
new file mode 100644
index 000000000..57762d058
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_futex.go
@@ -0,0 +1,319 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"time"

	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// futexChecker is a futex.Checker that uses a Task's MemoryManager.
type futexChecker struct {
	t *kernel.Task
}

// Check checks if the address contains the given value, and returns
// syserror.EAGAIN if it doesn't. See Checker interface in futex package
// for more information.
func (f futexChecker) Check(addr uintptr, val uint32) error {
	in := f.t.CopyScratchBuffer(4)
	_, err := f.t.CopyInBytes(usermem.Addr(addr), in)
	if err != nil {
		return err
	}
	nval := usermem.ByteOrder.Uint32(in)
	if val != nval {
		return syserror.EAGAIN
	}
	return nil
}

// atomicOp atomically applies op to the uint32 at addr, retrying the
// compare-and-swap until it lands, and returns the value that was replaced.
func (f futexChecker) atomicOp(addr uintptr, op func(uint32) uint32) (uint32, error) {
	in := f.t.CopyScratchBuffer(4)
	_, err := f.t.CopyInBytes(usermem.Addr(addr), in)
	if err != nil {
		return 0, err
	}
	o := usermem.ByteOrder.Uint32(in)
	mm := f.t.MemoryManager()
	for {
		n := op(o)
		r, err := mm.CompareAndSwapUint32(f.t, usermem.Addr(addr), o, n, usermem.IOOpts{
			AddressSpaceActive: true,
		})
		if err != nil {
			return 0, err
		}

		if r == o {
			return o, nil
		}
		// Lost the race; retry against the value we actually observed.
		o = r
	}
}

// Op performs an operation on addr and returns a result based on the operation.
func (f futexChecker) Op(addr uintptr, opIn uint32) (bool, error) {
	// Decode the FUTEX_OP encoding: operation, comparison, and their
	// 12-bit arguments.
	op := (opIn >> 28) & 0xf
	cmp := (opIn >> 24) & 0xf
	opArg := (opIn >> 12) & 0xfff
	cmpArg := opIn & 0xfff

	if op&linux.FUTEX_OP_OPARG_SHIFT != 0 {
		opArg = 1 << opArg
		op &^= linux.FUTEX_OP_OPARG_SHIFT // clear flag
	}

	var oldVal uint32
	var err error
	switch op {
	case linux.FUTEX_OP_SET:
		oldVal, err = f.t.MemoryManager().SwapUint32(f.t, usermem.Addr(addr), opArg, usermem.IOOpts{
			AddressSpaceActive: true,
		})
	case linux.FUTEX_OP_ADD:
		oldVal, err = f.atomicOp(addr, func(a uint32) uint32 {
			return a + opArg
		})
	case linux.FUTEX_OP_OR:
		oldVal, err = f.atomicOp(addr, func(a uint32) uint32 {
			return a | opArg
		})
	case linux.FUTEX_OP_ANDN:
		oldVal, err = f.atomicOp(addr, func(a uint32) uint32 {
			return a & ^opArg
		})
	case linux.FUTEX_OP_XOR:
		oldVal, err = f.atomicOp(addr, func(a uint32) uint32 {
			return a ^ opArg
		})
	default:
		return false, syserror.ENOSYS
	}
	if err != nil {
		return false, err
	}

	// The comparison is applied to the value that was at addr before the
	// operation above.
	switch cmp {
	case linux.FUTEX_OP_CMP_EQ:
		return oldVal == cmpArg, nil
	case linux.FUTEX_OP_CMP_NE:
		return oldVal != cmpArg, nil
	case linux.FUTEX_OP_CMP_LT:
		return oldVal < cmpArg, nil
	case linux.FUTEX_OP_CMP_LE:
		return oldVal <= cmpArg, nil
	case linux.FUTEX_OP_CMP_GT:
		return oldVal > cmpArg, nil
	case linux.FUTEX_OP_CMP_GE:
		return oldVal >= cmpArg, nil
	default:
		return false, syserror.ENOSYS
	}
}

// futexWaitRestartBlock encapsulates the state required to restart futex(2)
// via restart_syscall(2).
type futexWaitRestartBlock struct {
	duration time.Duration

	// addr stored as uint64 since uintptr is not save-able.
	addr uint64

	val  uint32
	mask uint32
}

// Restart implements kernel.SyscallRestartBlock.Restart.
func (f *futexWaitRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
	return futexWaitDuration(t, f.duration, false, uintptr(f.addr), f.val, f.mask)
}

// futexWaitAbsolute performs a FUTEX_WAIT_BITSET, blocking until the wait is
// complete.
//
// The wait blocks forever if forever is true, otherwise it blocks until ts.
//
// If blocking is interrupted, the syscall is restarted with the original
// arguments.
func futexWaitAbsolute(t *kernel.Task, clockRealtime bool, ts linux.Timespec, forever bool, addr uintptr, val, mask uint32) (uintptr, error) {
	w := t.FutexWaiter()
	err := t.Futex().WaitPrepare(w, futexChecker{t}, addr, val, mask)
	if err != nil {
		return 0, err
	}

	if forever {
		err = t.Block(w.C)
	} else if clockRealtime {
		// An absolute CLOCK_REALTIME deadline needs an explicit
		// realtime-clock timer; the plain deadline path below uses the
		// monotonic clock.
		notifier, tchan := ktime.NewChannelNotifier()
		timer := ktime.NewTimer(t.Kernel().RealtimeClock(), notifier)
		timer.Swap(ktime.Setting{
			Enabled: true,
			Next:    ktime.FromTimespec(ts),
		})
		err = t.BlockWithTimer(w.C, tchan)
		timer.Destroy()
	} else {
		err = t.BlockWithDeadline(w.C, true, ktime.FromTimespec(ts))
	}

	t.Futex().WaitComplete(w)
	return 0, syserror.ConvertIntr(err, kernel.ERESTARTSYS)
}

// futexWaitDuration performs a FUTEX_WAIT, blocking until the wait is
// complete.
//
// The wait blocks forever if forever is true, otherwise is blocks for
// duration.
//
// If blocking is interrupted, forever determines how to restart the
// syscall. If forever is true, the syscall is restarted with the original
// arguments. If forever is false, duration is a relative timeout and the
// syscall is restarted with the remaining timeout.
func futexWaitDuration(t *kernel.Task, duration time.Duration, forever bool, addr uintptr, val, mask uint32) (uintptr, error) {
	w := t.FutexWaiter()
	err := t.Futex().WaitPrepare(w, futexChecker{t}, addr, val, mask)
	if err != nil {
		return 0, err
	}

	remaining, err := t.BlockWithTimeout(w.C, !forever, duration)
	t.Futex().WaitComplete(w)
	if err == nil {
		return 0, nil
	}

	// The wait was unsuccessful for some reason other than interruption. Simply
	// forward the error.
	if err != syserror.ErrInterrupted {
		return 0, err
	}

	// The wait was interrupted and we need to restart. Decide how.

	// The wait duration was absolute, restart with the original arguments.
	if forever {
		return 0, kernel.ERESTARTSYS
	}

	// The wait duration was relative, restart with the remaining duration.
	t.SetSyscallRestartBlock(&futexWaitRestartBlock{
		duration: remaining,
		addr:     uint64(addr),
		val:      val,
		mask:     mask,
	})
	return 0, kernel.ERESTART_RESTARTBLOCK
}

// Futex implements linux syscall futex(2).
// It provides a method for a program to wait for a value at a given address to
// change, and a method to wake up anyone waiting on a particular address.
func Futex(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	uaddr := args[0].Pointer()
	futexOp := args[1].Int()
	val := int(args[2].Int())
	// nreq (val2) and timeout share the fourth syscall argument slot;
	// which interpretation applies depends on the command, as in Linux.
	nreq := int(args[3].Int())
	timeout := args[3].Pointer()
	uaddr2 := args[4].Pointer()
	val3 := args[5].Int()

	addr := uintptr(uaddr)
	naddr := uintptr(uaddr2)
	cmd := futexOp &^ (linux.FUTEX_PRIVATE_FLAG | linux.FUTEX_CLOCK_REALTIME)
	clockRealtime := (futexOp & linux.FUTEX_CLOCK_REALTIME) == linux.FUTEX_CLOCK_REALTIME
	mask := uint32(val3)

	switch cmd {
	case linux.FUTEX_WAIT, linux.FUTEX_WAIT_BITSET:
		// WAIT{_BITSET} wait forever if the timeout isn't passed.
		forever := timeout == 0

		var timespec linux.Timespec
		if !forever {
			var err error
			timespec, err = copyTimespecIn(t, timeout)
			if err != nil {
				return 0, nil, err
			}
		}

		switch cmd {
		case linux.FUTEX_WAIT:
			// WAIT uses a relative timeout.
			mask = ^uint32(0)
			var timeoutDur time.Duration
			if !forever {
				timeoutDur = time.Duration(timespec.ToNsecCapped()) * time.Nanosecond
			}
			n, err := futexWaitDuration(t, timeoutDur, forever, addr, uint32(val), mask)
			return n, nil, err

		case linux.FUTEX_WAIT_BITSET:
			// WAIT_BITSET uses an absolute timeout which is either
			// CLOCK_MONOTONIC or CLOCK_REALTIME.
			if mask == 0 {
				return 0, nil, syserror.EINVAL
			}
			n, err := futexWaitAbsolute(t, clockRealtime, timespec, forever, addr, uint32(val), mask)
			return n, nil, err
		default:
			panic("unreachable")
		}

	case linux.FUTEX_WAKE:
		mask = ^uint32(0)
		fallthrough

	case linux.FUTEX_WAKE_BITSET:
		if mask == 0 {
			return 0, nil, syserror.EINVAL
		}
		n, err := t.Futex().Wake(addr, mask, val)
		return uintptr(n), nil, err

	case linux.FUTEX_REQUEUE:
		n, err := t.Futex().Requeue(addr, naddr, val, nreq)
		return uintptr(n), nil, err

	case linux.FUTEX_CMP_REQUEUE:
		// 'val3' contains the value to be checked at 'addr' and
		// 'val' is the number of waiters that should be woken up.
		nval := uint32(val3)
		n, err := t.Futex().RequeueCmp(futexChecker{t}, addr, nval, naddr, val, nreq)
		return uintptr(n), nil, err

	case linux.FUTEX_WAKE_OP:
		op := uint32(val3)
		n, err := t.Futex().WakeOp(futexChecker{t}, addr, naddr, val, nreq, op)
		return uintptr(n), nil, err

	case linux.FUTEX_LOCK_PI, linux.FUTEX_UNLOCK_PI, linux.FUTEX_TRYLOCK_PI, linux.FUTEX_WAIT_REQUEUE_PI, linux.FUTEX_CMP_REQUEUE_PI:
		// We don't support any priority inversion futexes.
		return 0, nil, syserror.ENOSYS

	default:
		// We don't even know about this command.
		return 0, nil, syserror.ENOSYS
	}
}
diff --git a/pkg/sentry/syscalls/linux/sys_getdents.go b/pkg/sentry/syscalls/linux/sys_getdents.go
new file mode 100644
index 000000000..178714b07
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_getdents.go
@@ -0,0 +1,269 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"bytes"
	"io"
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/binary"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// Getdents implements linux syscall getdents(2) for 64bit systems.
func Getdents(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	addr := args[1].Pointer()
	size := int(args[2].Uint())

	minSize := int(smallestDirent(t.Arch()))
	if size < minSize {
		// size is smaller than smallest possible dirent.
		return 0, nil, syserror.EINVAL
	}

	n, err := getdents(t, fd, addr, size, (*dirent).Serialize)
	return n, nil, err
}

// Getdents64 implements linux syscall getdents64(2).
func Getdents64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	addr := args[1].Pointer()
	size := int(args[2].Uint())

	minSize := int(smallestDirent64(t.Arch()))
	if size < minSize {
		// size is smaller than smallest possible dirent.
		return 0, nil, syserror.EINVAL
	}

	n, err := getdents(t, fd, addr, size, (*dirent).Serialize64)
	return n, nil, err
}

// getdents implements the core of getdents(2)/getdents64(2).
// f is the syscall implementation dirent serialization function.
func getdents(t *kernel.Task, fd kdefs.FD, addr usermem.Addr, size int, f func(*dirent, io.Writer) (int, error)) (uintptr, error) {
	dir := t.FDMap().GetFile(fd)
	if dir == nil {
		return 0, syserror.EBADF
	}
	defer dir.DecRef()

	// Serialize directly into the caller's buffer.
	w := &usermem.IOReadWriter{
		Ctx:  t,
		IO:   t.MemoryManager(),
		Addr: addr,
		Opts: usermem.IOOpts{
			AddressSpaceActive: true,
		},
	}

	ds := newDirentSerializer(f, w, t.Arch(), size)
	rerr := dir.Readdir(t, ds)

	switch err := handleIOError(t, ds.Written() > 0, rerr, kernel.ERESTARTSYS, "getdents", dir); err {
	case nil:
		dir.Dirent.InotifyEvent(syscall.IN_ACCESS, 0)
		return uintptr(ds.Written()), nil
	case io.EOF:
		// End of directory: zero entries, no error.
		return 0, nil
	default:
		return 0, err
	}
}

// oldDirentHdr is a fixed sized header matching the fixed size
// fields found in the old linux dirent struct.
type oldDirentHdr struct {
	Ino    uint64
	Off    uint64
	Reclen uint16
}

// direntHdr is a fixed sized header matching the fixed size
// fields found in the new linux dirent struct.
type direntHdr struct {
	OldHdr oldDirentHdr
	Typ    uint8
}

// dirent contains the data pointed to by a new linux dirent struct.
type dirent struct {
	Hdr  direntHdr
	Name []byte
}

// newDirent returns a dirent from an fs.InodeOperationsInfo.
func newDirent(width uint, name string, attr fs.DentAttr, offset uint64) *dirent {
	d := &dirent{
		Hdr: direntHdr{
			OldHdr: oldDirentHdr{
				Ino: attr.InodeID,
				Off: offset,
			},
			Typ: toType(attr.Type),
		},
		Name: []byte(name),
	}
	d.Hdr.OldHdr.Reclen = d.padRec(int(width))
	return d
}

// smallestDirent returns the size of the smallest possible dirent using
// the old linux dirent format.
func smallestDirent(a arch.Context) uint {
	d := dirent{}
	return uint(binary.Size(d.Hdr.OldHdr)) + a.Width() + 1
}

// smallestDirent64 returns the size of the smallest possible dirent using
// the new linux dirent format.
func smallestDirent64(a arch.Context) uint {
	d := dirent{}
	return uint(binary.Size(d.Hdr)) + a.Width()
}

// toType converts an fs.InodeOperationsInfo to a linux dirent typ field.
func toType(nodeType fs.InodeType) uint8 {
	switch nodeType {
	case fs.RegularFile, fs.SpecialFile:
		return syscall.DT_REG
	case fs.Symlink:
		return syscall.DT_LNK
	case fs.Directory:
		return syscall.DT_DIR
	case fs.Pipe:
		return syscall.DT_FIFO
	case fs.CharacterDevice:
		return syscall.DT_CHR
	case fs.BlockDevice:
		return syscall.DT_BLK
	case fs.Socket:
		return syscall.DT_SOCK
	default:
		return syscall.DT_UNKNOWN
	}
}

// padRec pads the name field until the rec length is a multiple of the width,
// which must be a power of 2. It returns the padded rec length.
func (d *dirent) padRec(width int) uint16 {
	a := int(binary.Size(d.Hdr)) + len(d.Name)
	// NOTE(review): (a + width) &^ (width - 1) always adds between 1 and
	// width bytes of padding (even when a is already aligned); the
	// old-format Serialize appears to rely on the extra room for its
	// trailing type byte — confirm before changing to the usual
	// (a + width - 1) rounding.
	r := (a + width) &^ (width - 1)
	padding := r - a
	d.Name = append(d.Name, make([]byte, padding)...)
	return uint16(r)
}

// Serialize64 serializes a Dirent struct to a byte slice, keeping the new
// linux dirent format. Returns the number of bytes serialized or an error.
func (d *dirent) Serialize64(w io.Writer) (int, error) {
	n1, err := w.Write(binary.Marshal(nil, usermem.ByteOrder, d.Hdr))
	if err != nil {
		return 0, err
	}
	n2, err := w.Write(d.Name)
	if err != nil {
		return 0, err
	}
	return n1 + n2, nil
}

// Serialize serializes a Dirent struct to a byte slice, using the old linux
// dirent format.
// Returns the number of bytes serialized or an error.
func (d *dirent) Serialize(w io.Writer) (int, error) {
	n1, err := w.Write(binary.Marshal(nil, usermem.ByteOrder, d.Hdr.OldHdr))
	if err != nil {
		return 0, err
	}
	n2, err := w.Write(d.Name)
	if err != nil {
		return 0, err
	}
	// In the old format the type byte trails the (padded) name.
	n3, err := w.Write([]byte{d.Hdr.Typ})
	if err != nil {
		return 0, err
	}
	return n1 + n2 + n3, nil
}

// direntSerializer implements fs.InodeOperationsInfoSerializer, serializing dirents to an
// io.Writer.
type direntSerializer struct {
	serialize func(*dirent, io.Writer) (int, error)
	w         io.Writer
	// width is the arch native value width.
	width uint
	// offset is the current dirent offset.
	offset uint64
	// written is the total bytes serialized.
	written int
	// size is the size of the buffer to serialize into.
	size int
}

// newDirentSerializer returns a direntSerializer that writes at most size
// bytes of dirents, serialized by f, to w.
func newDirentSerializer(f func(d *dirent, w io.Writer) (int, error), w io.Writer, ac arch.Context, size int) *direntSerializer {
	return &direntSerializer{
		serialize: f,
		w:         w,
		width:     ac.Width(),
		size:      size,
	}
}

// CopyOut implements fs.InodeOperationsInfoSerializer.CopyOut.
// It serializes and writes the fs.DentAttr to the direntSerializer io.Writer.
func (ds *direntSerializer) CopyOut(name string, attr fs.DentAttr) error {
	ds.offset++

	d := newDirent(ds.width, name, attr, ds.offset)

	// Serialize dirent into a temp buffer.
	var b bytes.Buffer
	n, err := ds.serialize(d, &b)
	if err != nil {
		// Roll the offset back on any failure so the entry can be
		// retried on the next getdents call.
		ds.offset--
		return err
	}

	// Check that we have enough room remaining to write the dirent.
	if n > (ds.size - ds.written) {
		ds.offset--
		return io.EOF
	}

	// Write out the temp buffer.
	if _, err := b.WriteTo(ds.w); err != nil {
		ds.offset--
		return err
	}

	ds.written += n
	return nil
}

// Written returns the total number of bytes written.
func (ds *direntSerializer) Written() int {
	return ds.written
}
diff --git a/pkg/sentry/syscalls/linux/sys_identity.go b/pkg/sentry/syscalls/linux/sys_identity.go
new file mode 100644
index 000000000..4fd0ed794
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_identity.go
@@ -0,0 +1,180 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

const (
	// As NGROUPS_MAX in include/uapi/linux/limits.h.
	maxNGroups = 65536
)

// Getuid implements the Linux syscall getuid.
func Getuid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	c := t.Credentials()
	// Map the kernel ID into the task's user namespace; IDs with no
	// mapping surface as the overflow ID.
	ruid := c.RealKUID.In(c.UserNamespace).OrOverflow()
	return uintptr(ruid), nil, nil
}

// Geteuid implements the Linux syscall geteuid.
func Geteuid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	c := t.Credentials()
	euid := c.EffectiveKUID.In(c.UserNamespace).OrOverflow()
	return uintptr(euid), nil, nil
}

// Getresuid implements the Linux syscall getresuid.
func Getresuid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	ruidAddr := args[0].Pointer()
	euidAddr := args[1].Pointer()
	suidAddr := args[2].Pointer()
	c := t.Credentials()
	ruid := c.RealKUID.In(c.UserNamespace).OrOverflow()
	euid := c.EffectiveKUID.In(c.UserNamespace).OrOverflow()
	suid := c.SavedKUID.In(c.UserNamespace).OrOverflow()
	if _, err := t.CopyOut(ruidAddr, ruid); err != nil {
		return 0, nil, err
	}
	if _, err := t.CopyOut(euidAddr, euid); err != nil {
		return 0, nil, err
	}
	if _, err := t.CopyOut(suidAddr, suid); err != nil {
		return 0, nil, err
	}
	return 0, nil, nil
}

// Getgid implements the Linux syscall getgid.
func Getgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	c := t.Credentials()
	rgid := c.RealKGID.In(c.UserNamespace).OrOverflow()
	return uintptr(rgid), nil, nil
}

// Getegid implements the Linux syscall getegid.
func Getegid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	c := t.Credentials()
	egid := c.EffectiveKGID.In(c.UserNamespace).OrOverflow()
	return uintptr(egid), nil, nil
}

// Getresgid implements the Linux syscall getresgid.
func Getresgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	rgidAddr := args[0].Pointer()
	egidAddr := args[1].Pointer()
	sgidAddr := args[2].Pointer()
	c := t.Credentials()
	rgid := c.RealKGID.In(c.UserNamespace).OrOverflow()
	egid := c.EffectiveKGID.In(c.UserNamespace).OrOverflow()
	sgid := c.SavedKGID.In(c.UserNamespace).OrOverflow()
	if _, err := t.CopyOut(rgidAddr, rgid); err != nil {
		return 0, nil, err
	}
	if _, err := t.CopyOut(egidAddr, egid); err != nil {
		return 0, nil, err
	}
	if _, err := t.CopyOut(sgidAddr, sgid); err != nil {
		return 0, nil, err
	}
	return 0, nil, nil
}

// Setuid implements the Linux syscall setuid.
func Setuid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	uid := auth.UID(args[0].Int())
	return 0, nil, t.SetUID(uid)
}

// Setreuid implements the Linux syscall setreuid.
func Setreuid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	ruid := auth.UID(args[0].Int())
	euid := auth.UID(args[1].Int())
	return 0, nil, t.SetREUID(ruid, euid)
}

// Setresuid implements the Linux syscall setresuid.
func Setresuid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	ruid := auth.UID(args[0].Int())
	euid := auth.UID(args[1].Int())
	suid := auth.UID(args[2].Int())
	return 0, nil, t.SetRESUID(ruid, euid, suid)
}

// Setgid implements the Linux syscall setgid.
func Setgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	gid := auth.GID(args[0].Int())
	return 0, nil, t.SetGID(gid)
}

// Setregid implements the Linux syscall setregid.
func Setregid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	rgid := auth.GID(args[0].Int())
	egid := auth.GID(args[1].Int())
	return 0, nil, t.SetREGID(rgid, egid)
}

// Setresgid implements the Linux syscall setresgid.
func Setresgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	rgid := auth.GID(args[0].Int())
	egid := auth.GID(args[1].Int())
	sgid := auth.GID(args[2].Int())
	return 0, nil, t.SetRESGID(rgid, egid, sgid)
}

// Getgroups implements the Linux syscall getgroups.
func Getgroups(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	size := int(args[0].Int())
	if size < 0 {
		return 0, nil, syserror.EINVAL
	}
	kgids := t.Credentials().ExtraKGIDs
	// "If size is zero, list is not modified, but the total number of
	// supplementary group IDs for the process is returned." - getgroups(2)
	if size == 0 {
		return uintptr(len(kgids)), nil, nil
	}
	if size < len(kgids) {
		return 0, nil, syserror.EINVAL
	}
	gids := make([]auth.GID, len(kgids))
	for i, kgid := range kgids {
		gids[i] = kgid.In(t.UserNamespace()).OrOverflow()
	}
	if _, err := t.CopyOut(args[1].Pointer(), gids); err != nil {
		return 0, nil, err
	}
	return uintptr(len(gids)), nil, nil
}

// Setgroups implements the Linux syscall setgroups.
func Setgroups(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	size := args[0].Int()
	if size < 0 || size > maxNGroups {
		return 0, nil, syserror.EINVAL
	}
	if size == 0 {
		// An empty list clears the supplementary groups.
		return 0, nil, t.SetExtraGIDs(nil)
	}
	gids := make([]auth.GID, size)
	if _, err := t.CopyIn(args[1].Pointer(), &gids); err != nil {
		return 0, nil, err
	}
	return 0, nil, t.SetExtraGIDs(gids)
}
diff --git a/pkg/sentry/syscalls/linux/sys_inotify.go b/pkg/sentry/syscalls/linux/sys_inotify.go
new file mode 100644
index 000000000..725204dff
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_inotify.go
@@ -0,0 +1,135 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs/anon"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs"
)

// allFlags is the set of flags accepted by inotify_init1(2); anything else
// is rejected with EINVAL.
const allFlags = int(linux.IN_NONBLOCK | linux.IN_CLOEXEC)

// InotifyInit1 implements the inotify_init1() syscalls.
func InotifyInit1(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	flags := int(args[0].Int())

	if flags&^allFlags != 0 {
		return 0, nil, syscall.EINVAL
	}

	// An inotify fd is backed by an anonymous inode; the fs.Inotify value
	// supplies the file operations.
	dirent := fs.NewDirent(anon.NewInode(t), "inotify")
	fileFlags := fs.FileFlags{
		Read:        true,
		Write:       true,
		NonBlocking: flags&linux.IN_NONBLOCK != 0,
	}
	n := fs.NewFile(t, dirent, fileFlags, fs.NewInotify(t))
	// NewFDFrom takes its own reference; drop ours on all paths.
	defer n.DecRef()

	fd, err := t.FDMap().NewFDFrom(0, n, kernel.FDFlags{
		CloseOnExec: flags&linux.IN_CLOEXEC != 0,
	}, t.ThreadGroup().Limits())

	if err != nil {
		return 0, nil, err
	}

	return uintptr(fd), nil, nil
}

// InotifyInit implements the inotify_init() syscalls.
func InotifyInit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	// inotify_init(2) takes no flags; force the flags argument to zero and
	// share the inotify_init1 implementation.
	args[0].Value = 0
	return InotifyInit1(t, args)
}

// fdToInotify resolves an fd to an inotify object. If successful, the file will
// have an extra ref and the caller is responsible for releasing the ref.
func fdToInotify(t *kernel.Task, fd kdefs.FD) (*fs.Inotify, *fs.File, error) {
	file := t.FDMap().GetFile(fd)
	if file == nil {
		// Invalid fd.
		return nil, nil, syscall.EBADF
	}

	ino, ok := file.FileOperations.(*fs.Inotify)
	if !ok {
		// Not an inotify fd; drop the reference GetFile took before
		// failing.
		file.DecRef()
		return nil, nil, syscall.EINVAL
	}

	return ino, file, nil
}

// InotifyAddWatch implements the inotify_add_watch() syscall.
func InotifyAddWatch(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	addr := args[1].Pointer()
	mask := args[2].Uint()

	// "IN_DONT_FOLLOW: Don't dereference pathname if it is a symbolic link."
	// -- inotify(7)
	resolve := mask&linux.IN_DONT_FOLLOW == 0

	// "EINVAL: The given event mask contains no valid events."
	// -- inotify_add_watch(2)
	if validBits := mask & linux.ALL_INOTIFY_BITS; validBits == 0 {
		return 0, nil, syscall.EINVAL
	}

	ino, file, err := fdToInotify(t, fd)
	if err != nil {
		return 0, nil, err
	}
	// fdToInotify took an extra reference on success; release it on return.
	defer file.DecRef()

	path, _, err := copyInPath(t, addr, false /* allowEmpty */)
	if err != nil {
		return 0, nil, err
	}

	err = fileOpOn(t, linux.AT_FDCWD, path, resolve, func(root *fs.Dirent, dirent *fs.Dirent) error {
		// "IN_ONLYDIR: Only watch pathname if it is a directory." -- inotify(7)
		if onlyDir := mask&linux.IN_ONLYDIR != 0; onlyDir && !fs.IsDir(dirent.Inode.StableAttr) {
			return syscall.ENOTDIR
		}

		// Copy out to the return frame. Note that fd is reused here to
		// carry the watch descriptor out of the closure.
		fd = kdefs.FD(ino.AddWatch(dirent, mask))

		return nil
	})
	return uintptr(fd), nil, err // Return from the existing value.
}

// InotifyRmWatch implements the inotify_rm_watch() syscall.
func InotifyRmWatch(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	wd := args[1].Int()

	ino, file, err := fdToInotify(t, fd)
	if err != nil {
		return 0, nil, err
	}
	// fdToInotify took an extra reference on success; release it on return.
	defer file.DecRef()
	return 0, nil, ino.RmWatch(wd)
}
diff --git a/pkg/sentry/syscalls/linux/sys_lseek.go b/pkg/sentry/syscalls/linux/sys_lseek.go
new file mode 100644
index 000000000..97b51ba7c
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_lseek.go
@@ -0,0 +1,55 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// Lseek implements linux syscall lseek(2). +func Lseek(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + offset := args[1].Int64() + whence := args[2].Int() + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + var sw fs.SeekWhence + switch whence { + case 0: + sw = fs.SeekSet + case 1: + sw = fs.SeekCurrent + case 2: + sw = fs.SeekEnd + default: + return 0, nil, syserror.EINVAL + } + + offset, serr := file.Seek(t, sw, offset) + err := handleIOError(t, false /* partialResult */, serr, kernel.ERESTARTSYS, "lseek", file) + if err != nil { + return 0, nil, err + } + return uintptr(offset), nil, err +} diff --git a/pkg/sentry/syscalls/linux/sys_mmap.go b/pkg/sentry/syscalls/linux/sys_mmap.go new file mode 100644 index 000000000..2c7d41de0 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_mmap.go @@ -0,0 +1,435 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"bytes"

	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/memmap"
	"gvisor.googlesource.com/gvisor/pkg/sentry/mm"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// Brk implements linux syscall brk(2).
func Brk(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	// The error is deliberately discarded:
	addr, _ := t.MemoryManager().Brk(t, args[0].Pointer())
	// "However, the actual Linux system call returns the new program break on
	// success. On failure, the system call returns the current break." -
	// brk(2)
	return uintptr(addr), nil, nil
}

// Mmap implements linux syscall mmap(2).
func Mmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	prot := args[2].Int()
	flags := args[3].Int()
	fd := kdefs.FD(args[4].Int())
	fixed := flags&linux.MAP_FIXED != 0
	private := flags&linux.MAP_PRIVATE != 0
	shared := flags&linux.MAP_SHARED != 0
	anon := flags&linux.MAP_ANONYMOUS != 0

	// Require exactly one of MAP_PRIVATE and MAP_SHARED.
	if private == shared {
		return 0, nil, syserror.EINVAL
	}

	opts := memmap.MMapOpts{
		Length:  args[1].Uint64(),
		Offset:  args[5].Uint64(),
		Addr:    args[0].Pointer(),
		Fixed:   fixed,
		// MAP_FIXED also implies unmapping any existing mapping in range.
		Unmap:   fixed,
		Private: private,
		Perms: usermem.AccessType{
			Read:    linux.PROT_READ&prot != 0,
			Write:   linux.PROT_WRITE&prot != 0,
			Execute: linux.PROT_EXEC&prot != 0,
		},
		MaxPerms:  usermem.AnyAccess,
		GrowsDown: linux.MAP_GROWSDOWN&flags != 0,
		Precommit: linux.MAP_POPULATE&flags != 0,
	}
	// ConfigureMMap may install a MappingIdentity with a reference; make
	// sure it is released if we fail (or once MMap has taken its own ref).
	defer func() {
		if opts.MappingIdentity != nil {
			opts.MappingIdentity.DecRef()
		}
	}()

	if !anon {
		// Convert the passed FD to a file reference.
		file := t.FDMap().GetFile(fd)
		if file == nil {
			return 0, nil, syserror.EBADF
		}
		defer file.DecRef()

		flags := file.Flags()
		// mmap unconditionally requires that the FD is readable.
		if !flags.Read {
			return 0, nil, syserror.EACCES
		}
		// MAP_SHARED requires that the FD be writable for PROT_WRITE.
		if shared && !flags.Write {
			opts.MaxPerms.Write = false
		}

		if err := file.ConfigureMMap(t, &opts); err != nil {
			return 0, nil, err
		}
	}

	rv, err := t.MemoryManager().MMap(t, opts)
	return uintptr(rv), nil, err
}

// Munmap implements linux syscall munmap(2).
func Munmap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	return 0, nil, t.MemoryManager().MUnmap(t, args[0].Pointer(), args[1].Uint64())
}

// Mremap implements linux syscall mremap(2).
func Mremap(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	oldAddr := args[0].Pointer()
	oldSize := args[1].Uint64()
	newSize := args[2].Uint64()
	flags := args[3].Uint64()
	newAddr := args[4].Pointer()

	if flags&^(linux.MREMAP_MAYMOVE|linux.MREMAP_FIXED) != 0 {
		return 0, nil, syserror.EINVAL
	}
	mayMove := flags&linux.MREMAP_MAYMOVE != 0
	fixed := flags&linux.MREMAP_FIXED != 0
	var moveMode mm.MRemapMoveMode
	switch {
	case !mayMove && !fixed:
		moveMode = mm.MRemapNoMove
	case mayMove && !fixed:
		moveMode = mm.MRemapMayMove
	case mayMove && fixed:
		moveMode = mm.MRemapMustMove
	case !mayMove && fixed:
		// "If MREMAP_FIXED is specified, then MREMAP_MAYMOVE must also be
		// specified." - mremap(2)
		return 0, nil, syserror.EINVAL
	}

	rv, err := t.MemoryManager().MRemap(t, oldAddr, oldSize, newSize, mm.MRemapOpts{
		Move:    moveMode,
		NewAddr: newAddr,
	})
	return uintptr(rv), nil, err
}

// Mprotect implements linux syscall mprotect(2).
func Mprotect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	length := args[1].Uint64()
	prot := args[2].Int()
	err := t.MemoryManager().MProtect(args[0].Pointer(), length, usermem.AccessType{
		Read:    linux.PROT_READ&prot != 0,
		Write:   linux.PROT_WRITE&prot != 0,
		Execute: linux.PROT_EXEC&prot != 0,
	}, linux.PROT_GROWSDOWN&prot != 0)
	return 0, nil, err
}

// Madvise implements linux syscall madvise(2).
func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	length := uint64(args[1].SizeT())
	adv := args[2].Int()

	// "The Linux implementation requires that the address addr be
	// page-aligned, and allows length to be zero." - madvise(2)
	if addr.RoundDown() != addr {
		return 0, nil, syserror.EINVAL
	}
	if length == 0 {
		return 0, nil, nil
	}
	// Not explicitly stated: length need not be page-aligned.
	lenAddr, ok := usermem.Addr(length).RoundUp()
	if !ok {
		return 0, nil, syserror.EINVAL
	}
	length = uint64(lenAddr)

	switch adv {
	case linux.MADV_DONTNEED:
		// MADV_DONTNEED has observable semantics (subsequent reads see
		// zeroes for anonymous memory), so it is actually implemented.
		return 0, nil, t.MemoryManager().Decommit(addr, length)
	case linux.MADV_HUGEPAGE, linux.MADV_NOHUGEPAGE:
		fallthrough
	case linux.MADV_MERGEABLE, linux.MADV_UNMERGEABLE:
		fallthrough
	case linux.MADV_NORMAL, linux.MADV_RANDOM, linux.MADV_SEQUENTIAL, linux.MADV_WILLNEED:
		// Do nothing, we totally ignore the suggestions above.
		return 0, nil, nil
	case linux.MADV_REMOVE, linux.MADV_DOFORK, linux.MADV_DONTFORK:
		// These "suggestions" have application-visible side effects, so we
		// have to indicate that we don't support them.
		return 0, nil, syserror.ENOSYS
	case linux.MADV_HWPOISON:
		// Only privileged processes are allowed to poison pages.
		return 0, nil, syserror.EPERM
	default:
		// If adv is not a valid value tell the caller.
		return 0, nil, syserror.EINVAL
	}
}

// copyOutIfNotNull copies val to ptr, but silently succeeds (copying
// nothing) when ptr is the null address.
func copyOutIfNotNull(t *kernel.Task, ptr usermem.Addr, val interface{}) (int, error) {
	if ptr != 0 {
		return t.CopyOut(ptr, val)
	}
	return 0, nil
}

// GetMempolicy implements the syscall get_mempolicy(2).
func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	mode := args[0].Pointer()
	nodemask := args[1].Pointer()
	maxnode := args[2].Uint()
	addr := args[3].Pointer()
	flags := args[4].Uint()

	memsAllowed := flags&linux.MPOL_F_MEMS_ALLOWED != 0
	nodeFlag := flags&linux.MPOL_F_NODE != 0
	addrFlag := flags&linux.MPOL_F_ADDR != 0

	// TODO: Once sysfs is implemented, report a single numa node in
	// /sys/devices/system/node.
	if nodemask != 0 && maxnode < 1 {
		return 0, nil, syserror.EINVAL
	}

	// 'addr' provided iff 'addrFlag' set.
	if addrFlag == (addr == 0) {
		return 0, nil, syserror.EINVAL
	}

	// Default policy for the thread.
	if flags == 0 {
		policy, nodemaskVal := t.NumaPolicy()
		if _, err := copyOutIfNotNull(t, mode, policy); err != nil {
			return 0, nil, syserror.EFAULT
		}
		if _, err := copyOutIfNotNull(t, nodemask, nodemaskVal); err != nil {
			return 0, nil, syserror.EFAULT
		}
		return 0, nil, nil
	}

	// Report all nodes available to caller.
	if memsAllowed {
		// MPOL_F_NODE and MPOL_F_ADDR not allowed with MPOL_F_MEMS_ALLOWED.
		if nodeFlag || addrFlag {
			return 0, nil, syserror.EINVAL
		}

		// Report a single numa node.
		if _, err := copyOutIfNotNull(t, nodemask, uint32(0x1)); err != nil {
			return 0, nil, syserror.EFAULT
		}
		return 0, nil, nil
	}

	if addrFlag {
		if nodeFlag {
			// Return the id for the node where 'addr' resides, via 'mode'.
			//
			// The real get_mempolicy(2) allocates the page referenced by 'addr'
			// by simulating a read, if it is unallocated before the call. It
			// then returns the node the page is allocated on through the mode
			// pointer.
			b := t.CopyScratchBuffer(1)
			_, err := t.CopyInBytes(addr, b)
			if err != nil {
				return 0, nil, syserror.EFAULT
			}
			// Only one node exists, so it is always node 0.
			if _, err := copyOutIfNotNull(t, mode, int32(0)); err != nil {
				return 0, nil, syserror.EFAULT
			}
		} else {
			storedPolicy, _ := t.NumaPolicy()
			// Return the policy governing the memory referenced by 'addr'.
			if _, err := copyOutIfNotNull(t, mode, int32(storedPolicy)); err != nil {
				return 0, nil, syserror.EFAULT
			}
		}
		return 0, nil, nil
	}

	storedPolicy, _ := t.NumaPolicy()
	if nodeFlag && (storedPolicy&^linux.MPOL_MODE_FLAGS == linux.MPOL_INTERLEAVE) {
		// Policy for current thread is to interleave memory between
		// nodes. Return the next node we'll allocate on. Since we only have a
		// single node, this is always node 0.
		if _, err := copyOutIfNotNull(t, mode, int32(0)); err != nil {
			return 0, nil, syserror.EFAULT
		}
		return 0, nil, nil
	}

	return 0, nil, syserror.EINVAL
}

// allowedNodesMask returns the bitmask of nodes that are NOT available,
// i.e. the complement of the valid-node bits.
//
// NOTE(review): the name suggests the mask of allowed nodes, but callers use
// it as `nodemaskVal&allowedNodesMask() != 0` to detect disallowed nodes —
// the returned value is actually the disallowed mask.
func allowedNodesMask() uint32 {
	const maxNodes = 1
	return ^uint32((1 << maxNodes) - 1)
}

// SetMempolicy implements the syscall set_mempolicy(2).
func SetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	modeWithFlags := args[0].Int()
	nodemask := args[1].Pointer()
	maxnode := args[2].Uint()

	if maxnode < 1 {
		return 0, nil, syserror.EINVAL
	}

	if modeWithFlags&linux.MPOL_MODE_FLAGS == linux.MPOL_MODE_FLAGS {
		// Can't specify multiple modes simultaneously. Must also contain a
		// valid mode, which we check below.
		return 0, nil, syserror.EINVAL
	}

	mode := modeWithFlags &^ linux.MPOL_MODE_FLAGS
	if mode < 0 || mode >= linux.MPOL_MAX {
		return 0, nil, syserror.EINVAL
	}

	var nodemaskVal uint32
	if _, err := t.CopyIn(nodemask, &nodemaskVal); err != nil {
		return 0, nil, syserror.EFAULT
	}

	// When setting MPOL_INTERLEAVE, nodemask must not be empty.
	if mode == linux.MPOL_INTERLEAVE && nodemaskVal == 0 {
		return 0, nil, syserror.EINVAL
	}

	if nodemaskVal&allowedNodesMask() != 0 {
		// Invalid node specified.
		return 0, nil, syserror.EINVAL
	}

	t.SetNumaPolicy(int32(modeWithFlags), nodemaskVal)

	return 0, nil, nil
}

// Mincore implements the syscall mincore(2).
func Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	length := args[1].SizeT()
	vec := args[2].Pointer()

	if addr != addr.RoundDown() {
		return 0, nil, syserror.EINVAL
	}
	// "The length argument need not be a multiple of the page size, but since
	// residency information is returned for whole pages, length is effectively
	// rounded up to the next multiple of the page size." - mincore(2)
	la, ok := usermem.Addr(length).RoundUp()
	if !ok {
		return 0, nil, syserror.ENOMEM
	}
	ar, ok := addr.ToRange(uint64(la))
	if !ok {
		return 0, nil, syserror.ENOMEM
	}

	// Pretend that all mapped pages are "resident in core".
	mapped := t.MemoryManager().VirtualMemorySizeRange(ar)
	// "ENOMEM: addr to addr + length contained unmapped memory."
	if mapped != uint64(la) {
		return 0, nil, syserror.ENOMEM
	}
	// One byte per page, each set to 1 ("resident").
	resident := bytes.Repeat([]byte{1}, int(mapped/usermem.PageSize))
	_, err := t.CopyOut(vec, resident)
	return 0, nil, err
}

// Msync implements Linux syscall msync(2).
func Msync(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	length := args[1].SizeT()
	flags := args[2].Int()

	if addr != addr.RoundDown() {
		return 0, nil, syserror.EINVAL
	}
	if length == 0 {
		return 0, nil, nil
	}
	la, ok := usermem.Addr(length).RoundUp()
	if !ok {
		return 0, nil, syserror.ENOMEM
	}
	// "The flags argument should specify exactly one of MS_ASYNC and MS_SYNC,
	// and may additionally include the MS_INVALIDATE bit. ... However, Linux
	// permits a call to msync() that specifies neither of these flags, with
	// semantics that are (currently) equivalent to specifying MS_ASYNC." -
	// msync(2)
	if flags&^(linux.MS_ASYNC|linux.MS_SYNC|linux.MS_INVALIDATE) != 0 {
		return 0, nil, syserror.EINVAL
	}
	sync := flags&linux.MS_SYNC != 0
	if sync && flags&linux.MS_ASYNC != 0 {
		return 0, nil, syserror.EINVAL
	}

	// MS_INVALIDATE "asks to invalidate other mappings of the same file (so
	// that they can be updated with the fresh values just written)". This is a
	// no-op given that shared memory exists. However, MS_INVALIDATE can also
	// be used to detect mlocks: "EBUSY: MS_INVALIDATE was specified in flags,
	// and a memory lock exists for the specified address range." Given that
	// mlock is stubbed out, it's unsafe to pass MS_INVALIDATE silently since
	// some user program could be using it for synchronization.
	if flags&linux.MS_INVALIDATE != 0 {
		return 0, nil, syserror.EINVAL
	}
	// MS_SYNC "requests an update and waits for it to complete."
	if sync {
		err := t.MemoryManager().Sync(t, addr, uint64(la))
		// Sync calls fsync, the same interrupt conversion rules apply, see
		// mm/msync.c, fsync POSIX.1-2008.
		return 0, nil, syserror.ConvertIntr(err, kernel.ERESTARTSYS)
	}
	// MS_ASYNC "specifies that an update be scheduled, but the call returns
	// immediately". As long as dirty pages are tracked and eventually written
	// back, this is a no-op. (Correspondingly: "Since Linux 2.6.19, MS_ASYNC
	// is in fact a no-op, since the kernel properly tracks dirty pages and
	// flushes them to storage as necessary.")
	//
	// However: "ENOMEM: The indicated memory (or part of it) was not mapped."
	// This applies even for MS_ASYNC.
	ar, ok := addr.ToRange(uint64(la))
	if !ok {
		return 0, nil, syserror.ENOMEM
	}
	mapped := t.MemoryManager().VirtualMemorySizeRange(ar)
	if mapped != uint64(la) {
		return 0, nil, syserror.ENOMEM
	}
	return 0, nil, nil
}
diff --git a/pkg/sentry/syscalls/linux/sys_mount.go b/pkg/sentry/syscalls/linux/sys_mount.go
new file mode 100644
index 000000000..d70b79e4f
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_mount.go
@@ -0,0 +1,140 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// Mount implements Linux syscall mount(2).
func Mount(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	sourceAddr := args[0].Pointer()
	targetAddr := args[1].Pointer()
	typeAddr := args[2].Pointer()
	flags := args[3].Uint64()
	dataAddr := args[4].Pointer()

	fsType, err := t.CopyInString(typeAddr, usermem.PageSize)
	if err != nil {
		return 0, nil, err
	}

	sourcePath, _, err := copyInPath(t, sourceAddr, true /* allowEmpty */)
	if err != nil {
		return 0, nil, err
	}

	targetPath, _, err := copyInPath(t, targetAddr, false /* allowEmpty */)
	if err != nil {
		return 0, nil, err
	}

	// In Linux, a full page is always copied in regardless of null
	// character placement, and the address is passed to each file system.
	// Most file systems always treat this data as a string, though, and so
	// do all of the ones we implement.
	data, err := t.CopyInString(dataAddr, usermem.PageSize)
	if err != nil {
		return 0, nil, err
	}

	// Ignore magic value that was required before Linux 2.4.
	if flags&linux.MS_MGC_MSK == linux.MS_MGC_VAL {
		flags = flags &^ linux.MS_MGC_MSK
	}

	// Must have CAP_SYS_ADMIN in the mount namespace's associated user
	// namespace.
	if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, t.MountNamespace().UserNamespace()) {
		return 0, nil, syserror.EPERM
	}

	const unsupportedOps = linux.MS_REMOUNT | linux.MS_BIND |
		linux.MS_SHARED | linux.MS_PRIVATE | linux.MS_SLAVE |
		linux.MS_UNBINDABLE | linux.MS_MOVE

	// Silently allow MS_NOSUID, since we don't implement set-id bits
	// anyway.
	const unsupportedFlags = linux.MS_NODEV | linux.MS_NOEXEC |
		linux.MS_NODIRATIME | linux.MS_STRICTATIME

	// Linux just allows passing any flags to mount(2) - it won't fail when
	// unknown or unsupported flags are passed. Since we don't implement
	// everything, we fail explicitly on flags that are unimplemented.
	if flags&(unsupportedOps|unsupportedFlags) != 0 {
		return 0, nil, syserror.EINVAL
	}

	rsys, ok := fs.FindFilesystem(fsType)
	if !ok {
		return 0, nil, syserror.ENODEV
	}
	if !rsys.AllowUserMount() {
		return 0, nil, syserror.EPERM
	}

	var superFlags fs.MountSourceFlags
	if flags&linux.MS_NOATIME == linux.MS_NOATIME {
		superFlags.NoAtime = true
	}
	if flags&linux.MS_RDONLY == linux.MS_RDONLY {
		superFlags.ReadOnly = true
	}

	rootInode, err := rsys.Mount(t, sourcePath, superFlags, data)
	if err != nil {
		// NOTE(review): the concrete mount error is discarded and mapped
		// to EINVAL unconditionally — confirm this is intentional.
		return 0, nil, syserror.EINVAL
	}

	return 0, nil, fileOpOn(t, linux.AT_FDCWD, targetPath, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error {
		return t.MountNamespace().Mount(t, d, rootInode)
	})
}

// Umount2 implements Linux syscall umount2(2).
func Umount2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	flags := args[1].Int()

	const unsupported = linux.MNT_FORCE | linux.MNT_EXPIRE
	if flags&unsupported != 0 {
		return 0, nil, syserror.EINVAL
	}

	path, _, err := copyInPath(t, addr, false /* allowEmpty */)
	if err != nil {
		return 0, nil, err
	}

	// Must have CAP_SYS_ADMIN in the mount namespace's associated user
	// namespace.
	//
	// Currently, this is always the init task's user namespace.
	if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, t.MountNamespace().UserNamespace()) {
		return 0, nil, syserror.EPERM
	}

	// UMOUNT_NOFOLLOW suppresses symlink resolution on the final path
	// component; MNT_DETACH requests a lazy unmount.
	resolve := flags&linux.UMOUNT_NOFOLLOW != linux.UMOUNT_NOFOLLOW
	detachOnly := flags&linux.MNT_DETACH == linux.MNT_DETACH

	return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, resolve, func(root *fs.Dirent, d *fs.Dirent) error {
		return t.MountNamespace().Unmount(t, d, detachOnly)
	})
}
diff --git a/pkg/sentry/syscalls/linux/sys_pipe.go b/pkg/sentry/syscalls/linux/sys_pipe.go
new file mode 100644
index 000000000..3efc06a27
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_pipe.go
@@ -0,0 +1,78 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/pipe"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)

// pipe2 implements the actual system call with flags.
func pipe2(t *kernel.Task, addr usermem.Addr, flags uint) (uintptr, error) {
	// Only O_NONBLOCK and O_CLOEXEC are valid pipe2(2) flags.
	if flags&^(syscall.O_NONBLOCK|syscall.O_CLOEXEC) != 0 {
		return 0, syscall.EINVAL
	}
	r, w := pipe.NewConnectedPipe(t, pipe.DefaultPipeSize, usermem.PageSize)

	// NewFDFrom takes its own references below; drop ours on all paths.
	r.SetFlags(linuxToFlags(flags).Settable())
	defer r.DecRef()

	w.SetFlags(linuxToFlags(flags).Settable())
	defer w.DecRef()

	rfd, err := t.FDMap().NewFDFrom(0, r, kernel.FDFlags{
		CloseOnExec: flags&syscall.O_CLOEXEC != 0},
		t.ThreadGroup().Limits())
	if err != nil {
		return 0, err
	}

	wfd, err := t.FDMap().NewFDFrom(0, w, kernel.FDFlags{
		CloseOnExec: flags&syscall.O_CLOEXEC != 0},
		t.ThreadGroup().Limits())
	if err != nil {
		// Roll back the read end already installed in the FD table.
		t.FDMap().Remove(rfd)
		return 0, err
	}

	if _, err := t.CopyOut(addr, []kdefs.FD{rfd, wfd}); err != nil {
		// Roll back both ends if the fd pair cannot be copied out.
		t.FDMap().Remove(rfd)
		t.FDMap().Remove(wfd)
		return 0, syscall.EFAULT
	}
	return 0, nil
}

// Pipe implements linux syscall pipe(2).
func Pipe(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()

	// pipe(2) is pipe2(2) with no flags.
	n, err := pipe2(t, addr, 0)
	return n, nil, err
}

// Pipe2 implements linux syscall pipe2(2).
func Pipe2(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	flags := uint(args[1].Uint())

	n, err := pipe2(t, addr, flags)
	return n, nil, err
}
diff --git a/pkg/sentry/syscalls/linux/sys_poll.go b/pkg/sentry/syscalls/linux/sys_poll.go
new file mode 100644
index 000000000..d4dbfd285
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_poll.go
@@ -0,0 +1,429 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/limits" + "gvisor.googlesource.com/gvisor/pkg/sentry/syscalls" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// fileCap is the maximum allowable files for poll & select. +const fileCap = 1024 * 1024 + +// Masks for "readable", "writable", and "exceptional" events as defined by +// select(2). +const ( + // selectReadEvents is analogous to the Linux kernel's + // fs/select.c:POLLIN_SET. + selectReadEvents = waiter.EventIn | waiter.EventHUp | waiter.EventErr + + // selectWriteEvents is analogous to the Linux kernel's + // fs/select.c:POLLOUT_SET. + selectWriteEvents = waiter.EventOut | waiter.EventErr + + // selectExceptEvents is analogous to the Linux kernel's + // fs/select.c:POLLEX_SET. 
	selectExceptEvents = waiter.EventPri
)

// doPoll implements the common logic of poll(2) and ppoll(2): it copies the
// pollfd array in from pfdAddr, polls with the given timeout, and copies the
// entries (including revents) back out. It returns the remaining timeout so
// that an interrupted call can be restarted.
func doPoll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Duration) (time.Duration, uintptr, error) {
	if uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) {
		return timeout, 0, syserror.EINVAL
	}

	pfd := make([]syscalls.PollFD, nfds)
	if nfds > 0 {
		if _, err := t.CopyIn(pfdAddr, &pfd); err != nil {
			return timeout, 0, err
		}
	}

	// Compatibility warning: Linux adds POLLHUP and POLLERR just before
	// polling, in fs/select.c:do_pollfd(). Since pfd is copied out after
	// polling, changing event masks here is an application-visible difference.
	// (Linux also doesn't copy out event masks at all, only revents.)
	for i := range pfd {
		pfd[i].Events |= waiter.EventHUp | waiter.EventErr
	}
	remainingTimeout, n, err := syscalls.Poll(t, pfd, timeout)
	err = syserror.ConvertIntr(err, syserror.EINTR)

	// The poll entries are copied out regardless of whether
	// any are set or not. This aligns with the Linux behavior.
	if nfds > 0 && err == nil {
		if _, err := t.CopyOut(pfdAddr, pfd); err != nil {
			return remainingTimeout, 0, err
		}
	}

	return remainingTimeout, n, err
}

// doSelect implements the common logic of select(2) and pselect(2): the three
// FD bitsets are copied in, converted to a PollFD slice, polled, and the bits
// whose events did not occur are cleared before the bitsets are copied back
// out. It returns the total number of bits that remain set.
func doSelect(t *kernel.Task, nfds int, readFDs, writeFDs, exceptFDs usermem.Addr, timeout time.Duration) (uintptr, error) {
	if nfds < 0 || uint64(nfds) > t.ThreadGroup().Limits().GetCapped(limits.NumberOfFiles, fileCap) {
		return 0, syserror.EINVAL
	}

	// Capture all the provided input vectors.
	//
	// N.B. This only works on little-endian architectures.
	byteCount := (nfds + 7) / 8
	bitsInLastPartialByte := uint(nfds % 8)
	r := make([]byte, byteCount)
	w := make([]byte, byteCount)
	e := make([]byte, byteCount)

	if readFDs != 0 {
		if _, err := t.CopyIn(readFDs, &r); err != nil {
			return 0, err
		}
		// Mask out bits above nfds.
		if bitsInLastPartialByte != 0 {
			r[byteCount-1] &^= byte(0xff) << bitsInLastPartialByte
		}
	}

	if writeFDs != 0 {
		if _, err := t.CopyIn(writeFDs, &w); err != nil {
			return 0, err
		}
		// Mask out bits above nfds.
		if bitsInLastPartialByte != 0 {
			w[byteCount-1] &^= byte(0xff) << bitsInLastPartialByte
		}
	}

	if exceptFDs != 0 {
		if _, err := t.CopyIn(exceptFDs, &e); err != nil {
			return 0, err
		}
		// Mask out bits above nfds.
		if bitsInLastPartialByte != 0 {
			e[byteCount-1] &^= byte(0xff) << bitsInLastPartialByte
		}
	}

	// Count how many FDs are actually being requested so that we can build
	// a PollFD array.
	fdCount := 0
	for i := 0; i < byteCount; i++ {
		v := r[i] | w[i] | e[i]
		for v != 0 {
			// Kernighan's trick: clear the lowest set bit per iteration.
			v &= (v - 1)
			fdCount++
		}
	}

	// Build the PollFD array.
	pfd := make([]syscalls.PollFD, 0, fdCount)
	fd := kdefs.FD(0)
	for i := 0; i < byteCount; i++ {
		rV, wV, eV := r[i], w[i], e[i]
		v := rV | wV | eV
		m := byte(1)
		for j := 0; j < 8; j++ {
			if (v & m) != 0 {
				// Make sure the fd is valid and decrement the reference
				// immediately to ensure we don't leak. Note, another thread
				// might be about to close fd. This is racy, but that's
				// OK. Linux is racy in the same way.
				file := t.FDMap().GetFile(fd)
				if file == nil {
					return 0, syserror.EBADF
				}
				file.DecRef()

				mask := waiter.EventMask(0)
				if (rV & m) != 0 {
					mask |= selectReadEvents
				}

				if (wV & m) != 0 {
					mask |= selectWriteEvents
				}

				if (eV & m) != 0 {
					mask |= selectExceptEvents
				}

				pfd = append(pfd, syscalls.PollFD{
					FD:     fd,
					Events: mask,
				})
			}

			fd++
			m <<= 1
		}
	}

	// Do the syscall, then count the number of bits set.
	_, _, err := syscalls.Poll(t, pfd, timeout)
	if err != nil {
		return 0, syserror.ConvertIntr(err, syserror.EINTR)
	}

	// r, w, and e are currently event mask bitsets; unset bits corresponding
	// to events that *didn't* occur.
	bitSetCount := uintptr(0)
	for idx := range pfd {
		events := pfd[idx].REvents
		i, j := pfd[idx].FD/8, uint(pfd[idx].FD%8)
		m := byte(1) << j
		if r[i]&m != 0 {
			if (events & selectReadEvents) != 0 {
				bitSetCount++
			} else {
				r[i] &^= m
			}
		}
		if w[i]&m != 0 {
			if (events & selectWriteEvents) != 0 {
				bitSetCount++
			} else {
				w[i] &^= m
			}
		}
		if e[i]&m != 0 {
			if (events & selectExceptEvents) != 0 {
				bitSetCount++
			} else {
				e[i] &^= m
			}
		}
	}

	// Copy updated vectors back.
	if readFDs != 0 {
		if _, err := t.CopyOut(readFDs, r); err != nil {
			return 0, err
		}
	}

	if writeFDs != 0 {
		if _, err := t.CopyOut(writeFDs, w); err != nil {
			return 0, err
		}
	}

	if exceptFDs != 0 {
		if _, err := t.CopyOut(exceptFDs, e); err != nil {
			return 0, err
		}
	}

	return bitSetCount, nil
}

// timeoutRemaining returns the amount of time remaining for the specified
// timeout or 0 if it has elapsed.
//
// startNs must be from CLOCK_MONOTONIC.
func timeoutRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration) time.Duration {
	now := t.Kernel().MonotonicClock().Now()
	remaining := timeout - now.Sub(startNs)
	if remaining < 0 {
		remaining = 0
	}
	return remaining
}

// copyOutTimespecRemaining copies the time remaining in timeout to timespecAddr.
//
// startNs must be from CLOCK_MONOTONIC.
func copyOutTimespecRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timespecAddr usermem.Addr) error {
	if timeout <= 0 {
		return nil
	}
	remaining := timeoutRemaining(t, startNs, timeout)
	tsRemaining := linux.NsecToTimespec(remaining.Nanoseconds())
	return copyTimespecOut(t, timespecAddr, &tsRemaining)
}

// copyOutTimevalRemaining copies the time remaining in timeout to timevalAddr.
//
// startNs must be from CLOCK_MONOTONIC.
func copyOutTimevalRemaining(t *kernel.Task, startNs ktime.Time, timeout time.Duration, timevalAddr usermem.Addr) error {
	if timeout <= 0 {
		return nil
	}
	remaining := timeoutRemaining(t, startNs, timeout)
	tvRemaining := linux.NsecToTimeval(remaining.Nanoseconds())
	return copyTimevalOut(t, timevalAddr, &tvRemaining)
}

// pollRestartBlock encapsulates the state required to restart poll(2) via
// restart_syscall(2).
type pollRestartBlock struct {
	pfdAddr usermem.Addr
	nfds    uint
	timeout time.Duration
}

// Restart implements kernel.SyscallRestartBlock.Restart.
func (p *pollRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
	return poll(t, p.pfdAddr, p.nfds, p.timeout)
}

// poll wraps doPoll and, when the poll is interrupted, saves a restart block
// so that restart_syscall(2) resumes with the remaining (not the original)
// timeout.
func poll(t *kernel.Task, pfdAddr usermem.Addr, nfds uint, timeout time.Duration) (uintptr, error) {
	remainingTimeout, n, err := doPoll(t, pfdAddr, nfds, timeout)
	// On an interrupt poll(2) is restarted with the remaining timeout.
	if err == syserror.EINTR {
		t.SetSyscallRestartBlock(&pollRestartBlock{
			pfdAddr: pfdAddr,
			nfds:    nfds,
			timeout: remainingTimeout,
		})
		return 0, kernel.ERESTART_RESTARTBLOCK
	}
	return n, err
}

// Poll implements linux syscall poll(2).
func Poll(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	pfdAddr := args[0].Pointer()
	nfds := uint(args[1].Uint()) // poll(2) uses unsigned long.
	timeout := time.Duration(args[2].Int()) * time.Millisecond
	n, err := poll(t, pfdAddr, nfds, timeout)
	return n, nil, err
}

// Ppoll implements linux syscall ppoll(2).
func Ppoll(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	pfdAddr := args[0].Pointer()
	nfds := uint(args[1].Uint()) // poll(2) uses unsigned long.
	timespecAddr := args[2].Pointer()
	maskAddr := args[3].Pointer()
	maskSize := uint(args[4].Uint())

	timeout, err := copyTimespecInToDuration(t, timespecAddr)
	if err != nil {
		return 0, nil, err
	}

	var startNs ktime.Time
	if timeout > 0 {
		startNs = t.Kernel().MonotonicClock().Now()
	}

	// Temporarily install the caller-supplied signal mask for the duration of
	// the poll; the saved mask is restored by the signal machinery.
	if maskAddr != 0 {
		mask, err := copyInSigSet(t, maskAddr, maskSize)
		if err != nil {
			return 0, nil, err
		}

		oldmask := t.SignalMask()
		t.SetSignalMask(mask)
		t.SetSavedSignalMask(oldmask)
	}

	_, n, err := doPoll(t, pfdAddr, nfds, timeout)
	copyErr := copyOutTimespecRemaining(t, startNs, timeout, timespecAddr)
	// doPoll returns EINTR if interrupted, but ppoll is normally restartable
	// if interrupted by something other than a signal handled by the
	// application (i.e. returns ERESTARTNOHAND). However, if
	// copyOutTimespecRemaining failed, then the restarted ppoll would use the
	// wrong timeout, so the error should be left as EINTR.
	//
	// Note that this means that if err is nil but copyErr is not, copyErr is
	// ignored. This is consistent with Linux.
	if err == syserror.EINTR && copyErr == nil {
		err = kernel.ERESTARTNOHAND
	}
	return n, nil, err
}

// Select implements linux syscall select(2).
func Select(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	nfds := int(args[0].Int()) // select(2) uses an int.
	readFDs := args[1].Pointer()
	writeFDs := args[2].Pointer()
	exceptFDs := args[3].Pointer()
	timevalAddr := args[4].Pointer()

	// Use a negative Duration to indicate "no timeout".
	timeout := time.Duration(-1)
	if timevalAddr != 0 {
		timeval, err := copyTimevalIn(t, timevalAddr)
		if err != nil {
			return 0, nil, err
		}
		if timeval.Sec < 0 || timeval.Usec < 0 {
			return 0, nil, syserror.EINVAL
		}
		timeout = time.Duration(timeval.ToNsecCapped())
	}
	startNs := t.Kernel().MonotonicClock().Now()
	n, err := doSelect(t, nfds, readFDs, writeFDs, exceptFDs, timeout)
	copyErr := copyOutTimevalRemaining(t, startNs, timeout, timevalAddr)
	// See comment in Ppoll.
	if err == syserror.EINTR && copyErr == nil {
		err = kernel.ERESTARTNOHAND
	}
	return n, nil, err
}

// Pselect implements linux syscall pselect(2).
func Pselect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	nfds := int(args[0].Int()) // select(2) uses an int.
	readFDs := args[1].Pointer()
	writeFDs := args[2].Pointer()
	exceptFDs := args[3].Pointer()
	timespecAddr := args[4].Pointer()
	maskWithSizeAddr := args[5].Pointer()

	timeout, err := copyTimespecInToDuration(t, timespecAddr)
	if err != nil {
		return 0, nil, err
	}

	var startNs ktime.Time
	if timeout > 0 {
		startNs = t.Kernel().MonotonicClock().Now()
	}

	// pselect(2) passes a pointer to a {sigset pointer, sigset size} pair.
	if maskWithSizeAddr != 0 {
		maskAddr, size, err := copyInSigSetWithSize(t, maskWithSizeAddr)
		if err != nil {
			return 0, nil, err
		}

		if maskAddr != 0 {
			mask, err := copyInSigSet(t, maskAddr, size)
			if err != nil {
				return 0, nil, err
			}
			oldmask := t.SignalMask()
			t.SetSignalMask(mask)
			t.SetSavedSignalMask(oldmask)
		}
	}

	n, err := doSelect(t, nfds, readFDs, writeFDs, exceptFDs, timeout)
	copyErr := copyOutTimespecRemaining(t, startNs, timeout, timespecAddr)
	// See comment in Ppoll.
	if err == syserror.EINTR && copyErr == nil {
		err = kernel.ERESTARTNOHAND
	}
	return n, nil, err
}
diff --git a/pkg/sentry/syscalls/linux/sys_prctl.go b/pkg/sentry/syscalls/linux/sys_prctl.go
new file mode 100644
index 000000000..2ca7471cf
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_prctl.go
@@ -0,0 +1,188 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/bpf"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
)

// userSockFprog is equivalent to Linux's struct sock_fprog on amd64.
type userSockFprog struct {
	// Len is the length of the filter in BPF instructions.
	Len uint16

	_ [6]byte // padding for alignment

	// Filter is a user pointer to the struct sock_filter array that makes up
	// the filter program. Filter is a uint64 rather than a usermem.Addr
	// because usermem.Addr is actually uintptr, which is not a fixed-size
	// type, and encoding/binary.Read objects to this.
	Filter uint64
}

// Prctl implements linux syscall prctl(2).
// It has a list of subfunctions which operate on the process.
// The arguments are
// all based on each subfunction.
func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	option := args[0].Int()

	switch option {
	case linux.PR_SET_PDEATHSIG:
		sig := linux.Signal(args[1].Int())
		// Signal 0 clears the parent-death signal.
		if sig != 0 && !sig.IsValid() {
			return 0, nil, syscall.EINVAL
		}
		t.SetParentDeathSignal(sig)
		return 0, nil, nil

	case linux.PR_GET_PDEATHSIG:
		_, err := t.CopyOut(args[1].Pointer(), int32(t.ParentDeathSignal()))
		return 0, nil, err

	case linux.PR_GET_KEEPCAPS:
		if t.Credentials().KeepCaps {
			return 1, nil, nil
		}

		return 0, nil, nil

	case linux.PR_SET_KEEPCAPS:
		val := args[1].Int()
		// prctl(2): arg2 must be either 0 (permitted capabilities are cleared)
		// or 1 (permitted capabilities are kept).
		if val == 0 {
			t.SetKeepCaps(false)
		} else if val == 1 {
			t.SetKeepCaps(true)
		} else {
			return 0, nil, syscall.EINVAL
		}

		return 0, nil, nil

	case linux.PR_SET_NAME:
		addr := args[1].Pointer()
		name, err := t.CopyInString(addr, linux.TASK_COMM_LEN-1)
		// An over-long name is silently truncated rather than rejected.
		if err != nil && err != syscall.ENAMETOOLONG {
			return 0, nil, err
		}
		t.SetName(name)

	case linux.PR_GET_NAME:
		addr := args[1].Pointer()
		buf := make([]byte, linux.TASK_COMM_LEN)
		len := copy(buf, t.Name())
		// NUL-terminate if the name does not fill the buffer.
		if len < linux.TASK_COMM_LEN {
			buf[len] = 0
			len++
		}
		_, err := t.CopyOut(addr, buf[:len])
		if err != nil {
			return 0, nil, err
		}

	case linux.PR_SET_MM:
		switch args[1].Int() {
		case linux.PR_SET_MM_EXE_FILE:
			fd := kdefs.FD(args[2].Int())

			file := t.FDMap().GetFile(fd)
			if file == nil {
				return 0, nil, syscall.EBADF
			}
			defer file.DecRef()

			// Are they trying to set exe to a non-file?
			if !fs.IsFile(file.Dirent.Inode.StableAttr) {
				return 0, nil, syscall.EBADF
			}

			// Set the underlying executable.
			t.MemoryManager().SetExecutable(file.Dirent)
		default:
			return 0, nil, syscall.EINVAL
		}

	case linux.PR_SET_NO_NEW_PRIVS:
		if args[1].Int() != 1 || args[2].Int() != 0 || args[3].Int() != 0 || args[4].Int() != 0 {
			return 0, nil, syscall.EINVAL
		}
		// no_new_privs is assumed to always be set. See
		// auth.Credentials.UpdateForExec.
		return 0, nil, nil

	case linux.PR_GET_NO_NEW_PRIVS:
		if args[1].Int() != 0 || args[2].Int() != 0 || args[3].Int() != 0 || args[4].Int() != 0 {
			return 0, nil, syscall.EINVAL
		}
		return 1, nil, nil

	case linux.PR_SET_SECCOMP:
		if args[1].Int() != linux.SECCOMP_MODE_FILTER {
			// Unsupported mode.
			return 0, nil, syscall.EINVAL
		}
		var fprog userSockFprog
		if _, err := t.CopyIn(args[2].Pointer(), &fprog); err != nil {
			return 0, nil, err
		}
		filter := make([]linux.BPFInstruction, int(fprog.Len))
		if _, err := t.CopyIn(usermem.Addr(fprog.Filter), &filter); err != nil {
			return 0, nil, err
		}
		compiledFilter, err := bpf.Compile(filter)
		if err != nil {
			t.Debugf("Invalid seccomp-bpf filter: %v", err)
			return 0, nil, syscall.EINVAL
		}
		return 0, nil, t.AppendSyscallFilter(compiledFilter)

	case linux.PR_GET_SECCOMP:
		return uintptr(t.SeccompMode()), nil, nil

	case linux.PR_CAPBSET_READ:
		cp := linux.Capability(args[1].Uint64())
		if !cp.Ok() {
			return 0, nil, syscall.EINVAL
		}
		var rv uintptr
		if auth.CapabilitySetOf(cp)&t.Credentials().BoundingCaps != 0 {
			rv = 1
		}
		return rv, nil, nil

	case linux.PR_CAPBSET_DROP:
		cp := linux.Capability(args[1].Uint64())
		if !cp.Ok() {
			return 0, nil, syscall.EINVAL
		}
		return 0, nil, t.DropBoundingCapability(cp)

	default:
		t.Warningf("Unsupported prctl %d", option)
		return 0, nil, syscall.EINVAL
	}

	return 0, nil, nil
}
diff --git a/pkg/sentry/syscalls/linux/sys_random.go b/pkg/sentry/syscalls/linux/sys_random.go
new file mode 100644
index 000000000..2dd59b1c3
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_random.go
@@ -0,0 +1,92
@@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"crypto/rand"
	"io"
	"math"

	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/safemem"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

const (
	_GRND_NONBLOCK = 0x1
	_GRND_RANDOM   = 0x2
)

// GetRandom implements the linux syscall getrandom(2).
//
// In a multi-tenant/shared environment, the only valid implementation is to
// fetch data from the urandom pool, otherwise starvation attacks become
// possible. The urandom pool is also expected to have plenty of entropy, thus
// the GRND_RANDOM flag is ignored. The GRND_NONBLOCK flag does not apply, as
// the pool will already be initialized.
func GetRandom(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	length := args[1].SizeT()
	flags := args[2].Int()

	// Flags are checked for validity but otherwise ignored. See above.
	if flags & ^(_GRND_NONBLOCK|_GRND_RANDOM) != 0 {
		return 0, nil, syserror.EINVAL
	}

	if length > math.MaxInt32 {
		length = math.MaxInt32
	}
	ar, ok := addr.ToRange(uint64(length))
	if !ok {
		return 0, nil, syserror.EFAULT
	}

	// "If the urandom source has been initialized, reads of up to 256 bytes
	// will always return as many bytes as requested and will not be
	// interrupted by signals. No such guarantees apply for larger buffer
	// sizes." - getrandom(2)
	min := int(length)
	if min > 256 {
		min = 256
	}
	n, err := t.MemoryManager().CopyOutFrom(t, usermem.AddrRangeSeqOf(ar), safemem.FromIOReader{&randReader{-1, min}}, usermem.IOOpts{
		AddressSpaceActive: true,
	})
	// A short copy that still satisfied the 256-byte guarantee is reported
	// as success; errors are only surfaced below that threshold.
	if n >= int64(min) {
		return uintptr(n), nil, nil
	}
	return 0, nil, err
}

// randReader is an io.Reader that handles partial reads from rand.Reader.
//
// NOTE(review): done is initialized to -1 above and is never updated by Read,
// so the ReadAtLeast path is taken on every call and its floor is effectively
// min-done = min+1 (capped at len(dst)) — confirm this off-by-one is intended.
type randReader struct {
	done int
	min  int
}

// Read implements io.Reader.Read.
func (r *randReader) Read(dst []byte) (int, error) {
	if r.done >= r.min {
		return rand.Reader.Read(dst)
	}
	min := r.min - r.done
	if min > len(dst) {
		min = len(dst)
	}
	return io.ReadAtLeast(rand.Reader, dst, min)
}
diff --git a/pkg/sentry/syscalls/linux/sys_read.go b/pkg/sentry/syscalls/linux/sys_read.go
new file mode 100644
index 000000000..0be2d195a
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_read.go
@@ -0,0 +1,274 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
	"gvisor.googlesource.com/gvisor/pkg/waiter"
)

const (
	// EventMaskRead contains events that can be triggered on reads.
	EventMaskRead = waiter.EventIn | waiter.EventHUp | waiter.EventErr
)

// Read implements linux syscall read(2). Note that we try to get a buffer that
// is exactly the size requested because some applications like qemu expect
// they can do large reads all at once. Bug for bug. Same for other read
// calls below.
func Read(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	addr := args[1].Pointer()
	size := args[2].SizeT()

	file := t.FDMap().GetFile(fd)
	if file == nil {
		return 0, nil, syserror.EBADF
	}
	defer file.DecRef()

	// Check that the file is readable.
	if !file.Flags().Read {
		return 0, nil, syserror.EBADF
	}

	// Check that the size is legitimate.
	si := int(size)
	if si < 0 {
		return 0, nil, syserror.EINVAL
	}

	// Get the destination of the read.
	dst, err := t.SingleIOSequence(addr, si, usermem.IOOpts{
		AddressSpaceActive: true,
	})
	if err != nil {
		return 0, nil, err
	}

	n, err := readv(t, file, dst)
	t.IOUsage().AccountReadSyscall(n)
	return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, "read", file)
}

// Pread64 implements linux syscall pread64(2).
func Pread64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	addr := args[1].Pointer()
	size := args[2].SizeT()
	offset := args[3].Int64()

	file := t.FDMap().GetFile(fd)
	if file == nil {
		return 0, nil, syserror.EBADF
	}
	defer file.DecRef()

	// Check that the offset is legitimate.
	if offset < 0 {
		return 0, nil, syserror.EINVAL
	}

	// Is reading at an offset supported?
	if !file.Flags().Pread {
		return 0, nil, syserror.ESPIPE
	}

	// Check that the file is readable.
	if !file.Flags().Read {
		return 0, nil, syserror.EBADF
	}

	// Check that the size is legitimate.
	si := int(size)
	if si < 0 {
		return 0, nil, syserror.EINVAL
	}

	// Get the destination of the read.
	dst, err := t.SingleIOSequence(addr, si, usermem.IOOpts{
		AddressSpaceActive: true,
	})
	if err != nil {
		return 0, nil, err
	}

	n, err := preadv(t, file, dst, offset)
	t.IOUsage().AccountReadSyscall(n)
	return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, "pread64", file)
}

// Readv implements linux syscall readv(2).
func Readv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	addr := args[1].Pointer()
	iovcnt := int(args[2].Int())

	file := t.FDMap().GetFile(fd)
	if file == nil {
		return 0, nil, syserror.EBADF
	}
	defer file.DecRef()

	// Check that the file is readable.
	if !file.Flags().Read {
		return 0, nil, syserror.EBADF
	}

	// Read the iovecs that specify the destination of the read.
	dst, err := t.IovecsIOSequence(addr, iovcnt, usermem.IOOpts{
		AddressSpaceActive: true,
	})
	if err != nil {
		return 0, nil, err
	}

	n, err := readv(t, file, dst)
	t.IOUsage().AccountReadSyscall(n)
	return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, "readv", file)
}

// Preadv implements linux syscall preadv(2).
func Preadv(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	fd := kdefs.FD(args[0].Int())
	addr := args[1].Pointer()
	iovcnt := int(args[2].Int())
	offset := args[3].Int64()

	file := t.FDMap().GetFile(fd)
	if file == nil {
		return 0, nil, syserror.EBADF
	}
	defer file.DecRef()

	// Check that the offset is legitimate.
	if offset < 0 {
		return 0, nil, syserror.EINVAL
	}

	// Is reading at an offset supported?
	if !file.Flags().Pread {
		return 0, nil, syserror.ESPIPE
	}

	// Check that the file is readable.
	if !file.Flags().Read {
		return 0, nil, syserror.EBADF
	}

	// Read the iovecs that specify the destination of the read.
	dst, err := t.IovecsIOSequence(addr, iovcnt, usermem.IOOpts{
		AddressSpaceActive: true,
	})
	if err != nil {
		return 0, nil, err
	}

	n, err := preadv(t, file, dst, offset)
	t.IOUsage().AccountReadSyscall(n)
	return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, "preadv", file)
}

// readv issues a read on f into dst, blocking (unless f is non-blocking)
// until the read completes with something other than "would block". It posts
// an IN_ACCESS inotify event if any bytes were read.
func readv(t *kernel.Task, f *fs.File, dst usermem.IOSequence) (int64, error) {
	n, err := f.Readv(t, dst)
	if err != syserror.ErrWouldBlock || f.Flags().NonBlocking {
		if n > 0 {
			// Queue notification if we read anything.
			f.Dirent.InotifyEvent(linux.IN_ACCESS, 0)
		}
		return n, err
	}

	// Register for notifications.
	w, ch := waiter.NewChannelEntry(nil)
	f.EventRegister(&w, EventMaskRead)

	total := n
	for {
		// Shorten dst to reflect bytes previously read.
		dst = dst.DropFirst64(n)

		// Issue the request and break out if it completes with anything
		// other than "would block".
		n, err = f.Readv(t, dst)
		total += n
		if err != syserror.ErrWouldBlock {
			break
		}

		// Wait for a notification that we should retry.
		if err = t.Block(ch); err != nil {
			break
		}
	}

	f.EventUnregister(&w)

	if total > 0 {
		// Queue notification if we read anything.
		f.Dirent.InotifyEvent(linux.IN_ACCESS, 0)
	}

	return total, err
}

// preadv is the offset-based counterpart of readv; the file offset is not
// advanced, and each retry reads at offset plus the bytes already read.
func preadv(t *kernel.Task, f *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	n, err := f.Preadv(t, dst, offset)
	if err != syserror.ErrWouldBlock || f.Flags().NonBlocking {
		if n > 0 {
			// Queue notification if we read anything.
			f.Dirent.InotifyEvent(linux.IN_ACCESS, 0)
		}
		return n, err
	}

	// Register for notifications.
	w, ch := waiter.NewChannelEntry(nil)
	f.EventRegister(&w, EventMaskRead)

	total := n
	for {
		// Shorten dst to reflect bytes previously read.
		dst = dst.DropFirst64(n)

		// Issue the request and break out if it completes with anything
		// other than "would block".
		n, err = f.Preadv(t, dst, offset+total)
		total += n
		if err != syserror.ErrWouldBlock {
			break
		}

		// Wait for a notification that we should retry.
		if err = t.Block(ch); err != nil {
			break
		}
	}

	f.EventUnregister(&w)

	if total > 0 {
		// Queue notification if we read anything.
		f.Dirent.InotifyEvent(linux.IN_ACCESS, 0)
	}

	return total, err
}
diff --git a/pkg/sentry/syscalls/linux/sys_rlimit.go b/pkg/sentry/syscalls/linux/sys_rlimit.go
new file mode 100644
index 000000000..481e79eaa
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_rlimit.go
@@ -0,0 +1,217 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
	"gvisor.googlesource.com/gvisor/pkg/sentry/limits"
	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
	"gvisor.googlesource.com/gvisor/pkg/syserror"
)

// rlimit describes an implementation of 'struct rlimit', which may vary from
// system-to-system.
type rlimit interface {
	// toLimit converts an rlimit to a limits.Limit.
	toLimit() *limits.Limit

	// fromLimit converts a limits.Limit to an rlimit.
	fromLimit(lim limits.Limit)

	// copyIn copies an rlimit from the untrusted app to the kernel.
	copyIn(t *kernel.Task, addr usermem.Addr) error

	// copyOut copies an rlimit from the kernel to the untrusted app.
	copyOut(t *kernel.Task, addr usermem.Addr) error
}

// newRlimit returns the appropriate rlimit type for 'struct rlimit' on this system.
func newRlimit(t *kernel.Task) (rlimit, error) {
	switch t.Arch().Width() {
	case 8:
		// On 64-bit system, struct rlimit and struct rlimit64 are identical.
		return &rlimit64{}, nil
	default:
		return nil, syserror.ENOSYS
	}
}

// rlimit64 is the 64-bit representation of 'struct rlimit'.
type rlimit64 struct {
	Cur uint64
	Max uint64
}

func (r *rlimit64) toLimit() *limits.Limit {
	return &limits.Limit{
		Cur: limits.FromLinux(r.Cur),
		Max: limits.FromLinux(r.Max),
	}
}

func (r *rlimit64) fromLimit(lim limits.Limit) {
	*r = rlimit64{
		Cur: limits.ToLinux(lim.Cur),
		Max: limits.ToLinux(lim.Max),
	}
}

func (r *rlimit64) copyIn(t *kernel.Task, addr usermem.Addr) error {
	_, err := t.CopyIn(addr, r)
	return err
}

func (r *rlimit64) copyOut(t *kernel.Task, addr usermem.Addr) error {
	_, err := t.CopyOut(addr, *r)
	return err
}

// makeRlimit64 builds an rlimit64 directly from a limits.Limit.
//
// NOTE(review): unlike fromLimit, this copies Cur/Max without applying
// limits.ToLinux — confirm whether skipping that conversion is intended for
// the prlimit64(2) old-limit copy-out path.
func makeRlimit64(lim limits.Limit) *rlimit64 {
	return &rlimit64{Cur: lim.Cur, Max: lim.Max}
}

// setableLimits is the set of supported setable limits.
var setableLimits = map[limits.LimitType]struct{}{
	limits.NumberOfFiles: {},
	limits.AS:            {},
	limits.CPU:           {},
	limits.Data:          {},
	limits.FileSize:      {},
	limits.Stack:         {},
	// These are not enforced, but we include them here to avoid returning
	// EPERM, since some apps expect them to succeed.
	limits.Core:         {},
	limits.ProcessCount: {},
}

// prlimit64 returns the current limit for resource; if newLim is non-nil it
// first replaces the limit (when resource is setable) and returns the
// previous value.
func prlimit64(t *kernel.Task, resource limits.LimitType, newLim *limits.Limit) (limits.Limit, error) {
	if newLim == nil {
		return t.ThreadGroup().Limits().Get(resource), nil
	}

	if _, ok := setableLimits[resource]; !ok {
		return limits.Limit{}, syserror.EPERM
	}
	oldLim, err := t.ThreadGroup().Limits().Set(resource, *newLim)
	if err != nil {
		return limits.Limit{}, err
	}

	// A changed CPU limit must be propagated to the CPU timer.
	if resource == limits.CPU {
		t.ThreadGroup().SetCPUTimer(newLim)
	}
	return oldLim, nil
}

// Getrlimit implements linux syscall getrlimit(2).
func Getrlimit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	resource, ok := limits.FromLinuxResource[int(args[0].Int())]
	if !ok {
		// Return err; unknown limit.
		return 0, nil, syserror.EINVAL
	}
	addr := args[1].Pointer()
	rlim, err := newRlimit(t)
	if err != nil {
		return 0, nil, err
	}
	lim, err := prlimit64(t, resource, nil)
	if err != nil {
		return 0, nil, err
	}
	rlim.fromLimit(lim)
	return 0, nil, rlim.copyOut(t, addr)
}

// Setrlimit implements linux syscall setrlimit(2).
func Setrlimit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	resource, ok := limits.FromLinuxResource[int(args[0].Int())]
	if !ok {
		// Return err; unknown limit.
		return 0, nil, syserror.EINVAL
	}
	addr := args[1].Pointer()
	rlim, err := newRlimit(t)
	if err != nil {
		return 0, nil, err
	}
	if err := rlim.copyIn(t, addr); err != nil {
		return 0, nil, syserror.EFAULT
	}
	_, err = prlimit64(t, resource, rlim.toLimit())
	return 0, nil, err
}

// Prlimit64 implements linux syscall prlimit64(2).
func Prlimit64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	tid := kernel.ThreadID(args[0].Int())
	resource, ok := limits.FromLinuxResource[int(args[1].Int())]
	if !ok {
		// Return err; unknown limit.
		return 0, nil, syserror.EINVAL
	}
	newRlimAddr := args[2].Pointer()
	oldRlimAddr := args[3].Pointer()

	var newLim *limits.Limit
	if newRlimAddr != 0 {
		var nrl rlimit64
		if err := nrl.copyIn(t, newRlimAddr); err != nil {
			return 0, nil, syserror.EFAULT
		}
		newLim = nrl.toLimit()
	}

	if tid < 0 {
		return 0, nil, syserror.EINVAL
	}
	// tid == 0 means the calling task itself.
	ot := t
	if tid > 0 {
		if ot = t.PIDNamespace().TaskWithID(tid); ot == nil {
			return 0, nil, syserror.ESRCH
		}
	}

	// "To set or get the resources of a process other than itself, the caller
	// must have the CAP_SYS_RESOURCE capability, or the real, effective, and
	// saved set user IDs of the target process must match the real user ID of
	// the caller and the real, effective, and saved set group IDs of the
	// target process must match the real group ID of the caller."
	if !t.HasCapabilityIn(linux.CAP_SYS_RESOURCE, t.PIDNamespace().UserNamespace()) {
		cred, tcred := t.Credentials(), ot.Credentials()
		if cred.RealKUID != tcred.RealKUID ||
			cred.RealKUID != tcred.EffectiveKUID ||
			cred.RealKUID != tcred.SavedKUID ||
			cred.RealKGID != tcred.RealKGID ||
			cred.RealKGID != tcred.EffectiveKGID ||
			cred.RealKGID != tcred.SavedKGID {
			return 0, nil, syserror.EPERM
		}
	}

	oldLim, err := prlimit64(ot, resource, newLim)
	if err != nil {
		return 0, nil, err
	}

	if oldRlimAddr != 0 {
		if err := makeRlimit64(oldLim).copyOut(t, oldRlimAddr); err != nil {
			return 0, nil, syserror.EFAULT
		}
	}

	return 0, nil, nil
}
diff --git a/pkg/sentry/syscalls/linux/sys_rusage.go b/pkg/sentry/syscalls/linux/sys_rusage.go
new file mode 100644
index 000000000..82e42b589
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_rusage.go
@@ -0,0 +1,112 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package linux + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usage" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +func getrusage(t *kernel.Task, which int32) linux.Rusage { + var cs usage.CPUStats + + switch which { + case linux.RUSAGE_SELF: + cs = t.ThreadGroup().CPUStats() + + case linux.RUSAGE_CHILDREN: + cs = t.ThreadGroup().JoinedChildCPUStats() + + case linux.RUSAGE_THREAD: + cs = t.CPUStats() + + case linux.RUSAGE_BOTH: + tg := t.ThreadGroup() + cs = tg.CPUStats() + cs.Accumulate(tg.JoinedChildCPUStats()) + } + + return linux.Rusage{ + UTime: linux.NsecToTimeval(cs.UserTime.Nanoseconds()), + STime: linux.NsecToTimeval(cs.SysTime.Nanoseconds()), + NVCSw: int64(cs.VoluntarySwitches), + MaxRSS: int64(t.MaxRSS(which) / 1024), + } +} + +// Getrusage implements linux syscall getrusage(2). 
+// marked "y" are supported now +// marked "*" are not used on Linux +// marked "p" are pending for support +// +// y struct timeval ru_utime; /* user CPU time used */ +// y struct timeval ru_stime; /* system CPU time used */ +// p long ru_maxrss; /* maximum resident set size */ +// * long ru_ixrss; /* integral shared memory size */ +// * long ru_idrss; /* integral unshared data size */ +// * long ru_isrss; /* integral unshared stack size */ +// p long ru_minflt; /* page reclaims (soft page faults) */ +// p long ru_majflt; /* page faults (hard page faults) */ +// * long ru_nswap; /* swaps */ +// p long ru_inblock; /* block input operations */ +// p long ru_oublock; /* block output operations */ +// * long ru_msgsnd; /* IPC messages sent */ +// * long ru_msgrcv; /* IPC messages received */ +// * long ru_nsignals; /* signals received */ +// y long ru_nvcsw; /* voluntary context switches */ +// y long ru_nivcsw; /* involuntary context switches */ +func Getrusage(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + which := args[0].Int() + addr := args[1].Pointer() + + if which != linux.RUSAGE_SELF && which != linux.RUSAGE_CHILDREN && which != linux.RUSAGE_THREAD { + return 0, nil, syserror.EINVAL + } + + ru := getrusage(t, which) + _, err := t.CopyOut(addr, &ru) + return 0, nil, err +} + +// Times implements linux syscall times(2). +func Times(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + + // Calculate the ticks first, and figure out if any additional work is + // necessary. Linux allows for a NULL addr, in which case only the + // return value is meaningful. We don't need to do anything else. 
+ ticks := uintptr(ktime.NowFromContext(t).Nanoseconds() / linux.ClockTick.Nanoseconds()) + if addr == 0 { + return ticks, nil, nil + } + + cs1 := t.ThreadGroup().CPUStats() + cs2 := t.ThreadGroup().JoinedChildCPUStats() + r := linux.Tms{ + UTime: linux.ClockTFromDuration(cs1.UserTime), + STime: linux.ClockTFromDuration(cs1.SysTime), + CUTime: linux.ClockTFromDuration(cs2.UserTime), + CSTime: linux.ClockTFromDuration(cs2.SysTime), + } + if _, err := t.CopyOut(addr, &r); err != nil { + return 0, nil, err + } + + return ticks, nil, nil +} diff --git a/pkg/sentry/syscalls/linux/sys_sched.go b/pkg/sentry/syscalls/linux/sys_sched.go new file mode 100644 index 000000000..ff9e46077 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_sched.go @@ -0,0 +1,100 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" +) + +const ( + onlyScheduler = linux.SCHED_NORMAL + onlyPriority = 0 +) + +// SchedParam replicates struct sched_param in sched.h. +type SchedParam struct { + schedPriority int64 +} + +// SchedGetparam implements linux syscall sched_getparam(2). 
+func SchedGetparam(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + pid := args[0].Int() + param := args[1].Pointer() + if param == 0 { + return 0, nil, syscall.EINVAL + } + if pid < 0 { + return 0, nil, syscall.EINVAL + } + if pid != 0 && t.PIDNamespace().TaskWithID(kernel.ThreadID(pid)) == nil { + return 0, nil, syscall.ESRCH + } + r := SchedParam{schedPriority: onlyPriority} + if _, err := t.CopyOut(param, r); err != nil { + return 0, nil, err + } + + return 0, nil, nil +} + +// SchedGetscheduler implements linux syscall sched_getscheduler(2). +func SchedGetscheduler(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + pid := args[0].Int() + if pid < 0 { + return 0, nil, syscall.EINVAL + } + if pid != 0 && t.PIDNamespace().TaskWithID(kernel.ThreadID(pid)) == nil { + return 0, nil, syscall.ESRCH + } + return onlyScheduler, nil, nil +} + +// SchedSetscheduler implements linux syscall sched_setscheduler(2). +func SchedSetscheduler(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + pid := args[0].Int() + policy := args[1].Int() + param := args[2].Pointer() + if pid < 0 { + return 0, nil, syscall.EINVAL + } + if policy != onlyScheduler { + return 0, nil, syscall.EINVAL + } + if pid != 0 && t.PIDNamespace().TaskWithID(kernel.ThreadID(pid)) == nil { + return 0, nil, syscall.ESRCH + } + var r SchedParam + if _, err := t.CopyIn(param, &r); err != nil { + return 0, nil, syscall.EINVAL + } + if r.schedPriority != onlyPriority { + return 0, nil, syscall.EINVAL + } + return 0, nil, nil +} + +// SchedGetPriorityMax implements linux syscall sched_get_priority_max(2). +func SchedGetPriorityMax(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + return onlyPriority, nil, nil +} + +// SchedGetPriorityMin implements linux syscall sched_get_priority_min(2). 
+func SchedGetPriorityMin(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + return onlyPriority, nil, nil +} diff --git a/pkg/sentry/syscalls/linux/sys_sem.go b/pkg/sentry/syscalls/linux/sys_sem.go new file mode 100644 index 000000000..a8983705b --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_sem.go @@ -0,0 +1,166 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "math" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +const opsMax = 500 // SEMOPM + +// Semget handles: semget(key_t key, int nsems, int semflg) +func Semget(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + key := args[0].Int() + nsems := args[1].Int() + flag := args[2].Int() + + private := key == linux.IPC_PRIVATE + create := flag&linux.IPC_CREAT == linux.IPC_CREAT + exclusive := flag&linux.IPC_EXCL == linux.IPC_EXCL + mode := linux.FileMode(flag & 0777) + + r := t.IPCNamespace().SemaphoreRegistry() + set, err := r.FindOrCreate(t, key, nsems, mode, private, create, exclusive) + if err != nil { + return 0, nil, err + } + return uintptr(set.ID), nil, nil +} + +// Semop handles: semop(int 
semid, struct sembuf *sops, size_t nsops) +func Semop(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + id := args[0].Int() + sembufAddr := args[1].Pointer() + nsops := args[2].SizeT() + + r := t.IPCNamespace().SemaphoreRegistry() + set := r.FindByID(id) + if set == nil { + return 0, nil, syserror.EINVAL + } + if nsops <= 0 { + return 0, nil, syserror.EINVAL + } + if nsops > opsMax { + return 0, nil, syserror.E2BIG + } + + ops := make([]linux.Sembuf, nsops) + if _, err := t.CopyIn(sembufAddr, ops); err != nil { + return 0, nil, err + } + + creds := auth.CredentialsFromContext(t) + for { + ch, num, err := set.ExecuteOps(t, ops, creds) + if ch == nil || err != nil { + // We're done (either on success or a failure). + return 0, nil, err + } + if err = t.Block(ch); err != nil { + set.AbortWait(num, ch) + return 0, nil, err + } + } +} + +// Semctl handles: semctl(int semid, int semnum, int cmd, ...) +func Semctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + id := args[0].Int() + num := args[1].Int() + cmd := args[2].Int() + + switch cmd { + case linux.SETVAL: + val := args[3].Int() + if val > math.MaxInt16 { + return 0, nil, syserror.ERANGE + } + return 0, nil, setVal(t, id, num, int16(val)) + + case linux.GETVAL: + v, err := getVal(t, id, num) + return uintptr(v), nil, err + + case linux.IPC_RMID: + return 0, nil, remove(t, id) + + case linux.IPC_SET: + arg := args[3].Pointer() + s := linux.SemidDS{} + if _, err := t.CopyIn(arg, &s); err != nil { + return 0, nil, err + } + + perms := fs.FilePermsFromMode(linux.FileMode(s.SemPerm.Mode & 0777)) + return 0, nil, ipcSet(t, id, auth.UID(s.SemPerm.UID), auth.GID(s.SemPerm.GID), perms) + + default: + return 0, nil, syserror.EINVAL + } +} + +func remove(t *kernel.Task, id int32) error { + r := t.IPCNamespace().SemaphoreRegistry() + creds := auth.CredentialsFromContext(t) + return r.RemoveID(id, creds) +} + +func ipcSet(t *kernel.Task, id int32, 
uid auth.UID, gid auth.GID, perms fs.FilePermissions) error { + r := t.IPCNamespace().SemaphoreRegistry() + set := r.FindByID(id) + if set == nil { + return syserror.EINVAL + } + + creds := auth.CredentialsFromContext(t) + kuid := creds.UserNamespace.MapToKUID(uid) + if !kuid.Ok() { + return syserror.EINVAL + } + kgid := creds.UserNamespace.MapToKGID(gid) + if !kgid.Ok() { + return syserror.EINVAL + } + owner := fs.FileOwner{UID: kuid, GID: kgid} + return set.Change(t, creds, owner, perms) +} + +func setVal(t *kernel.Task, id int32, num int32, val int16) error { + r := t.IPCNamespace().SemaphoreRegistry() + set := r.FindByID(id) + if set == nil { + return syserror.EINVAL + } + creds := auth.CredentialsFromContext(t) + return set.SetVal(t, num, val, creds) +} + +func getVal(t *kernel.Task, id int32, num int32) (int16, error) { + r := t.IPCNamespace().SemaphoreRegistry() + set := r.FindByID(id) + if set == nil { + return 0, syserror.EINVAL + } + creds := auth.CredentialsFromContext(t) + return set.GetVal(num, creds) +} diff --git a/pkg/sentry/syscalls/linux/sys_signal.go b/pkg/sentry/syscalls/linux/sys_signal.go new file mode 100644 index 000000000..93b3f531a --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_signal.go @@ -0,0 +1,553 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package linux + +import ( + "math" + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// "For a process to have permission to send a signal it must +// - either be privileged (CAP_KILL), or +// - the real or effective user ID of the sending process must be equal to the +// real or saved set-user-ID of the target process. +// +// In the case of SIGCONT it suffices when the sending and receiving processes +// belong to the same session." - kill(2) +// +// Equivalent to kernel/signal.c:check_kill_permission. +func mayKill(t *kernel.Task, target *kernel.Task, sig linux.Signal) bool { + // kernel/signal.c:check_kill_permission also allows a signal if the + // sending and receiving tasks share a thread group, which is not + // mentioned in kill(2) since kill does not allow task-level + // granularity in signal sending. + if t.ThreadGroup() == target.ThreadGroup() { + return true + } + + if t.HasCapabilityIn(linux.CAP_KILL, target.UserNamespace()) { + return true + } + + creds := t.Credentials() + tcreds := target.Credentials() + if creds.EffectiveKUID == tcreds.SavedKUID || + creds.EffectiveKUID == tcreds.RealKUID || + creds.RealKUID == tcreds.SavedKUID || + creds.RealKUID == tcreds.RealKUID { + return true + } + + if sig == linux.SIGCONT && target.ThreadGroup().Session() == t.ThreadGroup().Session() { + return true + } + return false +} + +// Kill implements linux syscall kill(2). +func Kill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + pid := kernel.ThreadID(args[0].Int()) + sig := linux.Signal(args[1].Int()) + + switch { + case pid > 0: + // "If pid is positive, then signal sig is sent to the process with the + // ID specified by pid." - kill(2) + // This loops to handle races with execve where target dies between + // TaskWithID and SendGroupSignal. 
Compare Linux's + // kernel/signal.c:kill_pid_info(). + for { + target := t.PIDNamespace().TaskWithID(pid) + if target == nil { + return 0, nil, syserror.ESRCH + } + if !mayKill(t, target, sig) { + return 0, nil, syserror.EPERM + } + info := &arch.SignalInfo{ + Signo: int32(sig), + Code: arch.SignalInfoUser, + } + info.SetPid(int32(target.PIDNamespace().IDOfTask(t))) + info.SetUid(int32(t.Credentials().RealKUID.In(target.UserNamespace()).OrOverflow())) + if err := target.SendGroupSignal(info); err != syserror.ESRCH { + return 0, nil, err + } + } + case pid == -1: + // "If pid equals -1, then sig is sent to every process for which the + // calling process has permission to send signals, except for process 1 + // (init), but see below. ... POSIX.1-2001 requires that kill(-1,sig) + // send sig to all processes that the calling process may send signals + // to, except possibly for some implementation-defined system + // processes. Linux allows a process to signal itself, but on Linux the + // call kill(-1,sig) does not signal the calling process." + var ( + lastErr error + delivered int + ) + for _, tg := range t.PIDNamespace().ThreadGroups() { + if tg == t.ThreadGroup() { + continue + } + if t.PIDNamespace().IDOfThreadGroup(tg) == kernel.InitTID { + continue + } + + // If pid == -1, the returned error is the last non-EPERM error + // from any call to group_send_sig_info. + if !mayKill(t, tg.Leader(), sig) { + continue + } + // Here and below, whether or not kill returns an error may + // depend on the iteration order. We at least implement the + // semantics documented by the man page: "On success (at least + // one signal was sent), zero is returned." 
+ info := &arch.SignalInfo{ + Signo: int32(sig), + Code: arch.SignalInfoUser, + } + info.SetPid(int32(tg.PIDNamespace().IDOfTask(t))) + info.SetUid(int32(t.Credentials().RealKUID.In(tg.Leader().UserNamespace()).OrOverflow())) + err := tg.SendSignal(info) + if err == syserror.ESRCH { + // ESRCH is ignored because it means the task + // exited while we were iterating. This is a + // race which would not normally exist on + // Linux, so we suppress it. + continue + } + delivered++ + if err != nil { + lastErr = err + } + } + if delivered > 0 { + return 0, nil, lastErr + } + return 0, nil, syserror.ESRCH + default: + // "If pid equals 0, then sig is sent to every process in the process + // group of the calling process." + // + // "If pid is less than -1, then sig is sent to every process + // in the process group whose ID is -pid." + pgid := kernel.ProcessGroupID(-pid) + if pgid == 0 { + pgid = t.PIDNamespace().IDOfProcessGroup(t.ThreadGroup().ProcessGroup()) + } + + // If pid != -1 (i.e. signalling a process group), the returned error + // is the last error from any call to group_send_sig_info. + lastErr := syserror.ESRCH + for _, tg := range t.PIDNamespace().ThreadGroups() { + if t.PIDNamespace().IDOfProcessGroup(tg.ProcessGroup()) == pgid { + if !mayKill(t, tg.Leader(), sig) { + lastErr = syserror.EPERM + continue + } + + info := &arch.SignalInfo{ + Signo: int32(sig), + Code: arch.SignalInfoUser, + } + info.SetPid(int32(tg.PIDNamespace().IDOfTask(t))) + info.SetUid(int32(t.Credentials().RealKUID.In(tg.Leader().UserNamespace()).OrOverflow())) + // See note above regarding ESRCH race above. 
+ if err := tg.SendSignal(info); err != syserror.ESRCH { + lastErr = err + } + } + } + + return 0, nil, lastErr + } +} + +func tkillSigInfo(sender, receiver *kernel.Task, sig linux.Signal) *arch.SignalInfo { + info := &arch.SignalInfo{ + Signo: int32(sig), + Code: arch.SignalInfoTkill, + } + info.SetPid(int32(receiver.PIDNamespace().IDOfThreadGroup(sender.ThreadGroup()))) + info.SetUid(int32(sender.Credentials().RealKUID.In(receiver.UserNamespace()).OrOverflow())) + return info +} + +// Tkill implements linux syscall tkill(2). +func Tkill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + tid := kernel.ThreadID(args[0].Int()) + sig := linux.Signal(args[1].Int()) + + // N.B. Inconsistent with man page, linux actually rejects calls with + // tid <=0 by EINVAL. This isn't the same for all signal calls. + if tid <= 0 { + return 0, nil, syserror.EINVAL + } + + target := t.PIDNamespace().TaskWithID(tid) + if target == nil { + return 0, nil, syserror.ESRCH + } + + if !mayKill(t, target, sig) { + return 0, nil, syserror.EPERM + } + return 0, nil, target.SendSignal(tkillSigInfo(t, target, sig)) +} + +// Tgkill implements linux syscall tgkill(2). +func Tgkill(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + tgid := kernel.ThreadID(args[0].Int()) + tid := kernel.ThreadID(args[1].Int()) + sig := linux.Signal(args[2].Int()) + + // N.B. Inconsistent with man page, linux actually rejects calls with + // tgid/tid <=0 by EINVAL. This isn't the same for all signal calls. 
+ if tgid <= 0 || tid <= 0 { + return 0, nil, syserror.EINVAL + } + + targetTG := t.PIDNamespace().ThreadGroupWithID(tgid) + target := t.PIDNamespace().TaskWithID(tid) + if targetTG == nil || target == nil || target.ThreadGroup() != targetTG { + return 0, nil, syserror.ESRCH + } + + if !mayKill(t, target, sig) { + return 0, nil, syserror.EPERM + } + return 0, nil, target.SendSignal(tkillSigInfo(t, target, sig)) +} + +// RtSigaction implements linux syscall rt_sigaction(2). +func RtSigaction(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + sig := linux.Signal(args[0].Int()) + newactarg := args[1].Pointer() + oldactarg := args[2].Pointer() + + var newactptr *arch.SignalAct + if newactarg != 0 { + newact, err := t.CopyInSignalAct(newactarg) + if err != nil { + return 0, nil, err + } + newactptr = &newact + } + oldact, err := t.ThreadGroup().SetSignalAct(sig, newactptr) + if err != nil { + return 0, nil, err + } + if oldactarg != 0 { + if err := t.CopyOutSignalAct(oldactarg, &oldact); err != nil { + return 0, nil, err + } + } + return 0, nil, nil +} + +// Sigreturn implements linux syscall sigreturn(2). +func Sigreturn(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + ctrl, err := t.SignalReturn(false) + return 0, ctrl, err +} + +// RtSigreturn implements linux syscall rt_sigreturn(2). +func RtSigreturn(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + ctrl, err := t.SignalReturn(true) + return 0, ctrl, err +} + +// RtSigprocmask implements linux syscall rt_sigprocmask(2). 
+func RtSigprocmask(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + how := args[0].Int() + setaddr := args[1].Pointer() + oldaddr := args[2].Pointer() + sigsetsize := args[3].SizeT() + + if sigsetsize != linux.SignalSetSize { + return 0, nil, syserror.EINVAL + } + oldmask := t.SignalMask() + if setaddr != 0 { + mask, err := copyInSigSet(t, setaddr, sigsetsize) + if err != nil { + return 0, nil, err + } + + switch how { + case linux.SIG_BLOCK: + t.SetSignalMask(oldmask | mask) + case linux.SIG_UNBLOCK: + t.SetSignalMask(oldmask &^ mask) + case linux.SIG_SETMASK: + t.SetSignalMask(mask) + default: + return 0, nil, syserror.EINVAL + } + } + if oldaddr != 0 { + return 0, nil, copyOutSigSet(t, oldaddr, oldmask) + } + + return 0, nil, nil +} + +// Sigaltstack implements linux syscall sigaltstack(2). +func Sigaltstack(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + setaddr := args[0].Pointer() + oldaddr := args[1].Pointer() + + if oldaddr != 0 { + alt := t.SignalStack() + if t.OnSignalStack(alt) { + alt.Flags |= arch.SignalStackFlagOnStack + } + if err := t.CopyOutSignalStack(oldaddr, &alt); err != nil { + return 0, nil, err + } + } + if setaddr != 0 { + if t.OnSignalStack(t.SignalStack()) { + return 0, nil, syserror.EPERM + } + alt, err := t.CopyInSignalStack(setaddr) + if err != nil { + return 0, nil, err + } + if err := t.SetSignalStack(alt); err != nil { + return 0, nil, err + } + } + + return 0, nil, nil +} + +// Pause implements linux syscall pause(2). +func Pause(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + return 0, nil, syserror.ConvertIntr(t.Block(nil), kernel.ERESTARTNOHAND) +} + +func sigtimedwait(t *kernel.Task, mask linux.SignalSet, timeout time.Duration) (*arch.SignalInfo, error) { + // Is it already pending? 
+ if info := t.TakeSignal(^mask); info != nil { + return info, nil + } + + // No signals available immediately and asked not to wait. + if timeout == 0 { + return nil, syserror.EAGAIN + } + + // No signals available yet. Temporarily unblock the ones we are interested + // in then wait for either a timeout or a new signal. + oldmask := t.SignalMask() + t.SetSignalMask(oldmask &^ mask) + _, err := t.BlockWithTimeout(nil, true, timeout) + t.SetSignalMask(oldmask) + + // How did the wait go? + switch err { + case syserror.ErrInterrupted: + if info := t.TakeSignal(^mask); info != nil { + // Got one of the signals we were waiting for. + return info, nil + } + // Got a signal we weren't waiting for. + return nil, syserror.EINTR + case syserror.ETIMEDOUT: + // Timed out and still no signals. + return nil, syserror.EAGAIN + default: + // Some other error? Shouldn't be possible. The event channel + // passed to BlockWithTimeout was nil, so the only two ways the + // block could've ended are a timeout or an interrupt. + panic("unreachable") + } +} + +// RtSigpending implements linux syscall rt_sigpending(2). +func RtSigpending(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + pending := t.PendingSignals() + _, err := t.CopyOut(addr, pending) + return 0, nil, err +} + +// RtSigtimedwait implements linux syscall rt_sigtimedwait(2). 
+func RtSigtimedwait(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + sigset := args[0].Pointer() + siginfo := args[1].Pointer() + timespec := args[2].Pointer() + sigsetsize := args[3].SizeT() + + mask, err := copyInSigSet(t, sigset, sigsetsize) + if err != nil { + return 0, nil, err + } + + var timeout time.Duration + if timespec != 0 { + d, err := copyTimespecIn(t, timespec) + if err != nil { + return 0, nil, err + } + if !d.Valid() { + return 0, nil, syserror.EINVAL + } + timeout = time.Duration(d.ToNsecCapped()) + } else { + timeout = time.Duration(math.MaxInt64) + } + + si, err := sigtimedwait(t, mask, timeout) + if err != nil { + return 0, nil, err + } + + if si != nil { + if siginfo != 0 { + si.FixSignalCodeForUser() + if _, err := t.CopyOut(siginfo, si); err != nil { + return 0, nil, err + } + } + return uintptr(si.Signo), nil, nil + } + + // sigtimedwait's not supposed to return nil si and err... + return 0, nil, nil +} + +// RtSigqueueinfo implements linux syscall rt_sigqueueinfo(2). +func RtSigqueueinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + pid := kernel.ThreadID(args[0].Int()) + sig := linux.Signal(args[1].Int()) + infoAddr := args[2].Pointer() + + // Copy in the info. + // + // We must ensure that the Signo is set (Linux overrides this in the + // same way), and that the code is in the allowed set. This same logic + // appears below in RtSigtgqueueinfo and should be kept in sync. + var info arch.SignalInfo + if _, err := t.CopyIn(infoAddr, &info); err != nil { + return 0, nil, err + } + info.Signo = int32(sig) + + // This must loop to handle the race with execve described in Kill. + for { + // Deliver to the given task's thread group. + target := t.PIDNamespace().TaskWithID(pid) + if target == nil { + return 0, nil, syserror.ESRCH + } + + // If the sender is not the receiver, it can't use si_codes used by the + // kernel or SI_TKILL. 
+ if (info.Code >= 0 || info.Code == arch.SignalInfoTkill) && target != t { + return 0, nil, syserror.EPERM + } + + if !mayKill(t, target, sig) { + return 0, nil, syserror.EPERM + } + + if err := target.SendGroupSignal(&info); err != syserror.ESRCH { + return 0, nil, err + } + } +} + +// RtTgsigqueueinfo implements linux syscall rt_tgsigqueueinfo(2). +func RtTgsigqueueinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + tgid := kernel.ThreadID(args[0].Int()) + tid := kernel.ThreadID(args[1].Int()) + sig := linux.Signal(args[2].Int()) + infoAddr := args[3].Pointer() + + // N.B. Inconsistent with man page, linux actually rejects calls with + // tgid/tid <=0 by EINVAL. This isn't the same for all signal calls. + if tgid <= 0 || tid <= 0 { + return 0, nil, syserror.EINVAL + } + + // Copy in the info. See RtSigqueueinfo above. + var info arch.SignalInfo + if _, err := t.CopyIn(infoAddr, &info); err != nil { + return 0, nil, err + } + info.Signo = int32(sig) + + // Deliver to the given task. + targetTG := t.PIDNamespace().ThreadGroupWithID(tgid) + target := t.PIDNamespace().TaskWithID(tid) + if targetTG == nil || target == nil || target.ThreadGroup() != targetTG { + return 0, nil, syserror.ESRCH + } + + // If the sender is not the receiver, it can't use si_codes used by the + // kernel or SI_TKILL. + if (info.Code >= 0 || info.Code == arch.SignalInfoTkill) && target != t { + return 0, nil, syserror.EPERM + } + + if !mayKill(t, target, sig) { + return 0, nil, syserror.EPERM + } + return 0, nil, target.SendSignal(&info) +} + +// RtSigsuspend implements linux syscall rt_sigsuspend(2). +func RtSigsuspend(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + sigset := args[0].Pointer() + + // Copy in the signal mask. + var mask linux.SignalSet + if _, err := t.CopyIn(sigset, &mask); err != nil { + return 0, nil, err + } + mask &^= kernel.UnblockableSignals + + // Swap the mask. 
+ oldmask := t.SignalMask() + t.SetSignalMask(mask) + t.SetSavedSignalMask(oldmask) + + // Perform the wait. + return 0, nil, syserror.ConvertIntr(t.Block(nil), kernel.ERESTARTNOHAND) +} + +// RestartSyscall implements the linux syscall restart_syscall(2). +func RestartSyscall(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + if r := t.SyscallRestartBlock(); r != nil { + n, err := r.Restart(t) + return n, nil, err + } + // The restart block should never be nil here, but it's possible + // ERESTART_RESTARTBLOCK was set by ptrace without the current syscall + // setting up a restart block. If ptrace didn't manipulate the return value, + // finding a nil restart block is a bug. Linux ensures that the restart + // function is never null by (re)initializing it with one that translates + // the restart into EINTR. We'll emulate that behaviour. + t.Debugf("Restart block missing in restart_syscall(2). Did ptrace inject a return value of ERESTART_RESTARTBLOCK?") + return 0, nil, syserror.EINTR +} diff --git a/pkg/sentry/syscalls/linux/sys_socket.go b/pkg/sentry/syscalls/linux/sys_socket.go new file mode 100644 index 000000000..3797c0a5d --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_socket.go @@ -0,0 +1,1059 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package linux + +import ( + "syscall" + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket" + "gvisor.googlesource.com/gvisor/pkg/sentry/socket/control" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix" +) + +// minListenBacklog is the minimum reasonable backlog for listening sockets. +const minListenBacklog = 8 + +// maxListenBacklog is the maximum allowed backlog for listening sockets. +const maxListenBacklog = 1024 + +// maxAddrLen is the maximum socket address length we're willing to accept. +const maxAddrLen = 200 + +// maxOptLen is the maximum sockopt parameter length we're willing to accept. +const maxOptLen = 1024 + +// maxControlLen is the maximum length of the msghdr.msg_control buffer we're +// willing to accept. Note that this limit is smaller than Linux, which allows +// buffers upto INT_MAX. +const maxControlLen = 10 * 1024 * 1024 + +// nameLenOffset is the offset from the start of the MessageHeader64 struct to +// the NameLen field. +const nameLenOffset = 8 + +// controlLenOffset is the offset form the start of the MessageHeader64 struct +// to the ControlLen field. +const controlLenOffset = 40 + +// messageHeader64Len is the length of a MessageHeader64 struct. +var messageHeader64Len = uint64(binary.Size(MessageHeader64{})) + +// multipleMessageHeader64Len is the length of a multipeMessageHeader64 struct. 
+var multipleMessageHeader64Len = uint64(binary.Size(multipleMessageHeader64{})) + +// MessageHeader64 is the 64-bit representation of the msghdr struct used in +// the recvmsg and sendmsg syscalls. +type MessageHeader64 struct { + // Name is the optional pointer to a network address buffer. + Name uint64 + + // NameLen is the length of the buffer pointed to by Name. + NameLen uint32 + _ uint32 + + // Iov is a pointer to an array of io vectors that describe the memory + // locations involved in the io operation. + Iov uint64 + + // IovLen is the length of the array pointed to by Iov. + IovLen uint64 + + // Control is the optional pointer to ancillary control data. + Control uint64 + + // ControlLen is the length of the data pointed to by Control. + ControlLen uint64 + + // Flags on the sent/received message. + Flags int32 + _ int32 +} + +// multipleMessageHeader64 is the 64-bit representation of the mmsghdr struct used in +// the recvmmsg and sendmmsg syscalls. +type multipleMessageHeader64 struct { + msgHdr MessageHeader64 + msgLen uint32 + _ int32 +} + +// CopyInMessageHeader64 copies a message header from user to kernel memory. +func CopyInMessageHeader64(t *kernel.Task, addr usermem.Addr, msg *MessageHeader64) error { + b := t.CopyScratchBuffer(52) + if _, err := t.CopyInBytes(addr, b); err != nil { + return err + } + + msg.Name = usermem.ByteOrder.Uint64(b[0:]) + msg.NameLen = usermem.ByteOrder.Uint32(b[8:]) + msg.Iov = usermem.ByteOrder.Uint64(b[16:]) + msg.IovLen = usermem.ByteOrder.Uint64(b[24:]) + msg.Control = usermem.ByteOrder.Uint64(b[32:]) + msg.ControlLen = usermem.ByteOrder.Uint64(b[40:]) + msg.Flags = int32(usermem.ByteOrder.Uint32(b[48:])) + + return nil +} + +// CaptureAddress allocates memory for and copies a socket address structure +// from the untrusted address space range. 
+func CaptureAddress(t *kernel.Task, addr usermem.Addr, addrlen uint32) ([]byte, error) { + if addrlen > maxAddrLen { + return nil, syscall.EINVAL + } + + addrBuf := make([]byte, addrlen) + if _, err := t.CopyInBytes(addr, addrBuf); err != nil { + return nil, err + } + + return addrBuf, nil +} + +// writeAddress writes a sockaddr structure and its length to an output buffer +// in the unstrusted address space range. If the address is bigger than the +// buffer, it is truncated. +func writeAddress(t *kernel.Task, addr interface{}, addrLen uint32, addrPtr usermem.Addr, addrLenPtr usermem.Addr) error { + // Get the buffer length. + var bufLen uint32 + if _, err := t.CopyIn(addrLenPtr, &bufLen); err != nil { + return err + } + + if int32(bufLen) < 0 { + return syscall.EINVAL + } + + // Write the length unconditionally. + if _, err := t.CopyOut(addrLenPtr, addrLen); err != nil { + return err + } + + if addr == nil { + return nil + } + + if bufLen > addrLen { + bufLen = addrLen + } + + // Copy as much of the address as will fit in the buffer. + encodedAddr := binary.Marshal(nil, usermem.ByteOrder, addr) + if bufLen > uint32(len(encodedAddr)) { + bufLen = uint32(len(encodedAddr)) + } + _, err := t.CopyOutBytes(addrPtr, encodedAddr[:int(bufLen)]) + return err +} + +// Socket implements the linux syscall socket(2). +func Socket(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + domain := int(args[0].Int()) + stype := args[1].Int() + protocol := int(args[2].Int()) + + // Check and initialize the flags. + if stype & ^(0xf|linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 { + return 0, nil, syscall.EINVAL + } + + // Create the new socket. 
+ s, e := socket.New(t, domain, unix.SockType(stype&0xf), protocol) + if e != nil { + return 0, nil, e.ToError() + } + s.SetFlags(fs.SettableFileFlags{ + NonBlocking: stype&linux.SOCK_NONBLOCK != 0, + }) + defer s.DecRef() + + fd, err := t.FDMap().NewFDFrom(0, s, kernel.FDFlags{ + CloseOnExec: stype&linux.SOCK_CLOEXEC != 0, + }, t.ThreadGroup().Limits()) + if err != nil { + return 0, nil, err + } + + return uintptr(fd), nil, nil +} + +// SocketPair implements the linux syscall socketpair(2). +func SocketPair(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + domain := int(args[0].Int()) + stype := args[1].Int() + protocol := int(args[2].Int()) + socks := args[3].Pointer() + + // Check and initialize the flags. + if stype & ^(0xf|linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 { + return 0, nil, syscall.EINVAL + } + + fileFlags := fs.SettableFileFlags{ + NonBlocking: stype&linux.SOCK_NONBLOCK != 0, + } + fdFlags := kernel.FDFlags{ + CloseOnExec: stype&linux.SOCK_CLOEXEC != 0, + } + + // Create the socket pair. + s1, s2, e := socket.Pair(t, domain, unix.SockType(stype&0xf), protocol) + if e != nil { + return 0, nil, e.ToError() + } + s1.SetFlags(fileFlags) + s2.SetFlags(fileFlags) + defer s1.DecRef() + defer s2.DecRef() + + // Create the FDs for the sockets. + fd1, err := t.FDMap().NewFDFrom(0, s1, fdFlags, t.ThreadGroup().Limits()) + if err != nil { + return 0, nil, err + } + fd2, err := t.FDMap().NewFDFrom(0, s2, fdFlags, t.ThreadGroup().Limits()) + if err != nil { + t.FDMap().Remove(fd1) + return 0, nil, err + } + + // Copy the file descriptors out. + if _, err := t.CopyOut(socks, []int32{int32(fd1), int32(fd2)}); err != nil { + t.FDMap().Remove(fd1) + t.FDMap().Remove(fd2) + return 0, nil, err + } + + return 0, nil, nil +} + +// Connect implements the linux syscall connect(2). 
+func Connect(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + addrlen := args[2].Uint() + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Capture address and call syscall implementation. + a, err := CaptureAddress(t, addr, addrlen) + if err != nil { + return 0, nil, err + } + + blocking := !file.Flags().NonBlocking + return 0, nil, syserror.ConvertIntr(s.Connect(t, a, blocking).ToError(), kernel.ERESTARTSYS) +} + +// accept is the implementation of the accept syscall. It is called by accept +// and accept4 syscall handlers. +func accept(t *kernel.Task, fd kdefs.FD, addr usermem.Addr, addrLen usermem.Addr, flags int) (uintptr, error) { + // Check that no unsupported flags are passed in. + if flags & ^(linux.SOCK_NONBLOCK|linux.SOCK_CLOEXEC) != 0 { + return 0, syscall.EINVAL + } + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, syscall.ENOTSOCK + } + + // Call the syscall implementation for this socket, then copy the + // output address if one is specified. + blocking := !file.Flags().NonBlocking + + peerRequested := addrLen != 0 + nfd, peer, peerLen, e := s.Accept(t, peerRequested, flags, blocking) + if e != nil { + return 0, syserror.ConvertIntr(e.ToError(), kernel.ERESTARTSYS) + } + if peerRequested { + // NOTE: Linux does not give you an error if it can't + // write the data back out so neither do we. 
+ if err := writeAddress(t, peer, peerLen, addr, addrLen); err == syscall.EINVAL { + return 0, err + } + } + return uintptr(nfd), nil +} + +// Accept4 implements the linux syscall accept4(2). +func Accept4(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + addrlen := args[2].Pointer() + flags := int(args[3].Int()) + + n, err := accept(t, fd, addr, addrlen, flags) + return n, nil, err +} + +// Accept implements the linux syscall accept(2). +func Accept(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + addrlen := args[2].Pointer() + + n, err := accept(t, fd, addr, addrlen, 0) + return n, nil, err +} + +// Bind implements the linux syscall bind(2). +func Bind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + addrlen := args[2].Uint() + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Capture address and call syscall implementation. + a, err := CaptureAddress(t, addr, addrlen) + if err != nil { + return 0, nil, err + } + + return 0, nil, s.Bind(t, a).ToError() +} + +// Listen implements the linux syscall listen(2). +func Listen(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + backlog := args[1].Int() + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. 
+ s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Per Linux, the backlog is silently capped to reasonable values. + if backlog <= 0 { + backlog = minListenBacklog + } + if backlog > maxListenBacklog { + backlog = maxListenBacklog + } + + return 0, nil, s.Listen(t, int(backlog)).ToError() +} + +// Shutdown implements the linux syscall shutdown(2). +func Shutdown(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + how := args[1].Int() + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Validate how, then call syscall implementation. + switch how { + case linux.SHUT_RD, linux.SHUT_WR, linux.SHUT_RDWR: + default: + return 0, nil, syscall.EINVAL + } + + return 0, nil, s.Shutdown(t, int(how)).ToError() +} + +// GetSockOpt implements the linux syscall getsockopt(2). +func GetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + level := args[1].Int() + name := args[2].Int() + optValAddr := args[3].Pointer() + optLenAddr := args[4].Pointer() + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Read the length if present. Reject negative values. + optLen := int32(0) + if optLenAddr != 0 { + if _, err := t.CopyIn(optLenAddr, &optLen); err != nil { + return 0, nil, err + } + + if optLen < 0 { + return 0, nil, syscall.EINVAL + } + } + + // Call syscall implementation then copy both value and value len out. 
+ v, e := s.GetSockOpt(t, int(level), int(name), int(optLen)) + if e != nil { + return 0, nil, e.ToError() + } + + if optLenAddr != 0 { + vLen := int32(binary.Size(v)) + if _, err := t.CopyOut(optLenAddr, vLen); err != nil { + return 0, nil, err + } + } + + if v != nil { + if _, err := t.CopyOut(optValAddr, v); err != nil { + return 0, nil, err + } + } + + return 0, nil, nil +} + +// SetSockOpt implements the linux syscall setsockopt(2). +// +// Note that unlike Linux, enabling SO_PASSCRED does not autobind the socket. +func SetSockOpt(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + level := args[1].Int() + name := args[2].Int() + optValAddr := args[3].Pointer() + optLen := args[4].Int() + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + if optLen <= 0 { + return 0, nil, syscall.EINVAL + } + if optLen > maxOptLen { + return 0, nil, syscall.EINVAL + } + buf := make([]byte, optLen) + if _, err := t.CopyIn(optValAddr, &buf); err != nil { + return 0, nil, err + } + + // Call syscall implementation. + if err := s.SetSockOpt(t, int(level), int(name), buf); err != nil { + return 0, nil, err.ToError() + } + + return 0, nil, nil +} + +// GetSockName implements the linux syscall getsockname(2). +func GetSockName(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + addrlen := args[2].Pointer() + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. 
+ s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Get the socket name and copy it to the caller. + v, vl, err := s.GetSockName(t) + if err != nil { + return 0, nil, err.ToError() + } + + return 0, nil, writeAddress(t, v, vl, addr, addrlen) +} + +// GetPeerName implements the linux syscall getpeername(2). +func GetPeerName(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + addrlen := args[2].Pointer() + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Get the socket peer name and copy it to the caller. + v, vl, err := s.GetPeerName(t) + if err != nil { + return 0, nil, err.ToError() + } + + return 0, nil, writeAddress(t, v, vl, addr, addrlen) +} + +// RecvMsg implements the linux syscall recvmsg(2). +func RecvMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + msgPtr := args[1].Pointer() + flags := args[2].Int() + + if t.Arch().Width() != 8 { + // We only handle 64-bit for now. + return 0, nil, syscall.EINVAL + } + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Reject flags that we don't handle yet. 
+ if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 { + return 0, nil, syscall.EINVAL + } + + if file.Flags().NonBlocking { + flags |= linux.MSG_DONTWAIT + } + + n, err := recvSingleMsg(t, s, msgPtr, flags, false, ktime.Time{}) + return n, nil, err +} + +// RecvMMsg implements the linux syscall recvmmsg(2). +func RecvMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + msgPtr := args[1].Pointer() + vlen := args[2].Uint() + flags := args[3].Int() + toPtr := args[4].Pointer() + + if t.Arch().Width() != 8 { + // We only handle 64-bit for now. + return 0, nil, syscall.EINVAL + } + + // Reject flags that we don't handle yet. + if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_TRUNC|linux.MSG_CMSG_CLOEXEC|linux.MSG_ERRQUEUE) != 0 { + return 0, nil, syscall.EINVAL + } + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. 
+ s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + if file.Flags().NonBlocking { + flags |= linux.MSG_DONTWAIT + } + + var haveDeadline bool + var deadline ktime.Time + if toPtr != 0 { + ts, err := copyTimespecIn(t, toPtr) + if err != nil { + return 0, nil, err + } + if !ts.Valid() { + return 0, nil, syscall.EINVAL + } + deadline = t.Kernel().MonotonicClock().Now().Add(ts.ToDuration()) + haveDeadline = true + } + + if !haveDeadline { + dl := s.RecvTimeout() + if dl != 0 { + deadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond) + haveDeadline = true + } + } + + var count uint32 + var err error + for i := uint64(0); i < uint64(vlen); i++ { + mp, ok := msgPtr.AddLength(i * multipleMessageHeader64Len) + if !ok { + return 0, nil, syscall.EFAULT + } + var n uintptr + if n, err = recvSingleMsg(t, s, mp, flags, haveDeadline, deadline); err != nil { + break + } + + // Copy the received length to the caller. + lp, ok := mp.AddLength(messageHeader64Len) + if !ok { + return 0, nil, syscall.EFAULT + } + if _, err = t.CopyOut(lp, uint32(n)); err != nil { + break + } + count++ + } + + if count == 0 { + return 0, nil, err + } + return uintptr(count), nil, nil +} + +func recvSingleMsg(t *kernel.Task, s socket.Socket, msgPtr usermem.Addr, flags int32, haveDeadline bool, deadline ktime.Time) (uintptr, error) { + // Capture the message header and io vectors. + var msg MessageHeader64 + if err := CopyInMessageHeader64(t, msgPtr, &msg); err != nil { + return 0, err + } + + if msg.IovLen > linux.UIO_MAXIOV { + return 0, syscall.EMSGSIZE + } + dst, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, err + } + + // FIXME: Pretend we have an empty error queue. + if flags&linux.MSG_ERRQUEUE != 0 { + return 0, syscall.EAGAIN + } + + // Fast path when no control message nor name buffers are provided. 
+ if msg.ControlLen == 0 && msg.NameLen == 0 { + n, _, _, _, err := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, false, 0) + if err != nil { + return 0, syserror.ConvertIntr(err.ToError(), kernel.ERESTARTSYS) + } + return uintptr(n), nil + } + + if msg.ControlLen > maxControlLen { + return 0, syscall.ENOBUFS + } + n, sender, senderLen, cms, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, msg.NameLen != 0, msg.ControlLen) + if e != nil { + return 0, syserror.ConvertIntr(e.ToError(), kernel.ERESTARTSYS) + } + defer cms.Release() + + controlData := make([]byte, 0, msg.ControlLen) + + if cr, ok := s.(unix.Credentialer); ok && cr.Passcred() { + creds, _ := cms.Credentials.(control.SCMCredentials) + controlData = control.PackCredentials(t, creds, controlData) + } + + if cms.Rights != nil { + controlData = control.PackRights(t, cms.Rights.(control.SCMRights), flags&linux.MSG_CMSG_CLOEXEC != 0, controlData) + } + + // Copy the address to the caller. + if msg.NameLen != 0 { + if err := writeAddress(t, sender, senderLen, usermem.Addr(msg.Name), usermem.Addr(msgPtr+nameLenOffset)); err != nil { + return 0, err + } + } + + // Copy the control data to the caller. + if _, err := t.CopyOut(msgPtr+controlLenOffset, uint64(len(controlData))); err != nil { + return 0, err + } + if len(controlData) > 0 { + if _, err := t.CopyOut(usermem.Addr(msg.Control), controlData); err != nil { + return 0, err + } + } + + return uintptr(n), nil +} + +// recvFrom is the implementation of the recvfrom syscall. It is called by +// recvfrom and recv syscall handlers. +func recvFrom(t *kernel.Task, fd kdefs.FD, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLenPtr usermem.Addr) (uintptr, error) { + if int(bufLen) < 0 { + return 0, syscall.EINVAL + } + + // Reject flags that we don't handle yet. 
+ if flags & ^(linux.MSG_DONTWAIT|linux.MSG_NOSIGNAL|linux.MSG_PEEK|linux.MSG_TRUNC) != 0 { + return 0, syscall.EINVAL + } + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, syscall.ENOTSOCK + } + + if file.Flags().NonBlocking { + flags |= linux.MSG_DONTWAIT + } + + dst, err := t.SingleIOSequence(bufPtr, int(bufLen), usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, err + } + + var haveDeadline bool + var deadline ktime.Time + + if dl := s.RecvTimeout(); dl != 0 { + deadline = t.Kernel().MonotonicClock().Now().Add(time.Duration(dl) * time.Nanosecond) + haveDeadline = true + } + + n, sender, senderLen, cm, e := s.RecvMsg(t, dst, int(flags), haveDeadline, deadline, nameLenPtr != 0, 0) + cm.Release() + if e != nil { + return 0, syserror.ConvertIntr(e.ToError(), kernel.ERESTARTSYS) + } + + // Copy the address to the caller. + if nameLenPtr != 0 { + if err := writeAddress(t, sender, senderLen, namePtr, nameLenPtr); err != nil { + return 0, err + } + } + + return uintptr(n), nil +} + +// RecvFrom implements the linux syscall recvfrom(2). +func RecvFrom(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + bufPtr := args[1].Pointer() + bufLen := args[2].Uint64() + flags := args[3].Int() + namePtr := args[4].Pointer() + nameLenPtr := args[5].Pointer() + + n, err := recvFrom(t, fd, bufPtr, bufLen, flags, namePtr, nameLenPtr) + return n, nil, err +} + +// SendMsg implements the linux syscall sendmsg(2). +func SendMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + msgPtr := args[1].Pointer() + flags := args[2].Int() + + if t.Arch().Width() != 8 { + // We only handle 64-bit for now. 
+ return 0, nil, syscall.EINVAL + } + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Reject flags that we don't handle yet. + if flags & ^(linux.MSG_DONTWAIT|linux.MSG_EOR|linux.MSG_MORE|linux.MSG_NOSIGNAL) != 0 { + return 0, nil, syscall.EINVAL + } + + if file.Flags().NonBlocking { + flags |= linux.MSG_DONTWAIT + } + + n, err := sendSingleMsg(t, s, file, msgPtr, flags) + return n, nil, err +} + +// SendMMsg implements the linux syscall sendmmsg(2). +func SendMMsg(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + msgPtr := args[1].Pointer() + vlen := args[2].Uint() + flags := args[3].Int() + + if t.Arch().Width() != 8 { + // We only handle 64-bit for now. + return 0, nil, syscall.EINVAL + } + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, nil, syscall.ENOTSOCK + } + + // Reject flags that we don't handle yet. + if flags & ^(linux.MSG_DONTWAIT|linux.MSG_EOR|linux.MSG_MORE|linux.MSG_NOSIGNAL) != 0 { + return 0, nil, syscall.EINVAL + } + + if file.Flags().NonBlocking { + flags |= linux.MSG_DONTWAIT + } + + var count uint32 + var err error + for i := uint64(0); i < uint64(vlen); i++ { + mp, ok := msgPtr.AddLength(i * multipleMessageHeader64Len) + if !ok { + return 0, nil, syscall.EFAULT + } + var n uintptr + if n, err = sendSingleMsg(t, s, file, mp, flags); err != nil { + break + } + + // Copy the received length to the caller. 
+ lp, ok := mp.AddLength(messageHeader64Len) + if !ok { + return 0, nil, syscall.EFAULT + } + if _, err = t.CopyOut(lp, uint32(n)); err != nil { + break + } + count++ + } + + if count == 0 { + return 0, nil, err + } + return uintptr(count), nil, nil +} + +func sendSingleMsg(t *kernel.Task, s socket.Socket, file *fs.File, msgPtr usermem.Addr, flags int32) (uintptr, error) { + // Capture the message header. + var msg MessageHeader64 + if err := CopyInMessageHeader64(t, msgPtr, &msg); err != nil { + return 0, err + } + + var controlData []byte + if msg.ControlLen > 0 { + // Put an upper bound to prevent large allocations. + if msg.ControlLen > maxControlLen { + return 0, syscall.ENOBUFS + } + controlData = make([]byte, msg.ControlLen) + if _, err := t.CopyIn(usermem.Addr(msg.Control), &controlData); err != nil { + return 0, err + } + } + + // Read the destination address if one is specified. + var to []byte + if msg.NameLen != 0 { + var err error + to, err = CaptureAddress(t, usermem.Addr(msg.Name), msg.NameLen) + if err != nil { + return 0, err + } + } + + // Read data then call the sendmsg implementation. + if msg.IovLen > linux.UIO_MAXIOV { + return 0, syscall.EMSGSIZE + } + src, err := t.IovecsIOSequence(usermem.Addr(msg.Iov), int(msg.IovLen), usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, err + } + + controlMessages, err := control.Parse(t, s, controlData) + if err != nil { + return 0, err + } + + // Call the syscall implementation. + n, e := s.SendMsg(t, src, to, int(flags), controlMessages) + err = handleIOError(t, n != 0, e.ToError(), kernel.ERESTARTSYS, "sendmsg", file) + if err != nil { + controlMessages.Release() + } + return uintptr(n), err +} + +// sendTo is the implementation of the sendto syscall. It is called by sendto +// and send syscall handlers. 
+func sendTo(t *kernel.Task, fd kdefs.FD, bufPtr usermem.Addr, bufLen uint64, flags int32, namePtr usermem.Addr, nameLen uint32) (uintptr, error) { + bl := int(bufLen) + if bl < 0 { + return 0, syscall.EINVAL + } + + // Get socket from the file descriptor. + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, syscall.EBADF + } + defer file.DecRef() + + // Extract the socket. + s, ok := file.FileOperations.(socket.Socket) + if !ok { + return 0, syscall.ENOTSOCK + } + + if file.Flags().NonBlocking { + flags |= linux.MSG_DONTWAIT + } + + // Read the destination address if one is specified. + var to []byte + var err error + if namePtr != 0 { + to, err = CaptureAddress(t, namePtr, nameLen) + if err != nil { + return 0, err + } + } + + src, err := t.SingleIOSequence(bufPtr, bl, usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, err + } + + // Call the syscall implementation. + n, e := s.SendMsg(t, src, to, int(flags), control.New(t, s, nil)) + return uintptr(n), handleIOError(t, n != 0, e.ToError(), kernel.ERESTARTSYS, "sendto", file) +} + +// SendTo implements the linux syscall sendto(2). +func SendTo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + bufPtr := args[1].Pointer() + bufLen := args[2].Uint64() + flags := args[3].Int() + namePtr := args[4].Pointer() + nameLen := args[5].Uint() + + n, err := sendTo(t, fd, bufPtr, bufLen, flags, namePtr, nameLen) + return n, nil, err +} diff --git a/pkg/sentry/syscalls/linux/sys_stat.go b/pkg/sentry/syscalls/linux/sys_stat.go new file mode 100644 index 000000000..6e21b34fd --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_stat.go @@ -0,0 +1,209 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// Stat implements linux syscall stat(2). +func Stat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + statAddr := args[1].Pointer() + + path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return 0, nil, err + } + + return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error { + return stat(t, d, dirPath, statAddr) + }) +} + +// Fstatat implements linux syscall newfstatat, i.e. fstatat(2). +func Fstatat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + statAddr := args[2].Pointer() + flags := args[3].Int() + + path, dirPath, err := copyInPath(t, addr, flags&linux.AT_EMPTY_PATH != 0) + if err != nil { + return 0, nil, err + } + + if path == "" { + // Annoying. What's wrong with fstat? 
+ file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + return 0, nil, stat(t, file.Dirent, false, statAddr) + } + + return 0, nil, fileOpOn(t, fd, path, flags&linux.AT_SYMLINK_NOFOLLOW == 0, func(root *fs.Dirent, d *fs.Dirent) error { + return stat(t, d, dirPath, statAddr) + }) +} + +// Lstat implements linux syscall lstat(2). +func Lstat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + statAddr := args[1].Pointer() + + path, dirPath, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return 0, nil, err + } + + return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, false /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error { + return stat(t, d, dirPath, statAddr) + }) +} + +// Fstat implements linux syscall fstat(2). +func Fstat(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + statAddr := args[1].Pointer() + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + return 0, nil, stat(t, file.Dirent, false /* dirPath */, statAddr) +} + +// stat implements stat from the given *fs.Dirent. 
+func stat(t *kernel.Task, d *fs.Dirent, dirPath bool, statAddr usermem.Addr) error { + if dirPath && !fs.IsDir(d.Inode.StableAttr) { + return syserror.ENOTDIR + } + uattr, err := d.Inode.UnstableAttr(t) + if err != nil { + return err + } + + var mode uint32 + switch d.Inode.StableAttr.Type { + case fs.RegularFile, fs.SpecialFile: + mode |= linux.ModeRegular + case fs.Symlink: + mode |= linux.ModeSymlink + case fs.Directory, fs.SpecialDirectory: + mode |= linux.ModeDirectory + case fs.Pipe: + mode |= linux.ModeNamedPipe + case fs.CharacterDevice: + mode |= linux.ModeCharacterDevice + case fs.BlockDevice: + mode |= linux.ModeBlockDevice + case fs.Socket: + mode |= linux.ModeSocket + } + + _, err = t.CopyOut(statAddr, linux.Stat{ + Dev: uint64(d.Inode.StableAttr.DeviceID), + Rdev: uint64(linux.MakeDeviceID(d.Inode.StableAttr.DeviceFileMajor, d.Inode.StableAttr.DeviceFileMinor)), + Ino: uint64(d.Inode.StableAttr.InodeID), + Nlink: uattr.Links, + Mode: mode | uint32(uattr.Perms.LinuxMode()), + UID: uint32(uattr.Owner.UID.In(t.UserNamespace()).OrOverflow()), + GID: uint32(uattr.Owner.GID.In(t.UserNamespace()).OrOverflow()), + Size: uattr.Size, + Blksize: d.Inode.StableAttr.BlockSize, + Blocks: uattr.Usage / 512, + ATime: uattr.AccessTime.Timespec(), + MTime: uattr.ModificationTime.Timespec(), + CTime: uattr.StatusChangeTime.Timespec(), + }) + return err +} + +// Statfs implements linux syscall statfs(2). +func Statfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + statfsAddr := args[1].Pointer() + + path, _, err := copyInPath(t, addr, false /* allowEmpty */) + if err != nil { + return 0, nil, err + } + + return 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent) error { + return statfsImpl(t, d, statfsAddr) + }) +} + +// Fstatfs implements linux syscall fstatfs(2). 
+func Fstatfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + statfsAddr := args[1].Pointer() + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + return 0, nil, statfsImpl(t, file.Dirent, statfsAddr) +} + +// statfsImpl implements the linux syscall statfs and fstatfs based on a Dirent, +// copying the statfs structure out to addr on success, otherwise an error is +// returned. +func statfsImpl(t *kernel.Task, d *fs.Dirent, addr usermem.Addr) error { + info, err := d.Inode.StatFS(t) + if err != nil { + return err + } + // Construct the statfs structure and copy it out. + statfs := linux.Statfs{ + Type: info.Type, + // Treat block size and fragment size as the same, as + // most consumers of this structure will expect one + // or the other to be filled in. + BlockSize: d.Inode.StableAttr.BlockSize, + Blocks: info.TotalBlocks, + // We don't have the concept of reserved blocks, so + // report blocks free the same as available blocks. + // This is a normal thing for filesystems, to do, see + // udf, hugetlbfs, tmpfs, among others. + BlocksFree: info.FreeBlocks, + BlocksAvailable: info.FreeBlocks, + Files: info.TotalFiles, + FilesFree: info.FreeFiles, + // Same as Linux for simple_statfs, see fs/libfs.c. + NameLength: syscall.PathMax, + FragmentSize: d.Inode.StableAttr.BlockSize, + // Leave other fields 0 like simple_statfs does. + } + if _, err := t.CopyOut(addr, &statfs); err != nil { + return err + } + return nil +} diff --git a/pkg/sentry/syscalls/linux/sys_sync.go b/pkg/sentry/syscalls/linux/sys_sync.go new file mode 100644 index 000000000..902d210db --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_sync.go @@ -0,0 +1,75 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linux
+
+import (
+	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/fs"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+// Sync implements linux system call sync(2). It flushes every mounted
+// filesystem in the task's mount namespace.
+func Sync(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	t.MountNamespace().SyncAll(t)
+	// Sync is always successful.
+	return 0, nil, nil
+}
+
+// Syncfs implements linux system call syncfs(2).
+func Syncfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	fd := kdefs.FD(args[0].Int())
+
+	file := t.FDMap().GetFile(fd)
+	if file == nil {
+		return 0, nil, syserror.EBADF
+	}
+	defer file.DecRef()
+
+	// Use "sync-the-world" for now, it's guaranteed that fd is at least
+	// on the root filesystem.
+	return Sync(t, args)
+}
+
+// Fsync implements linux syscall fsync(2). Metadata and data are both
+// synced (fs.SyncAll) over the entire file. Interruption is translated to
+// ERESTARTSYS so the syscall is restarted.
+func Fsync(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	fd := kdefs.FD(args[0].Int())
+
+	file := t.FDMap().GetFile(fd)
+	if file == nil {
+		return 0, nil, syserror.EBADF
+	}
+	defer file.DecRef()
+
+	err := file.Fsync(t, 0, fs.FileMaxOffset, fs.SyncAll)
+	return 0, nil, syserror.ConvertIntr(err, kernel.ERESTARTSYS)
+}
+
+// Fdatasync implements linux syscall fdatasync(2).
+//
+// At the moment, it just calls Fsync, which is a big hammer, but correct.
+func Fdatasync(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	fd := kdefs.FD(args[0].Int())
+
+	file := t.FDMap().GetFile(fd)
+	if file == nil {
+		return 0, nil, syserror.EBADF
+	}
+	defer file.DecRef()
+
+	// fs.SyncData skips the metadata-only sync that fsync(2) performs.
+	err := file.Fsync(t, 0, fs.FileMaxOffset, fs.SyncData)
+	return 0, nil, syserror.ConvertIntr(err, kernel.ERESTARTSYS)
+}
diff --git a/pkg/sentry/syscalls/linux/sys_sysinfo.go b/pkg/sentry/syscalls/linux/sys_sysinfo.go
new file mode 100644
index 000000000..bd0ffcd5c
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_sysinfo.go
@@ -0,0 +1,42 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linux
+
+import (
+	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usage"
+)
+
+// Sysinfo implements the sysinfo syscall as described in man 2 sysinfo.
+func Sysinfo(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	addr := args[0].Pointer()
+
+	mem := t.Kernel().Platform.Memory()
+	mem.UpdateUsage()
+	_, totalUsage := usage.MemoryAccounting.Copy()
+	totalSize := usage.TotalMemory(mem.TotalSize(), totalUsage)
+
+	// Only a subset of the fields in sysinfo_t make sense to return.
+	// NOTE(review): the memory unit field is left at its zero value here;
+	// confirm that consumers treat an unset unit as "bytes".
+	si := linux.Sysinfo{
+		Procs:    uint16(len(t.PIDNamespace().Tasks())),
+		Uptime:   t.Kernel().MonotonicClock().Now().Seconds(),
+		TotalRAM: totalSize,
+		FreeRAM:  totalSize - totalUsage,
+	}
+	_, err := t.CopyOut(addr, si)
+	return 0, nil, err
+}
diff --git a/pkg/sentry/syscalls/linux/sys_syslog.go b/pkg/sentry/syscalls/linux/sys_syslog.go
new file mode 100644
index 000000000..792040c81
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_syslog.go
@@ -0,0 +1,61 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linux
+
+import (
+	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+const (
+	_SYSLOG_ACTION_READ_ALL    = 3
+	_SYSLOG_ACTION_SIZE_BUFFER = 10
+)
+
+// logBufLen is the default syslog buffer size on Linux.
+const logBufLen = 1 << 17
+
+// Syslog implements part of Linux syscall syslog.
+//
+// Only the unprivileged commands are implemented, allowing applications to
+// read a fun dmesg.
+func Syslog(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	command := args[0].Int()
+	buf := args[1].Pointer()
+	size := int(args[2].Int())
+
+	switch command {
+	case _SYSLOG_ACTION_READ_ALL:
+		if size < 0 {
+			return 0, nil, syserror.EINVAL
+		}
+		// Clamp the request to the fixed buffer size, then truncate the log
+		// to fit. The number of bytes copied out is the return value.
+		if size > logBufLen {
+			size = logBufLen
+		}
+
+		log := t.Kernel().Syslog().Log()
+		if len(log) > size {
+			log = log[:size]
+		}
+
+		n, err := t.CopyOutBytes(buf, log)
+		return uintptr(n), nil, err
+	case _SYSLOG_ACTION_SIZE_BUFFER:
+		return logBufLen, nil, nil
+	default:
+		// Privileged commands (console control, clearing, etc.) are not
+		// supported.
+		return 0, nil, syserror.ENOSYS
+	}
+}
diff --git a/pkg/sentry/syscalls/linux/sys_thread.go b/pkg/sentry/syscalls/linux/sys_thread.go
new file mode 100644
index 000000000..0adbf160f
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_thread.go
@@ -0,0 +1,704 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linux
+
+import (
+	"syscall"
+
+	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/sched"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+const (
+	// ExecMaxTotalSize is the maximum length of all argv and envv entries.
+	//
+	// N.B. The behavior here is different than Linux. Linux provides a limit on
+	// individual arguments of 32 pages, and an aggregate limit of at least 32 pages
+	// but otherwise bounded by min(stack size / 4, 8 MB * 3 / 4). We don't implement
+	// any behavior based on the stack size, and instead provide a fixed hard-limit of
+	// 2 MB (which should work well given that 8 MB stack limits are common).
+	ExecMaxTotalSize = 2 * 1024 * 1024
+
+	// ExecMaxElemSize is the maximum length of a single argv or envv entry.
+	ExecMaxElemSize = 32 * usermem.PageSize
+
+	// exitSignalMask is the signal mask to be sent at exit. Same as CSIGNAL in linux.
+	exitSignalMask = 0xff
+)
+
+// Possible values for the idtype argument to waitid(2), defined in Linux's
+// include/uapi/linux/wait.h.
+const (
+	_P_ALL  = 0
+	_P_PID  = 1
+	_P_PGID = 2
+)
+
+// Getppid implements linux syscall getppid(2). A task with no parent
+// reports a parent PID of 0.
+func Getppid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	parent := t.Parent()
+	if parent == nil {
+		return 0, nil, nil
+	}
+	return uintptr(t.PIDNamespace().IDOfThreadGroup(parent.ThreadGroup())), nil, nil
+}
+
+// Getpid implements linux syscall getpid(2).
+func Getpid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	return uintptr(t.ThreadGroup().ID()), nil, nil
+}
+
+// Gettid implements linux syscall gettid(2).
+func Gettid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	return uintptr(t.ThreadID()), nil, nil
+}
+
+// Execve implements linux syscall execve(2).
+func Execve(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	filenameAddr := args[0].Pointer()
+	argvAddr := args[1].Pointer()
+	envvAddr := args[2].Pointer()
+
+	// Extract our arguments. A null argv or envv pointer is treated as an
+	// empty vector.
+	filename, err := t.CopyInString(filenameAddr, syscall.PathMax)
+	if err != nil {
+		return 0, nil, err
+	}
+
+	var argv, envv []string
+	if argvAddr != 0 {
+		var err error
+		argv, err = t.CopyInVector(argvAddr, ExecMaxElemSize, ExecMaxTotalSize)
+		if err != nil {
+			return 0, nil, err
+		}
+	}
+	if envvAddr != 0 {
+		var err error
+		envv, err = t.CopyInVector(envvAddr, ExecMaxElemSize, ExecMaxTotalSize)
+		if err != nil {
+			return 0, nil, err
+		}
+	}
+
+	root := t.FSContext().RootDirectory()
+	defer root.DecRef()
+	wd := t.FSContext().WorkingDirectory()
+	defer wd.DecRef()
+
+	// Load the new TaskContext.
+	tc, err := t.Kernel().LoadTaskImage(t, t.MountNamespace(), root, wd, linux.MaxSymlinkTraversals, filename, argv, envv, t.Arch().FeatureSet())
+	if err != nil {
+		return 0, nil, err
+	}
+
+	ctrl, err := t.Execve(tc)
+	return 0, ctrl, err
+}
+
+// Exit implements linux syscall exit(2).
+func Exit(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	status := int(args[0].Int())
+	t.PrepareExit(kernel.ExitStatus{Code: status})
+	return 0, kernel.CtrlDoExit, nil
+}
+
+// ExitGroup implements linux syscall exit_group(2).
+func ExitGroup(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	status := int(args[0].Int())
+	t.PrepareGroupExit(kernel.ExitStatus{Code: status})
+	return 0, kernel.CtrlDoExit, nil
+}
+
+// clone is used by Clone, Fork, and VFork.
+// clone translates raw clone(2)-style flags into kernel.CloneOptions and
+// creates the new task, returning the child's thread ID. Note the flag
+// polarity: CLONE_VM/SIGHAND/THREAD/FILES/FS request *sharing*, so their
+// absence means a new copy is created.
+func clone(t *kernel.Task, flags int, stack usermem.Addr, parentTID usermem.Addr, childTID usermem.Addr, tls usermem.Addr) (uintptr, *kernel.SyscallControl, error) {
+	opts := kernel.CloneOptions{
+		SharingOptions: kernel.SharingOptions{
+			NewAddressSpace:     flags&syscall.CLONE_VM == 0,
+			NewSignalHandlers:   flags&syscall.CLONE_SIGHAND == 0,
+			NewThreadGroup:      flags&syscall.CLONE_THREAD == 0,
+			TerminationSignal:   linux.Signal(flags & exitSignalMask),
+			NewPIDNamespace:     flags&syscall.CLONE_NEWPID == syscall.CLONE_NEWPID,
+			NewUserNamespace:    flags&syscall.CLONE_NEWUSER == syscall.CLONE_NEWUSER,
+			NewNetworkNamespace: flags&syscall.CLONE_NEWNET == syscall.CLONE_NEWNET,
+			NewFiles:            flags&syscall.CLONE_FILES == 0,
+			NewFSContext:        flags&syscall.CLONE_FS == 0,
+			NewUTSNamespace:     flags&syscall.CLONE_NEWUTS == syscall.CLONE_NEWUTS,
+			NewIPCNamespace:     flags&syscall.CLONE_NEWIPC == syscall.CLONE_NEWIPC,
+		},
+		Stack:          stack,
+		SetTLS:         flags&syscall.CLONE_SETTLS == syscall.CLONE_SETTLS,
+		TLS:            tls,
+		ChildClearTID:  flags&syscall.CLONE_CHILD_CLEARTID == syscall.CLONE_CHILD_CLEARTID,
+		ChildSetTID:    flags&syscall.CLONE_CHILD_SETTID == syscall.CLONE_CHILD_SETTID,
+		ChildTID:       childTID,
+		ParentSetTID:   flags&syscall.CLONE_PARENT_SETTID == syscall.CLONE_PARENT_SETTID,
+		ParentTID:      parentTID,
+		Vfork:          flags&syscall.CLONE_VFORK == syscall.CLONE_VFORK,
+		Untraced:       flags&syscall.CLONE_UNTRACED == syscall.CLONE_UNTRACED,
+		InheritTracer:  flags&syscall.CLONE_PTRACE == syscall.CLONE_PTRACE,
+	}
+	ntid, ctrl, err := t.Clone(&opts)
+	return uintptr(ntid), ctrl, err
+}
+
+// Clone implements linux syscall clone(2).
+// sys_clone has so many flavors. We implement the default one in the
+// current linux 3.11 x86_64:
+//    sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr, tls_val)
+func Clone(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	flags := int(args[0].Int())
+	stack := args[1].Pointer()
+	parentTID := args[2].Pointer()
+	childTID := args[3].Pointer()
+	tls := args[4].Pointer()
+	return clone(t, flags, stack, parentTID, childTID, tls)
+}
+
+// Fork implements Linux syscall fork(2).
+func Fork(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	// "A call to fork() is equivalent to a call to clone(2) specifying flags
+	// as just SIGCHLD." - fork(2)
+	return clone(t, int(syscall.SIGCHLD), 0, 0, 0, 0)
+}
+
+// Vfork implements Linux syscall vfork(2).
+func Vfork(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	// """
+	// A call to vfork() is equivalent to calling clone(2) with flags specified as:
+	//
+	//     CLONE_VM | CLONE_VFORK | SIGCHLD
+	// """ - vfork(2)
+	return clone(t, syscall.CLONE_VM|syscall.CLONE_VFORK|int(syscall.SIGCHLD), 0, 0, 0, 0)
+}
+
+// wait4 waits for the given child process to exit.
+// wait4 implements the core of wait4(2) and waitpid(2): it selects a wait
+// target from pid, waits for a matching event, and copies the status and
+// rusage out. It returns the waited-on thread ID, or 0 (with a nil error)
+// when WNOHANG was set and no waitable event was pending.
+func wait4(t *kernel.Task, pid int, statusAddr usermem.Addr, options int, rusageAddr usermem.Addr) (uintptr, error) {
+	if options&^(syscall.WNOHANG|syscall.WUNTRACED|syscall.WCONTINUED|syscall.WALL|syscall.WCLONE) != 0 {
+		return 0, syscall.EINVAL
+	}
+	wopts := kernel.WaitOptions{
+		Events:       kernel.EventExit | kernel.EventTraceeStop,
+		ConsumeEvent: true,
+	}
+	// There are four cases to consider:
+	//
+	// pid < -1    any child process whose process group ID is equal to the absolute value of pid
+	// pid == -1   any child process
+	// pid == 0    any child process whose process group ID is equal to that of the calling process
+	// pid > 0     the child whose process ID is equal to the value of pid
+	switch {
+	case pid < -1:
+		wopts.SpecificPGID = kernel.ProcessGroupID(-pid)
+	case pid == -1:
+		// Any process is the default.
+	case pid == 0:
+		wopts.SpecificPGID = t.PIDNamespace().IDOfProcessGroup(t.ThreadGroup().ProcessGroup())
+	default:
+		wopts.SpecificTID = kernel.ThreadID(pid)
+	}
+
+	// WCLONE/WALL select which kinds of children are eligible; passing both
+	// bits via WALL selects everything.
+	switch options & (syscall.WCLONE | syscall.WALL) {
+	case 0:
+		wopts.NonCloneTasks = true
+	case syscall.WCLONE:
+		wopts.CloneTasks = true
+	case syscall.WALL:
+		wopts.NonCloneTasks = true
+		wopts.CloneTasks = true
+	default:
+		return 0, syscall.EINVAL
+	}
+	if options&syscall.WUNTRACED != 0 {
+		wopts.Events |= kernel.EventChildGroupStop
+	}
+	if options&syscall.WCONTINUED != 0 {
+		wopts.Events |= kernel.EventGroupContinue
+	}
+	if options&syscall.WNOHANG == 0 {
+		wopts.BlockInterruptErr = kernel.ERESTARTSYS
+	}
+
+	wr, err := t.Wait(&wopts)
+	if err != nil {
+		if err == kernel.ErrNoWaitableEvent {
+			// WNOHANG with nothing pending: return 0, not an error.
+			return 0, nil
+		}
+		return 0, err
+	}
+	if statusAddr != 0 {
+		if _, err := t.CopyOut(statusAddr, wr.Status); err != nil {
+			return 0, err
+		}
+	}
+	if rusageAddr != 0 {
+		ru := getrusage(wr.Task, linux.RUSAGE_BOTH)
+		if _, err := t.CopyOut(rusageAddr, &ru); err != nil {
+			return 0, err
+		}
+	}
+	return uintptr(wr.TID), nil
+}
+
+// Wait4 implements linux syscall wait4(2).
+func Wait4(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	pid := int(args[0].Int())
+	statusAddr := args[1].Pointer()
+	options := int(args[2].Uint())
+	rusageAddr := args[3].Pointer()
+
+	n, err := wait4(t, pid, statusAddr, options, rusageAddr)
+	return n, nil, err
+}
+
+// WaitPid implements linux syscall waitpid(2). It is wait4 without the
+// rusage out-parameter.
+func WaitPid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	pid := int(args[0].Int())
+	statusAddr := args[1].Pointer()
+	options := int(args[2].Uint())
+
+	n, err := wait4(t, pid, statusAddr, options, 0)
+	return n, nil, err
+}
+
+// Waitid implements linux syscall waitid(2).
+func Waitid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	idtype := args[0].Int()
+	id := args[1].Int()
+	infop := args[2].Pointer()
+	options := int(args[3].Uint())
+	rusageAddr := args[4].Pointer()
+
+	if options&^(syscall.WNOHANG|syscall.WEXITED|syscall.WSTOPPED|syscall.WCONTINUED|syscall.WNOWAIT) != 0 {
+		return 0, nil, syscall.EINVAL
+	}
+	// At least one event class must be requested.
+	if options&(syscall.WEXITED|syscall.WSTOPPED|syscall.WCONTINUED) == 0 {
+		return 0, nil, syscall.EINVAL
+	}
+	wopts := kernel.WaitOptions{
+		NonCloneTasks: true,
+		Events:        kernel.EventTraceeStop,
+		ConsumeEvent:  options&syscall.WNOWAIT == 0,
+	}
+	switch idtype {
+	case _P_ALL:
+	case _P_PID:
+		wopts.SpecificTID = kernel.ThreadID(id)
+	case _P_PGID:
+		wopts.SpecificPGID = kernel.ProcessGroupID(id)
+	default:
+		return 0, nil, syscall.EINVAL
+	}
+	if options&syscall.WEXITED != 0 {
+		wopts.Events |= kernel.EventExit
+	}
+	if options&syscall.WSTOPPED != 0 {
+		wopts.Events |= kernel.EventChildGroupStop
+	}
+	if options&syscall.WCONTINUED != 0 {
+		wopts.Events |= kernel.EventGroupContinue
+	}
+	if options&syscall.WNOHANG == 0 {
+		wopts.BlockInterruptErr = kernel.ERESTARTSYS
+	}
+
+	wr, err := t.Wait(&wopts)
+	if err != nil {
+		if err == kernel.ErrNoWaitableEvent {
+			err = nil
+			// "If WNOHANG was specified in options and there were no children
+			// in a waitable state, then waitid() returns 0 immediately and the
+			// state of the siginfo_t structure pointed to by infop is
+			// unspecified." - waitid(2). But Linux's waitid actually zeroes
+			// out the fields it would set for a successful waitid in this case
+			// as well.
+			if infop != 0 {
+				var si arch.SignalInfo
+				_, err = t.CopyOut(infop, &si)
+			}
+		}
+		return 0, nil, err
+	}
+	if rusageAddr != 0 {
+		ru := getrusage(wr.Task, linux.RUSAGE_BOTH)
+		if _, err := t.CopyOut(rusageAddr, &ru); err != nil {
+			return 0, nil, err
+		}
+	}
+	if infop == 0 {
+		return 0, nil, nil
+	}
+	// Translate the wait result into a SIGCHLD siginfo_t.
+	si := arch.SignalInfo{
+		Signo: int32(syscall.SIGCHLD),
+	}
+	si.SetPid(int32(wr.TID))
+	si.SetUid(int32(wr.UID))
+	// TODO: convert kernel.ExitStatus to functions and make
+	// WaitResult.Status a linux.WaitStatus
+	s := syscall.WaitStatus(wr.Status)
+	switch {
+	case s.Exited():
+		si.Code = arch.CLD_EXITED
+		si.SetStatus(int32(s.ExitStatus()))
+	case s.Signaled():
+		si.Code = arch.CLD_KILLED
+		si.SetStatus(int32(s.Signal()))
+	case s.CoreDump():
+		si.Code = arch.CLD_DUMPED
+		si.SetStatus(int32(s.Signal()))
+	case s.Stopped():
+		if wr.Event == kernel.EventTraceeStop {
+			si.Code = arch.CLD_TRAPPED
+			si.SetStatus(int32(s.TrapCause()))
+		} else {
+			si.Code = arch.CLD_STOPPED
+			si.SetStatus(int32(s.StopSignal()))
+		}
+	case s.Continued():
+		si.Code = arch.CLD_CONTINUED
+		si.SetStatus(int32(syscall.SIGCONT))
+	default:
+		t.Warningf("waitid got incomprehensible wait status %d", s)
+	}
+	_, err = t.CopyOut(infop, &si)
+	return 0, nil, err
+}
+
+// SetTidAddress implements linux syscall set_tid_address(2).
+func SetTidAddress(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	addr := args[0].Pointer()
+
+	// Always succeed, return caller's tid.
+	t.SetClearTID(addr)
+	return uintptr(t.ThreadID()), nil, nil
+}
+
+// Unshare implements linux syscall unshare(2).
+// Note the flag polarity is the opposite of clone(2): here CLONE_* bits
+// request that the resource be *un*shared.
+func Unshare(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	flags := args[0].Int()
+	opts := kernel.SharingOptions{
+		NewAddressSpace:     flags&syscall.CLONE_VM == syscall.CLONE_VM,
+		NewSignalHandlers:   flags&syscall.CLONE_SIGHAND == syscall.CLONE_SIGHAND,
+		NewThreadGroup:      flags&syscall.CLONE_THREAD == syscall.CLONE_THREAD,
+		NewPIDNamespace:     flags&syscall.CLONE_NEWPID == syscall.CLONE_NEWPID,
+		NewUserNamespace:    flags&syscall.CLONE_NEWUSER == syscall.CLONE_NEWUSER,
+		NewNetworkNamespace: flags&syscall.CLONE_NEWNET == syscall.CLONE_NEWNET,
+		NewFiles:            flags&syscall.CLONE_FILES == syscall.CLONE_FILES,
+		NewFSContext:        flags&syscall.CLONE_FS == syscall.CLONE_FS,
+		NewUTSNamespace:     flags&syscall.CLONE_NEWUTS == syscall.CLONE_NEWUTS,
+		NewIPCNamespace:     flags&syscall.CLONE_NEWIPC == syscall.CLONE_NEWIPC,
+	}
+	// "CLONE_NEWPID automatically implies CLONE_THREAD as well." - unshare(2)
+	if opts.NewPIDNamespace {
+		opts.NewThreadGroup = true
+	}
+	// "... specifying CLONE_NEWUSER automatically implies CLONE_THREAD. Since
+	// Linux 3.9, CLONE_NEWUSER also automatically implies CLONE_FS."
+	if opts.NewUserNamespace {
+		opts.NewThreadGroup = true
+		opts.NewFSContext = true
+	}
+	return 0, nil, t.Unshare(&opts)
+}
+
+// SchedYield implements linux syscall sched_yield(2).
+func SchedYield(t *kernel.Task, _ arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	t.Yield()
+	return 0, nil, nil
+}
+
+// SchedSetaffinity implements linux syscall sched_setaffinity(2).
+func SchedSetaffinity(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	tid := args[0].Int()
+	size := args[1].SizeT()
+	maskAddr := args[2].Pointer()
+
+	// tid == 0 means the calling task itself.
+	var task *kernel.Task
+	if tid == 0 {
+		task = t
+	} else {
+		task = t.PIDNamespace().TaskWithID(kernel.ThreadID(tid))
+		if task == nil {
+			return 0, nil, syserror.ESRCH
+		}
+	}
+
+	// Only min(size, mask.Size()) bytes are read from userspace; excess
+	// input is ignored.
+	mask := sched.NewCPUSet(t.Kernel().ApplicationCores())
+	if size > mask.Size() {
+		size = mask.Size()
+	}
+	if _, err := t.CopyInBytes(maskAddr, mask[:size]); err != nil {
+		return 0, nil, err
+	}
+	return 0, nil, task.SetCPUMask(mask)
+}
+
+// SchedGetaffinity implements linux syscall sched_getaffinity(2).
+func SchedGetaffinity(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	tid := args[0].Int()
+	size := args[1].SizeT()
+	maskAddr := args[2].Pointer()
+
+	// This limitation is because linux stores the cpumask
+	// in an array of "unsigned long" so the buffer needs to
+	// be a multiple of the word size.
+	if size&(t.Arch().Width()-1) > 0 {
+		return 0, nil, syserror.EINVAL
+	}
+
+	var task *kernel.Task
+	if tid == 0 {
+		task = t
+	} else {
+		task = t.PIDNamespace().TaskWithID(kernel.ThreadID(tid))
+		if task == nil {
+			return 0, nil, syserror.ESRCH
+		}
+	}
+
+	mask := task.CPUMask()
+	// The buffer needs to be big enough to hold a cpumask with
+	// all possible cpus.
+	if size < mask.Size() {
+		return 0, nil, syserror.EINVAL
+	}
+	_, err := t.CopyOutBytes(maskAddr, mask)
+
+	// NOTE: The syscall interface is slightly different than the glibc
+	// interface. The raw sched_getaffinity syscall returns the number of
+	// bytes used to represent a cpu mask.
+	return uintptr(mask.Size()), nil, err
+}
+
+// Getcpu implements linux syscall getcpu(2).
+func Getcpu(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	cpu := args[0].Pointer()
+	node := args[1].Pointer()
+	// third argument to this system call is nowadays unused.
+
+	if cpu != 0 {
+		buf := t.CopyScratchBuffer(4)
+		usermem.ByteOrder.PutUint32(buf, uint32(t.CPU()))
+		if _, err := t.CopyOutBytes(cpu, buf); err != nil {
+			return 0, nil, err
+		}
+	}
+	// We always return node 0.
+	if node != 0 {
+		if _, err := t.MemoryManager().ZeroOut(t, node, 4, usermem.IOOpts{
+			AddressSpaceActive: true,
+		}); err != nil {
+			return 0, nil, err
+		}
+	}
+	return 0, nil, nil
+}
+
+// Setpgid implements the linux syscall setpgid(2).
+func Setpgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	// Note that throughout this function, pgid is interpreted with respect
+	// to t's namespace, not with respect to the selected ThreadGroup's
+	// namespace (which may be different).
+	pid := kernel.ThreadID(args[0].Int())
+	pgid := kernel.ProcessGroupID(args[1].Int())
+
+	// "If pid is zero, then the process ID of the calling process is used."
+	tg := t.ThreadGroup()
+	if pid != 0 {
+		ot := t.PIDNamespace().TaskWithID(pid)
+		if ot == nil {
+			return 0, nil, syserror.ESRCH
+		}
+		tg = ot.ThreadGroup()
+		// Only a thread group leader may be the target of setpgid.
+		if tg.Leader() != ot {
+			return 0, nil, syserror.EINVAL
+		}
+
+		// Setpgid only operates on child threadgroups.
+		if tg != t.ThreadGroup() && (tg.Leader().Parent() == nil || tg.Leader().Parent().ThreadGroup() != t.ThreadGroup()) {
+			return 0, nil, syserror.ESRCH
+		}
+	}
+
+	// "If pgid is zero, then the PGID of the process specified by pid is made
+	// the same as its process ID."
+	defaultPGID := kernel.ProcessGroupID(t.PIDNamespace().IDOfThreadGroup(tg))
+	if pgid == 0 {
+		pgid = defaultPGID
+	} else if pgid < 0 {
+		return 0, nil, syserror.EINVAL
+	}
+
+	// If the pgid is the same as the group, then create a new one. Otherwise,
+	// we attempt to join an existing process group.
+	if pgid == defaultPGID {
+		// For convenience, errors line up with Linux syscall API.
+		if err := tg.CreateProcessGroup(); err != nil {
+			// Is the process group already as expected? If so,
+			// just return success. This is the same behavior as
+			// Linux.
+			if t.PIDNamespace().IDOfProcessGroup(tg.ProcessGroup()) == defaultPGID {
+				return 0, nil, nil
+			}
+			return 0, nil, err
+		}
+	} else {
+		// Same as CreateProcessGroup, above.
+		if err := tg.JoinProcessGroup(t.PIDNamespace(), pgid, tg != t.ThreadGroup()); err != nil {
+			// See above.
+			if t.PIDNamespace().IDOfProcessGroup(tg.ProcessGroup()) == pgid {
+				return 0, nil, nil
+			}
+			return 0, nil, err
+		}
+	}
+
+	// Success.
+	return 0, nil, nil
+}
+
+// Getpgrp implements the linux syscall getpgrp(2).
+func Getpgrp(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	return uintptr(t.PIDNamespace().IDOfProcessGroup(t.ThreadGroup().ProcessGroup())), nil, nil
+}
+
+// Getpgid implements the linux syscall getpgid(2).
+func Getpgid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	tid := kernel.ThreadID(args[0].Int())
+	if tid == 0 {
+		return Getpgrp(t, args)
+	}
+
+	target := t.PIDNamespace().TaskWithID(tid)
+	if target == nil {
+		return 0, nil, syserror.ESRCH
+	}
+
+	return uintptr(t.PIDNamespace().IDOfProcessGroup(target.ThreadGroup().ProcessGroup())), nil, nil
+}
+
+// Setsid implements the linux syscall setsid(2).
+func Setsid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	return 0, nil, t.ThreadGroup().CreateSession()
+}
+
+// Getsid implements the linux syscall getsid(2).
+func Getsid(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	tid := kernel.ThreadID(args[0].Int())
+	if tid == 0 {
+		return uintptr(t.PIDNamespace().IDOfSession(t.ThreadGroup().Session())), nil, nil
+	}
+
+	target := t.PIDNamespace().TaskWithID(tid)
+	if target == nil {
+		return 0, nil, syserror.ESRCH
+	}
+
+	return uintptr(t.PIDNamespace().IDOfSession(target.ThreadGroup().Session())), nil, nil
+}
+
+// Getpriority pretends to implement the linux syscall getpriority(2).
+//
+// This is a stub; real priorities require a full scheduler.
+func Getpriority(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	which := args[0].Int()
+	who := kernel.ThreadID(args[1].Int())
+
+	switch which {
+	case syscall.PRIO_PROCESS:
+		// Look for who, return ESRCH if not found.
+		var task *kernel.Task
+		if who == 0 {
+			task = t
+		} else {
+			task = t.PIDNamespace().TaskWithID(who)
+		}
+
+		if task == nil {
+			return 0, nil, syscall.ESRCH
+		}
+
+		// From kernel/sys.c:getpriority:
+		// "To avoid negative return values, 'getpriority()'
+		// will not return the normal nice-value, but a negated
+		// value that has been offset by 20"
+		return uintptr(20 - task.Niceness()), nil, nil
+	case syscall.PRIO_USER:
+		fallthrough
+	case syscall.PRIO_PGRP:
+		// PRIO_USER and PRIO_PGRP have no further implementation yet.
+		return 0, nil, nil
+	default:
+		return 0, nil, syscall.EINVAL
+	}
+}
+
+// Setpriority pretends to implement the linux syscall setpriority(2).
+//
+// This is a stub; real priorities require a full scheduler.
+func Setpriority(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	which := args[0].Int()
+	who := kernel.ThreadID(args[1].Int())
+	niceval := int(args[2].Int())
+
+	// In the kernel's implementation, values outside the range
+	// of [-20, 19] are truncated to these minimum and maximum
+	// values.
+	if niceval < -20 /* min niceval */ {
+		niceval = -20
+	} else if niceval > 19 /* max niceval */ {
+		niceval = 19
+	}
+
+	switch which {
+	case syscall.PRIO_PROCESS:
+		// Look for who, return ESRCH if not found.
+		var task *kernel.Task
+		if who == 0 {
+			task = t
+		} else {
+			task = t.PIDNamespace().TaskWithID(who)
+		}
+
+		if task == nil {
+			return 0, nil, syscall.ESRCH
+		}
+
+		task.SetNiceness(niceval)
+	case syscall.PRIO_USER:
+		fallthrough
+	case syscall.PRIO_PGRP:
+		// PRIO_USER and PRIO_PGRP have no further implementation yet.
+		return 0, nil, nil
+	default:
+		return 0, nil, syscall.EINVAL
+	}
+
+	return 0, nil, nil
+}
+
+// Ptrace implements linux system call ptrace(2).
+func Ptrace(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	req := args[0].Int64()
+	pid := kernel.ThreadID(args[1].Int())
+	addr := args[2].Pointer()
+	data := args[3].Pointer()
+
+	return 0, nil, t.Ptrace(req, pid, addr, data)
+}
diff --git a/pkg/sentry/syscalls/linux/sys_time.go b/pkg/sentry/syscalls/linux/sys_time.go
new file mode 100644
index 000000000..dcee694b2
--- /dev/null
+++ b/pkg/sentry/syscalls/linux/sys_time.go
@@ -0,0 +1,338 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package linux
+
+import (
+	"time"
+
+	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/arch"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
+	ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/usermem"
+	"gvisor.googlesource.com/gvisor/pkg/syserror"
+)
+
+// The most significant 29 bits hold either a pid or a file descriptor.
+// The pid is stored complemented (^pid), so invert after shifting to
+// recover it.
+func pidOfClockID(c int32) kernel.ThreadID {
+	return kernel.ThreadID(^(c >> 3))
+}
+
+// whichCPUClock returns one of CPUCLOCK_PERF, CPUCLOCK_VIRT, CPUCLOCK_SCHED or
+// CLOCK_FD.
+func whichCPUClock(c int32) int32 {
+	return c & linux.CPUCLOCK_CLOCK_MASK
+}
+
+// isCPUClockPerThread returns true if the CPUCLOCK_PERTHREAD bit is set in the
+// clock id.
+func isCPUClockPerThread(c int32) bool {
+	return c&linux.CPUCLOCK_PERTHREAD_MASK != 0
+}
+
+// isValidCPUClock checks that the cpu clock id is valid.
+func isValidCPUClock(c int32) bool {
+	// Bits 0, 1, and 2 cannot all be set.
+	if c&7 == 7 {
+		return false
+	}
+	if whichCPUClock(c) >= linux.CPUCLOCK_MAX {
+		return false
+	}
+	return true
+}
+
+// targetTask returns the kernel.Task for the given clock id, or nil if no
+// such task exists. An encoded pid of 0 means the calling task.
+func targetTask(t *kernel.Task, c int32) *kernel.Task {
+	pid := pidOfClockID(c)
+	if pid == 0 {
+		return t
+	}
+	return t.PIDNamespace().TaskWithID(pid)
+}
+
+// ClockGetres implements linux syscall clock_getres(2). All supported clocks
+// report a resolution of 1 nanosecond.
+func ClockGetres(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	clockID := int32(args[0].Int())
+	addr := args[1].Pointer()
+	r := linux.Timespec{
+		Sec:  0,
+		Nsec: 1,
+	}
+
+	// Validate the clock id even if the result will not be copied out.
+	if _, err := getClock(t, clockID); err != nil {
+		return 0, nil, syserror.EINVAL
+	}
+
+	if addr == 0 {
+		// Don't need to copy out.
+		return 0, nil, nil
+	}
+
+	return 0, nil, copyTimespecOut(t, addr, &r)
+}
+
+// cpuClocker is the common interface of kernel.Task and kernel.ThreadGroup
+// for obtaining per-entity CPU clocks.
+type cpuClocker interface {
+	UserCPUClock() ktime.Clock
+	CPUClock() ktime.Clock
+}
+
+// getClock maps a clock_gettime(2)-style clock id to a ktime.Clock. Negative
+// ids encode per-task or per-threadgroup CPU clocks; non-negative ids are the
+// well-known CLOCK_* constants.
+func getClock(t *kernel.Task, clockID int32) (ktime.Clock, error) {
+	if clockID < 0 {
+		if !isValidCPUClock(clockID) {
+			return nil, syserror.EINVAL
+		}
+
+		targetTask := targetTask(t, clockID)
+		if targetTask == nil {
+			return nil, syserror.EINVAL
+		}
+
+		var target cpuClocker
+		if isCPUClockPerThread(clockID) {
+			target = targetTask
+		} else {
+			target = targetTask.ThreadGroup()
+		}
+
+		switch whichCPUClock(clockID) {
+		case linux.CPUCLOCK_VIRT:
+			return target.UserCPUClock(), nil
+		case linux.CPUCLOCK_PROF, linux.CPUCLOCK_SCHED:
+			// CPUCLOCK_SCHED is approximated by CPUCLOCK_PROF.
+			return target.CPUClock(), nil
+		default:
+			return nil, syserror.EINVAL
+		}
+	}
+
+	switch clockID {
+	case linux.CLOCK_REALTIME, linux.CLOCK_REALTIME_COARSE:
+		return t.Kernel().RealtimeClock(), nil
+	case linux.CLOCK_MONOTONIC, linux.CLOCK_MONOTONIC_COARSE, linux.CLOCK_MONOTONIC_RAW:
+		// CLOCK_MONOTONIC approximates CLOCK_MONOTONIC_RAW.
+		return t.Kernel().MonotonicClock(), nil
+	case linux.CLOCK_PROCESS_CPUTIME_ID:
+		return t.ThreadGroup().CPUClock(), nil
+	case linux.CLOCK_THREAD_CPUTIME_ID:
+		return t.CPUClock(), nil
+	default:
+		return nil, syserror.EINVAL
+	}
+}
+
+// ClockGettime implements linux syscall clock_gettime(2).
+func ClockGettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	clockID := int32(args[0].Int())
+	addr := args[1].Pointer()
+
+	c, err := getClock(t, clockID)
+	if err != nil {
+		return 0, nil, err
+	}
+	ts := c.Now().Timespec()
+	return 0, nil, copyTimespecOut(t, addr, &ts)
+}
+
+// ClockSettime implements linux syscall clock_settime(2). Setting clocks is
+// never permitted.
+func ClockSettime(*kernel.Task, arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	return 0, nil, syserror.EPERM
+}
+
+// Time implements linux syscall time(2). The time is both returned and, if
+// addr is non-null, copied out.
+func Time(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
+	addr := args[0].Pointer()
+
+	r := t.Kernel().RealtimeClock().Now().TimeT()
+	if addr == usermem.Addr(0) {
+		return uintptr(r), nil, nil
+	}
+
+	if _, err := t.CopyOut(addr, r); err != nil {
+		return 0, nil, err
+	}
+	return uintptr(r), nil, nil
+}
+
+// clockNanosleepRestartBlock encapsulates the state required to restart
+// clock_nanosleep(2) via restart_syscall(2).
+type clockNanosleepRestartBlock struct {
+	c        ktime.Clock
+	duration time.Duration
+	rem      usermem.Addr
+}
+
+// Restart implements kernel.SyscallRestartBlock.Restart.
func (n *clockNanosleepRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
	// Resume sleeping for the remaining duration that was recorded when
	// the original nanosleep/clock_nanosleep call was interrupted. rem is
	// passed through so a further interruption rewrites the user's
	// remaining-time buffer again.
	return 0, clockNanosleepFor(t, n.c, n.duration, n.rem)
}

// clockNanosleepUntil blocks until a specified time.
//
// If blocking is interrupted, the syscall is restarted with the original
// arguments: the deadline is absolute, so nothing needs to be recomputed
// (hence ERESTARTNOHAND rather than a restart block).
func clockNanosleepUntil(t *kernel.Task, c ktime.Clock, ts linux.Timespec) error {
	notifier, tchan := ktime.NewChannelNotifier()
	timer := ktime.NewTimer(c, notifier)

	// Turn on the timer: one-shot (Period 0) firing at absolute time ts
	// on clock c.
	timer.Swap(ktime.Setting{
		Period:  0,
		Enabled: true,
		Next:    ktime.FromTimespec(ts),
	})

	err := t.BlockWithTimer(nil, tchan)

	timer.Destroy()

	// Did we just block until the timeout happened? Reaching the deadline
	// is success for an absolute sleep.
	if err == syserror.ETIMEDOUT {
		return nil
	}

	return syserror.ConvertIntr(err, kernel.ERESTARTNOHAND)
}

// clockNanosleepFor blocks for a specified duration on clock c.
//
// If blocking is interrupted, the syscall is restarted (via a restart block)
// with the remaining duration timeout, and — if rem is non-NULL — the
// remaining time is copied out to the application.
func clockNanosleepFor(t *kernel.Task, c ktime.Clock, dur time.Duration, rem usermem.Addr) error {
	timer, start, tchan := ktime.After(c, dur)

	err := t.BlockWithTimer(nil, tchan)

	// Sample the clock before tearing the timer down so the remaining
	// duration is computed against the time we actually woke up.
	after := c.Now()

	timer.Destroy()

	var remaining time.Duration
	// Did we just block for the entire duration?
	if err == syserror.ETIMEDOUT {
		remaining = 0
	} else {
		// Clamp at zero in case we woke slightly past the deadline
		// without receiving ETIMEDOUT.
		remaining = dur - after.Sub(start)
		if remaining < 0 {
			remaining = time.Duration(0)
		}
	}

	// Copy out remaining time.
	//
	// NOTE(review): this writes rem on every non-nil err, including
	// ETIMEDOUT (where remaining is 0). Linux leaves *rem untouched on
	// timeout — confirm this difference is intended.
	if err != nil && rem != usermem.Addr(0) {
		timeleft := linux.NsecToTimespec(remaining.Nanoseconds())
		if err := copyTimespecOut(t, rem, &timeleft); err != nil {
			return err
		}
	}

	// Did we just block for the entire duration? Then the sleep succeeded.
	if err == syserror.ETIMEDOUT {
		return nil
	}

	// If interrupted, arrange for a restart with the remaining duration.
	if err == syserror.ErrInterrupted {
		t.SetSyscallRestartBlock(&clockNanosleepRestartBlock{
			c:        c,
			duration: remaining,
			rem:      rem,
		})
		return kernel.ERESTART_RESTARTBLOCK
	}

	return err
}

// Nanosleep implements linux syscall nanosleep(2).
func Nanosleep(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	rem := args[1].Pointer()

	ts, err := copyTimespecIn(t, addr)
	if err != nil {
		return 0, nil, err
	}

	if !ts.Valid() {
		return 0, nil, syserror.EINVAL
	}

	// Just like linux, we cap the timeout with the max number that int64 can
	// represent which is roughly 292 years.
	dur := time.Duration(ts.ToNsecCapped()) * time.Nanosecond
	return 0, nil, clockNanosleepFor(t, t.Kernel().MonotonicClock(), dur, rem)
}

// ClockNanosleep implements linux syscall clock_nanosleep(2).
func ClockNanosleep(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	clockID := int32(args[0].Int())
	flags := args[1].Int()
	addr := args[2].Pointer()
	rem := args[3].Pointer()

	req, err := copyTimespecIn(t, addr)
	if err != nil {
		return 0, nil, err
	}

	if !req.Valid() {
		return 0, nil, syserror.EINVAL
	}

	// Only allow clock constants also allowed by Linux.
	//
	// NOTE(review): CLOCK_REALTIME is 0 on Linux, so the CLOCK_REALTIME
	// comparison inside the clockID > 0 branch can never match; negative
	// IDs (dynamic CPU clocks) are validated by getClock below — confirm
	// the intended whitelist.
	if clockID > 0 {
		if clockID != linux.CLOCK_REALTIME &&
			clockID != linux.CLOCK_MONOTONIC &&
			clockID != linux.CLOCK_PROCESS_CPUTIME_ID {
			return 0, nil, syserror.EINVAL
		}
	}

	c, err := getClock(t, clockID)
	if err != nil {
		return 0, nil, err
	}

	// TIMER_ABSTIME means req is an absolute deadline; rem is ignored in
	// that case (restart reuses the same absolute time).
	if flags&linux.TIMER_ABSTIME != 0 {
		return 0, nil, clockNanosleepUntil(t, c, req)
	}

	dur := time.Duration(req.ToNsecCapped()) * time.Nanosecond
	return 0, nil, clockNanosleepFor(t, c, dur, rem)
}

// Gettimeofday implements linux syscall gettimeofday(2).
+func Gettimeofday(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + tv := args[0].Pointer() + tz := args[1].Pointer() + + if tv != usermem.Addr(0) { + nowTv := t.Kernel().RealtimeClock().Now().Timeval() + if err := copyTimevalOut(t, tv, &nowTv); err != nil { + return 0, nil, err + } + } + + if tz != usermem.Addr(0) { + // Ask the time package for the timezone. + _, offset := time.Now().Zone() + // This int32 array mimics linux's struct timezone. + timezone := [2]int32{-int32(offset) / 60, 0} + _, err := t.CopyOut(tz, timezone) + return 0, nil, err + } + return 0, nil, nil +} diff --git a/pkg/sentry/syscalls/linux/sys_timer.go b/pkg/sentry/syscalls/linux/sys_timer.go new file mode 100644 index 000000000..4ed077626 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_timer.go @@ -0,0 +1,168 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "syscall" + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" +) + +// ItimerType denotes the type of interval timer. +type ItimerType int + +// Interval timer types from <sys/time.h>. +const ( + // ItimerReal equals to ITIMER_REAL. 
+ ItimerReal ItimerType = iota + // ItimerVirtual equals to ITIMER_VIRTUAL. + ItimerVirtual + // ItimerProf equals to ITIMER_PROF. + ItimerProf +) + +const nsecPerSec = int64(time.Second) + +// copyItimerValIn copies an ItimerVal from the untrusted app range to the +// kernel. The ItimerVal may be either 32 or 64 bits. +// A NULL address is allowed because because Linux allows +// setitimer(which, NULL, &old_value) which disables the timer. +// There is a KERN_WARN message saying this misfeature will be removed. +// However, that hasn't happened as of 3.19, so we continue to support it. +func copyItimerValIn(t *kernel.Task, addr usermem.Addr) (linux.ItimerVal, error) { + if addr == usermem.Addr(0) { + return linux.ItimerVal{}, nil + } + + switch t.Arch().Width() { + case 8: + // Native size, just copy directly. + var itv linux.ItimerVal + if _, err := t.CopyIn(addr, &itv); err != nil { + return linux.ItimerVal{}, err + } + + return itv, nil + default: + return linux.ItimerVal{}, syscall.ENOSYS + } +} + +// copyItimerValOut copies an ItimerVal to the untrusted app range. +// The ItimerVal may be either 32 or 64 bits. +// A NULL address is allowed, in which case no copy takes place +func copyItimerValOut(t *kernel.Task, addr usermem.Addr, itv *linux.ItimerVal) error { + if addr == usermem.Addr(0) { + return nil + } + + switch t.Arch().Width() { + case 8: + // Native size, just copy directly. + _, err := t.CopyOut(addr, itv) + return err + default: + return syscall.ENOSYS + } +} + +func findTimer(t *kernel.Task, w ItimerType) (*ktime.Timer, error) { + switch w { + case ItimerReal: + return t.ThreadGroup().Timer().RealTimer, nil + case ItimerVirtual: + return t.ThreadGroup().Timer().VirtualTimer, nil + case ItimerProf: + return t.ThreadGroup().Timer().ProfTimer, nil + default: + return nil, syscall.EINVAL + } +} + +// Getitimer implements linux syscall getitimer(2). 
+func Getitimer(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + timerID := ItimerType(args[0].Int()) + val := args[1].Pointer() + + timer, err := findTimer(t, timerID) + if err != nil { + return 0, nil, err + } + value, interval := ktime.SpecFromSetting(timer.Get()) + olditv := linux.ItimerVal{ + Value: linux.DurationToTimeval(value), + Interval: linux.DurationToTimeval(interval), + } + + return 0, nil, copyItimerValOut(t, val, &olditv) +} + +// Setitimer implements linux syscall setitimer(2). +func Setitimer(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + timerID := ItimerType(args[0].Int()) + newVal := args[1].Pointer() + oldVal := args[2].Pointer() + + timer, err := findTimer(t, timerID) + if err != nil { + return 0, nil, err + } + + itv, err := copyItimerValIn(t, newVal) + if err != nil { + return 0, nil, err + } + // Just like linux, we cap the timer value and interval with the max + // number that int64 can represent which is roughly 292 years. + s, err := ktime.SettingFromSpec(itv.Value.ToDuration(), + itv.Interval.ToDuration(), timer.Clock()) + if err != nil { + return 0, nil, err + } + + valueNS, intervalNS := ktime.SpecFromSetting(timer.Swap(s)) + olditv := linux.ItimerVal{ + Value: linux.DurationToTimeval(valueNS), + Interval: linux.DurationToTimeval(intervalNS), + } + + return 0, nil, copyItimerValOut(t, oldVal, &olditv) +} + +// Alarm implements linux syscall alarm(2). +func Alarm(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + duration := time.Duration(args[0].Uint()) * time.Second + + timer := t.ThreadGroup().Timer().RealTimer + s, err := ktime.SettingFromSpec(duration, 0, timer.Clock()) + if err != nil { + return 0, nil, err + } + + value, _ := ktime.SpecFromSetting(timer.Swap(s)) + sec := int64(value) / nsecPerSec + nsec := int64(value) % nsecPerSec + // We can't return 0 if we have an alarm pending ... 
+ if (sec == 0 && nsec > 0) || nsec >= nsecPerSec/2 { + sec++ + } + + return uintptr(sec), nil, nil +} diff --git a/pkg/sentry/syscalls/linux/sys_timerfd.go b/pkg/sentry/syscalls/linux/sys_timerfd.go new file mode 100644 index 000000000..cb81d42b9 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_timerfd.go @@ -0,0 +1,135 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs/timerfd" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// TimerfdCreate implements Linux syscall timerfd_create(2). 
+func TimerfdCreate(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + clockID := args[0].Int() + flags := args[1].Int() + + if flags&^(linux.TFD_CLOEXEC|linux.TFD_NONBLOCK) != 0 { + return 0, nil, syserror.EINVAL + } + + var c ktime.Clock + switch clockID { + case linux.CLOCK_REALTIME: + c = t.Kernel().RealtimeClock() + case linux.CLOCK_MONOTONIC: + c = t.Kernel().MonotonicClock() + default: + return 0, nil, syserror.EINVAL + } + f := timerfd.NewFile(t, c) + defer f.DecRef() + f.SetFlags(fs.SettableFileFlags{ + NonBlocking: flags&linux.TFD_NONBLOCK != 0, + }) + + fd, err := t.FDMap().NewFDFrom(0, f, kernel.FDFlags{ + CloseOnExec: flags&linux.TFD_CLOEXEC != 0, + }, t.ThreadGroup().Limits()) + if err != nil { + return 0, nil, err + } + + return uintptr(fd), nil, nil +} + +// TimerfdSettime implements Linux syscall timerfd_settime(2). +func TimerfdSettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + flags := args[1].Int() + newValAddr := args[2].Pointer() + oldValAddr := args[3].Pointer() + + if flags&^(linux.TFD_TIMER_ABSTIME) != 0 { + return 0, nil, syserror.EINVAL + } + + f := t.FDMap().GetFile(fd) + if f == nil { + return 0, nil, syserror.EBADF + } + defer f.DecRef() + + tf, ok := f.FileOperations.(*timerfd.TimerOperations) + if !ok { + return 0, nil, syserror.EINVAL + } + + var newVal linux.Itimerspec + if _, err := t.CopyIn(newValAddr, &newVal); err != nil { + return 0, nil, err + } + var s ktime.Setting + var err error + if flags&linux.TFD_TIMER_ABSTIME != 0 { + s, err = ktime.SettingFromAbsSpec(ktime.FromTimespec(newVal.Value), + newVal.Interval.ToDuration()) + } else { + s, err = ktime.SettingFromSpec(newVal.Value.ToDuration(), + newVal.Interval.ToDuration(), tf.Clock()) + } + if err != nil { + return 0, nil, err + } + valueNS, intervalNS := ktime.SpecFromSetting(tf.SetTime(s)) + if oldValAddr == 0 { + return 0, nil, nil + } + oldVal := 
linux.Itimerspec{ + Interval: linux.DurationToTimespec(intervalNS), + Value: linux.DurationToTimespec(valueNS), + } + _, err = t.CopyOut(oldValAddr, &oldVal) + return 0, nil, err +} + +// TimerfdGettime implements Linux syscall timerfd_gettime(2). +func TimerfdGettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + curValAddr := args[1].Pointer() + + f := t.FDMap().GetFile(fd) + if f == nil { + return 0, nil, syserror.EBADF + } + defer f.DecRef() + + tf, ok := f.FileOperations.(*timerfd.TimerOperations) + if !ok { + return 0, nil, syserror.EINVAL + } + + valueNS, intervalNS := ktime.SpecFromSetting(tf.GetTime()) + curVal := linux.Itimerspec{ + Interval: linux.DurationToTimespec(intervalNS), + Value: linux.DurationToTimespec(valueNS), + } + _, err := t.CopyOut(curValAddr, &curVal) + return 0, nil, err +} diff --git a/pkg/sentry/syscalls/linux/sys_tls.go b/pkg/sentry/syscalls/linux/sys_tls.go new file mode 100644 index 000000000..1047364b3 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_tls.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//+build amd64 + +package linux + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" +) + +// ArchPrctl implements linux syscall arch_prctl(2). 
+// It sets architecture-specific process or thread state for t. +func ArchPrctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + switch args[0].Int() { + case linux.ARCH_GET_FS: + addr := args[1].Pointer() + _, err := t.CopyOut(addr, &t.Arch().StateData().Regs.Fs_base) + if err != nil { + return 0, nil, err + } + + case linux.ARCH_SET_FS: + regs := &t.Arch().StateData().Regs + regs.Fs = 0 + regs.Fs_base = args[1].Uint64() + + default: + return 0, nil, syscall.EINVAL + } + + return 0, nil, nil +} diff --git a/pkg/sentry/syscalls/linux/sys_utsname.go b/pkg/sentry/syscalls/linux/sys_utsname.go new file mode 100644 index 000000000..899116374 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_utsname.go @@ -0,0 +1,89 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 + +package linux + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// Uname implements linux syscall uname. +func Uname(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + version := t.SyscallTable().Version + + uts := t.UTSNamespace() + + // Fill in structure fields. 
+ var u linux.UtsName + copy(u.Sysname[:], version.Sysname) + copy(u.Nodename[:], uts.HostName()) + copy(u.Release[:], version.Release) + copy(u.Version[:], version.Version) + copy(u.Machine[:], "x86_64") // +build tag above. + copy(u.Domainname[:], uts.DomainName()) + + // Copy out the result. + va := args[0].Pointer() + _, err := t.CopyOut(va, u) + return 0, nil, err +} + +// Setdomainname implements Linux syscall setdomainname. +func Setdomainname(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + nameAddr := args[0].Pointer() + size := args[1].Int() + + utsns := t.UTSNamespace() + if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, utsns.UserNamespace()) { + return 0, nil, syserror.EPERM + } + if size < 0 || size > linux.UTSLen { + return 0, nil, syserror.EINVAL + } + + name, err := t.CopyInString(nameAddr, int(size)) + if err != nil { + return 0, nil, err + } + + utsns.SetDomainName(name) + return 0, nil, nil +} + +// Sethostname implements Linux syscall sethostname. +func Sethostname(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + nameAddr := args[0].Pointer() + size := args[1].Int() + + utsns := t.UTSNamespace() + if !t.HasCapabilityIn(linux.CAP_SYS_ADMIN, utsns.UserNamespace()) { + return 0, nil, syserror.EPERM + } + if size < 0 || size > linux.UTSLen { + return 0, nil, syserror.EINVAL + } + + name, err := t.CopyInString(nameAddr, int(size)) + if err != nil { + return 0, nil, err + } + + utsns.SetHostName(name) + return 0, nil, nil +} diff --git a/pkg/sentry/syscalls/linux/sys_write.go b/pkg/sentry/syscalls/linux/sys_write.go new file mode 100644 index 000000000..caa7b01ea --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_write.go @@ -0,0 +1,274 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + // EventMaskWrite contains events that can be triggered on writes. + // + // Note that EventHUp is not going to happen for pipes but may for + // implementations of poll on some sockets, see net/core/datagram.c. + EventMaskWrite = waiter.EventOut | waiter.EventHUp | waiter.EventErr +) + +// Write implements linux syscall write(2). +func Write(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + size := args[2].SizeT() + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + // Check that the file is writable. + if !file.Flags().Write { + return 0, nil, syserror.EBADF + } + + // Check that the size is legitimate. + si := int(size) + if si < 0 { + return 0, nil, syserror.EINVAL + } + + // Get the source of the write. 
+ src, err := t.SingleIOSequence(addr, si, usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, nil, err + } + + n, err := writev(t, file, src) + t.IOUsage().AccountWriteSyscall(n) + return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, "write", file) +} + +// Pwrite64 implements linux syscall pwrite64(2). +func Pwrite64(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + size := args[2].SizeT() + offset := args[3].Int64() + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + // Check that the offset is legitimate. + if offset < 0 { + return 0, nil, syserror.EINVAL + } + + // Is writing at an offset supported? + if !file.Flags().Pwrite { + return 0, nil, syserror.ESPIPE + } + + // Check that the file is writable. + if !file.Flags().Write { + return 0, nil, syserror.EBADF + } + + // Check that the size is legitimate. + si := int(size) + if si < 0 { + return 0, nil, syserror.EINVAL + } + + // Get the source of the write. + src, err := t.SingleIOSequence(addr, si, usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, nil, err + } + + n, err := pwritev(t, file, src, offset) + t.IOUsage().AccountWriteSyscall(n) + return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, "pwrite64", file) +} + +// Writev implements linux syscall writev(2). +func Writev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + iovcnt := int(args[2].Int()) + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + // Check that the file is writable. + if !file.Flags().Write { + return 0, nil, syserror.EBADF + } + + // Read the iovecs that specify the source of the write. 
+ src, err := t.IovecsIOSequence(addr, iovcnt, usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, nil, err + } + + n, err := writev(t, file, src) + t.IOUsage().AccountWriteSyscall(n) + return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, "writev", file) +} + +// Pwritev implements linux syscall pwritev(2). +func Pwritev(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + fd := kdefs.FD(args[0].Int()) + addr := args[1].Pointer() + iovcnt := int(args[2].Int()) + offset := args[3].Int64() + + file := t.FDMap().GetFile(fd) + if file == nil { + return 0, nil, syserror.EBADF + } + defer file.DecRef() + + // Check that the offset is legitimate. + if offset < 0 { + return 0, nil, syserror.EINVAL + } + + // Is writing at an offset supported? + if !file.Flags().Pwrite { + return 0, nil, syserror.ESPIPE + } + + // Check that the file is writable. + if !file.Flags().Write { + return 0, nil, syserror.EBADF + } + + // Read the iovecs that specify the source of the write. + src, err := t.IovecsIOSequence(addr, iovcnt, usermem.IOOpts{ + AddressSpaceActive: true, + }) + if err != nil { + return 0, nil, err + } + + n, err := pwritev(t, file, src, offset) + t.IOUsage().AccountWriteSyscall(n) + return uintptr(n), nil, handleIOError(t, n != 0, err, kernel.ERESTARTSYS, "pwritev", file) +} + +func writev(t *kernel.Task, f *fs.File, src usermem.IOSequence) (int64, error) { + n, err := f.Writev(t, src) + if err != syserror.ErrWouldBlock || f.Flags().NonBlocking { + if n > 0 { + // Queue notification if we wrote anything. + f.Dirent.InotifyEvent(linux.IN_MODIFY, 0) + } + return n, err + } + + // Register for notifications. + w, ch := waiter.NewChannelEntry(nil) + f.EventRegister(&w, EventMaskWrite) + + total := n + for { + // Shorten src to reflect bytes previously written. + src = src.DropFirst64(n) + + // Issue the request and break out if it completes with + // anything other than "would block". 
+ n, err = f.Writev(t, src) + total += n + if err != syserror.ErrWouldBlock { + break + } + + // Wait for a notification that we should retry. + if err = t.Block(ch); err != nil { + break + } + } + + f.EventUnregister(&w) + + if total > 0 { + // Queue notification if we wrote anything. + f.Dirent.InotifyEvent(linux.IN_MODIFY, 0) + } + + return total, err +} + +func pwritev(t *kernel.Task, f *fs.File, src usermem.IOSequence, offset int64) (int64, error) { + n, err := f.Pwritev(t, src, offset) + if err != syserror.ErrWouldBlock || f.Flags().NonBlocking { + if n > 0 { + // Queue notification if we wrote anything. + f.Dirent.InotifyEvent(linux.IN_MODIFY, 0) + } + return n, err + } + + // Register for notifications. + w, ch := waiter.NewChannelEntry(nil) + f.EventRegister(&w, EventMaskWrite) + + total := n + for { + // Shorten src to reflect bytes previously written. + src = src.DropFirst64(n) + + // Issue the request and break out if it completes with + // anything other than "would block". + n, err = f.Pwritev(t, src, offset+total) + total += n + if err != syserror.ErrWouldBlock { + break + } + + // Wait for a notification that we should retry. + if err = t.Block(ch); err != nil { + break + } + } + + f.EventUnregister(&w) + + if total > 0 { + // Queue notification if we wrote anything. + f.Dirent.InotifyEvent(linux.IN_MODIFY, 0) + } + + return total, err +} diff --git a/pkg/sentry/syscalls/linux/timespec.go b/pkg/sentry/syscalls/linux/timespec.go new file mode 100644 index 000000000..e865c6fc0 --- /dev/null +++ b/pkg/sentry/syscalls/linux/timespec.go @@ -0,0 +1,112 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "syscall" + "time" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// copyTimespecIn copies a Timespec from the untrusted app range to the kernel. +func copyTimespecIn(t *kernel.Task, addr usermem.Addr) (linux.Timespec, error) { + switch t.Arch().Width() { + case 8: + ts := linux.Timespec{} + in := t.CopyScratchBuffer(16) + _, err := t.CopyInBytes(addr, in) + if err != nil { + return ts, err + } + ts.Sec = int64(usermem.ByteOrder.Uint64(in[0:])) + ts.Nsec = int64(usermem.ByteOrder.Uint64(in[8:])) + return ts, nil + default: + return linux.Timespec{}, syserror.ENOSYS + } +} + +// copyTimespecOut copies a Timespec to the untrusted app range. +func copyTimespecOut(t *kernel.Task, addr usermem.Addr, ts *linux.Timespec) error { + switch t.Arch().Width() { + case 8: + out := t.CopyScratchBuffer(16) + usermem.ByteOrder.PutUint64(out[0:], uint64(ts.Sec)) + usermem.ByteOrder.PutUint64(out[8:], uint64(ts.Nsec)) + _, err := t.CopyOutBytes(addr, out) + return err + default: + return syserror.ENOSYS + } +} + +// copyTimevalIn copies a Timeval from the untrusted app range to the kernel. 
+func copyTimevalIn(t *kernel.Task, addr usermem.Addr) (linux.Timeval, error) { + switch t.Arch().Width() { + case 8: + tv := linux.Timeval{} + in := t.CopyScratchBuffer(16) + _, err := t.CopyInBytes(addr, in) + if err != nil { + return tv, err + } + tv.Sec = int64(usermem.ByteOrder.Uint64(in[0:])) + tv.Usec = int64(usermem.ByteOrder.Uint64(in[8:])) + return tv, nil + default: + return linux.Timeval{}, syscall.ENOSYS + } +} + +// copyTimevalOut copies a Timeval to the untrusted app range. +func copyTimevalOut(t *kernel.Task, addr usermem.Addr, tv *linux.Timeval) error { + switch t.Arch().Width() { + case 8: + out := t.CopyScratchBuffer(16) + usermem.ByteOrder.PutUint64(out[0:], uint64(tv.Sec)) + usermem.ByteOrder.PutUint64(out[8:], uint64(tv.Usec)) + _, err := t.CopyOutBytes(addr, out) + return err + default: + return syscall.ENOSYS + } +} + +// copyTimespecInToDuration copies a Timespec from the untrusted app range, +// validates it and converts it to a Duration. +// +// If the Timespec is larger than what can be represented in a Duration, the +// returned value is the maximum that Duration will allow. +// +// If timespecAddr is NULL, the returned value is negative. +func copyTimespecInToDuration(t *kernel.Task, timespecAddr usermem.Addr) (time.Duration, error) { + // Use a negative Duration to indicate "no timeout". + timeout := time.Duration(-1) + if timespecAddr != 0 { + timespec, err := copyTimespecIn(t, timespecAddr) + if err != nil { + return 0, err + } + if !timespec.Valid() { + return 0, syscall.EINVAL + } + timeout = time.Duration(timespec.ToNsecCapped()) + } + return timeout, nil +} diff --git a/pkg/sentry/syscalls/polling.go b/pkg/sentry/syscalls/polling.go new file mode 100644 index 000000000..fd90184ef --- /dev/null +++ b/pkg/sentry/syscalls/polling.go @@ -0,0 +1,137 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package syscalls + +import ( + "syscall" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// PollFD describes a pollable FD. +type PollFD struct { + FD kdefs.FD + Events waiter.EventMask + REvents waiter.EventMask +} + +// pollState tracks the associated file descriptor and waiter of a PollFD. +type pollState struct { + file *fs.File + waiter waiter.Entry +} + +// initReadiness gets the current ready mask for the file represented by the FD +// stored in pfd.FD. If a channel is passed in, the waiter entry in "state" is +// used to register with the file for event notifications, and a reference to +// the file is stored in "state". +func (pfd *PollFD) initReadiness(t *kernel.Task, state *pollState, ch chan struct{}) { + if pfd.FD < 0 { + pfd.REvents = 0 + return + } + + file := t.FDMap().GetFile(pfd.FD) + if file == nil { + pfd.REvents = waiter.EventNVal + return + } + + if ch == nil { + defer file.DecRef() + } else { + state.file = file + state.waiter, _ = waiter.NewChannelEntry(ch) + file.EventRegister(&state.waiter, pfd.Events) + } + + pfd.REvents = file.Readiness(pfd.Events) & pfd.Events +} + +// releaseState releases all the pollState in "state". 
+func releaseState(state []pollState) { + for i := range state { + if state[i].file != nil { + state[i].file.EventUnregister(&state[i].waiter) + state[i].file.DecRef() + } + } +} + +// Poll polls the PollFDs in "pfd" with a bounded time specified in "timeout" +// when "timeout" is greater than zero. +// +// Poll returns the remaining timeout, which is always 0 on a timeout; and 0 or +// positive if interrupted by a signal. +func Poll(t *kernel.Task, pfd []PollFD, timeout time.Duration) (time.Duration, uintptr, error) { + var ch chan struct{} + if timeout != 0 { + ch = make(chan struct{}, 1) + } + + // Register for event notification in the files involved if we may + // block (timeout not zero). Once we find a file that has a non-zero + // result, we stop registering for events but still go through all files + // to get their ready masks. + state := make([]pollState, len(pfd)) + defer releaseState(state) + n := uintptr(0) + for i := range pfd { + pfd[i].initReadiness(t, &state[i], ch) + if pfd[i].REvents != 0 { + n++ + ch = nil + } + } + + if timeout == 0 { + return timeout, n, nil + } + + forever := timeout < 0 + + for n == 0 { + var err error + // Wait for a notification. + timeout, err = t.BlockWithTimeout(ch, !forever, timeout) + if err != nil { + if err == syscall.ETIMEDOUT { + err = nil + } + return timeout, 0, err + } + + // We got notified, count how many files are ready. If none, + // then this was a spurious notification, and we just go back + // to sleep with the remaining timeout. + for i := range state { + if state[i].file == nil { + continue + } + + ready := state[i].file.Readiness(pfd[i].Events) & pfd[i].Events + if ready != 0 { + pfd[i].REvents = ready + n++ + } + } + } + + return timeout, n, nil +} diff --git a/pkg/sentry/syscalls/syscalls.go b/pkg/sentry/syscalls/syscalls.go new file mode 100644 index 000000000..1176f858d --- /dev/null +++ b/pkg/sentry/syscalls/syscalls.go @@ -0,0 +1,72 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package syscalls is the interface from the application to the kernel. +// Traditionally, syscalls is the interface that is used by applications to +// request services from the kernel of a operating system. We provide a +// user-mode kernel that needs to handle those requests coming from unmodified +// applications. Therefore, we still use the term "syscalls" to denote this +// interface. +// +// Note that the stubs in this package may merely provide the interface, not +// the actual implementation. It just makes writing syscall stubs +// straightforward. +package syscalls + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/eventchannel" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + uspb "gvisor.googlesource.com/gvisor/pkg/sentry/syscalls/unimplemented_syscall_go_proto" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// Error returns a syscall handler that will always give the passed error. +func Error(err error) kernel.SyscallFn { + return func(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + return 0, nil, err + } +} + +// ErrorWithEvent gives a syscall function that sends an unimplemented +// syscall event via the event channel and returns the passed error. 
+func ErrorWithEvent(err error) kernel.SyscallFn { + return func(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + UnimplementedEvent(t) + return 0, nil, err + } +} + +// CapError gives a syscall function that checks for capability c. If the task +// has the capability, it returns ENOSYS, otherwise EPERM. To unprivileged +// tasks, it will seem like there is an implementation. +func CapError(c linux.Capability) kernel.SyscallFn { + return func(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + if !t.HasCapability(c) { + return 0, nil, syserror.EPERM + } + UnimplementedEvent(t) + return 0, nil, syserror.ENOSYS + } +} + +// UnimplementedEvent emits an UnimplementedSyscall event via the event +// channel. +func UnimplementedEvent(t *kernel.Task) { + eventchannel.Emit(&uspb.UnimplementedSyscall{ + Tid: int32(t.ThreadID()), + Registers: t.Arch().StateData().Proto(), + }) +} diff --git a/pkg/sentry/syscalls/unimplemented_syscall.proto b/pkg/sentry/syscalls/unimplemented_syscall.proto new file mode 100644 index 000000000..d6febf5b1 --- /dev/null +++ b/pkg/sentry/syscalls/unimplemented_syscall.proto @@ -0,0 +1,27 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package gvisor; + +import "pkg/sentry/arch/registers.proto"; + +message UnimplementedSyscall { + // Task ID. + int32 tid = 1; + + // Registers at the time of the call. 
+ Registers registers = 2; +} diff --git a/pkg/sentry/time/BUILD b/pkg/sentry/time/BUILD new file mode 100644 index 000000000..cbcd699d5 --- /dev/null +++ b/pkg/sentry/time/BUILD @@ -0,0 +1,48 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") + +go_template_instance( + name = "seqatomic_parameters", + out = "seqatomic_parameters.go", + package = "time", + suffix = "Parameters", + template = "//pkg/sync:generic_seqatomic", + types = { + "Value": "Parameters", + }, +) + +go_library( + name = "time", + srcs = [ + "calibrated_clock.go", + "clock_id.go", + "clocks.go", + "muldiv_amd64.s", + "parameters.go", + "sampler.go", + "sampler_unsafe.go", + "seqatomic_parameters.go", + "tsc_amd64.s", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/time", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/log", + "//pkg/metric", + "//pkg/sync", + "//pkg/syserror", + ], +) + +go_test( + name = "time_test", + srcs = [ + "calibrated_clock_test.go", + "parameters_test.go", + "sampler_test.go", + ], + embed = [":time"], +) diff --git a/pkg/sentry/time/calibrated_clock.go b/pkg/sentry/time/calibrated_clock.go new file mode 100644 index 000000000..cbb95e2d7 --- /dev/null +++ b/pkg/sentry/time/calibrated_clock.go @@ -0,0 +1,269 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package time provides a calibrated clock synchronized to a system reference +// clock. +package time + +import ( + "sync" + "time" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/metric" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// fallbackMetric tracks failed updates. It is not sync, as it is not critical +// that all occurrences are captured and CalibratedClock may fallback many +// times. +var fallbackMetric = metric.MustCreateNewUint64Metric("/time/fallback", false /* sync */, "Incremented when a clock falls back to system calls due to a failed update") + +// CalibratedClock implements a clock that tracks a reference clock. +// +// Users should call Update at regular intervals of around approxUpdateInterval +// to ensure that the clock does not drift significantly from the reference +// clock. +type CalibratedClock struct { + // mu protects the fields below. + // TODO: consider a sequence counter for read locking. + mu sync.RWMutex + + // ref sample the reference clock that this clock is calibrated + // against. + ref *sampler + + // ready indicates that the fields below are ready for use calculating + // time. + ready bool + + // params are the current timekeeping parameters. + params Parameters + + // errorNS is the estimated clock error in nanoseconds. + errorNS ReferenceNS +} + +// NewCalibratedClock creates a CalibratedClock that tracks the given ClockID. +func NewCalibratedClock(c ClockID) *CalibratedClock { + return &CalibratedClock{ + ref: newSampler(c), + } +} + +// Debugf logs at debug level. +func (c *CalibratedClock) Debugf(format string, v ...interface{}) { + if log.IsLogging(log.Debug) { + args := []interface{}{c.ref.clockID} + args = append(args, v...) + log.Debugf("CalibratedClock(%v): "+format, args...) + } +} + +// Infof logs at debug level. 
+func (c *CalibratedClock) Infof(format string, v ...interface{}) { + if log.IsLogging(log.Info) { + args := []interface{}{c.ref.clockID} + args = append(args, v...) + log.Infof("CalibratedClock(%v): "+format, args...) + } +} + +// Warningf logs at debug level. +func (c *CalibratedClock) Warningf(format string, v ...interface{}) { + if log.IsLogging(log.Warning) { + args := []interface{}{c.ref.clockID} + args = append(args, v...) + log.Warningf("CalibratedClock(%v): "+format, args...) + } +} + +// reset forces the clock to restart the calibration process, logging the +// passed message. +func (c *CalibratedClock) reset(str string, v ...interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + c.resetLocked(str, v...) +} + +// resetLocked is equivalent to reset with c.mu already held for writing. +func (c *CalibratedClock) resetLocked(str string, v ...interface{}) { + c.Warningf(str+" Resetting clock; time may jump.", v...) + c.ready = false + c.ref.Reset() + fallbackMetric.Increment() +} + +// updateParams updates the timekeeping parameters based on the passed +// parameters. +// +// actual is the actual estimated timekeeping parameters. The stored parameters +// may need to be adjusted slightly from these values to compensate for error. +// +// Preconditions: c.mu must be held for writing. +func (c *CalibratedClock) updateParams(actual Parameters) { + if !c.ready { + // At initial calibration there is nothing to correct. + c.params = actual + c.ready = true + + c.Infof("ready") + + return + } + + // Otherwise, adjust the params to correct for errors. + newParams, errorNS, err := errorAdjust(c.params, actual, actual.BaseCycles) + if err != nil { + // Something is very wrong. Reset and try again from the + // beginning. + c.resetLocked("Unable to update params: %v.", err) + return + } + logErrorAdjustment(c.ref.clockID, errorNS, c.params, newParams) + + if errorNS.Magnitude() >= MaxClockError { + // We should never get such extreme error, something is very + // wrong. 
Reset everything and start again. + // + // N.B. logErrorAdjustment will have already logged the error + // at warning level. + // + // TODO: We could allow Realtime clock jumps here. + c.resetLocked("Extreme clock error.") + return + } + + c.params = newParams + c.errorNS = errorNS +} + +// Update runs the update step of the clock, updating its synchronization with +// the reference clock. +// +// Update returns timekeeping and true with the new timekeeping parameters if +// the clock is calibrated. Update should be called regularly to prevent the +// clock from getting significantly out of sync from the reference clock. +// +// The returned timekeeping parameters are invalidated on the next call to +// Update. +func (c *CalibratedClock) Update() (Parameters, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + if err := c.ref.Sample(); err != nil { + c.resetLocked("Unable to update calibrated clock: %v.", err) + return Parameters{}, false + } + + oldest, newest, ok := c.ref.Range() + if !ok { + // Not ready yet. 
+ return Parameters{}, false + } + + minCount := uint64(newest.before - oldest.after) + maxCount := uint64(newest.after - oldest.before) + refInterval := uint64(newest.ref - oldest.ref) + + // freq hz = count / (interval ns) * (nsPerS ns) / (1 s) + nsPerS := uint64(time.Second.Nanoseconds()) + + minHz, ok := muldiv64(minCount, nsPerS, refInterval) + if !ok { + c.resetLocked("Unable to update calibrated clock: (%v - %v) * %v / %v overflows.", newest.before, oldest.after, nsPerS, refInterval) + return Parameters{}, false + } + + maxHz, ok := muldiv64(maxCount, nsPerS, refInterval) + if !ok { + c.resetLocked("Unable to update calibrated clock: (%v - %v) * %v / %v overflows.", newest.after, oldest.before, nsPerS, refInterval) + return Parameters{}, false + } + + c.updateParams(Parameters{ + Frequency: (minHz + maxHz) / 2, + BaseRef: newest.ref, + BaseCycles: newest.after, + }) + + return c.params, true +} + +// GetTime returns the current time based on the clock calibration. +func (c *CalibratedClock) GetTime() (int64, error) { + c.mu.RLock() + + if !c.ready { + // Fallback to a syscall. + now, err := c.ref.Syscall() + c.mu.RUnlock() + return int64(now), err + } + + now := c.ref.Cycles() + v, ok := c.params.ComputeTime(now) + if !ok { + // Something is seriously wrong with the clock. Try + // again with syscalls. + c.resetLocked("Time computation overflowed. params = %+v, now = %v.", c.params, now) + now, err := c.ref.Syscall() + c.mu.RUnlock() + return int64(now), err + } + + c.mu.RUnlock() + return v, nil +} + +// CalibratedClocks contains calibrated monotonic and realtime clocks. +// +// TODO: We know that Linux runs the monotonic and realtime clocks at +// the same rate, so rather than tracking both individually, we could do one +// calibration for both clocks. +type CalibratedClocks struct { + // monotonic is the clock tracking the system monotonic clock. + monotonic *CalibratedClock + + // realtime is the realtime equivalent of monotonic. 
+ realtime *CalibratedClock +} + +// NewCalibratedClocks creates a CalibratedClocks. +func NewCalibratedClocks() *CalibratedClocks { + return &CalibratedClocks{ + monotonic: NewCalibratedClock(Monotonic), + realtime: NewCalibratedClock(Realtime), + } +} + +// Update implements Clocks.Update. +func (c *CalibratedClocks) Update() (Parameters, bool, Parameters, bool) { + monotonicParams, monotonicOk := c.monotonic.Update() + realtimeParams, realtimeOk := c.realtime.Update() + + return monotonicParams, monotonicOk, realtimeParams, realtimeOk +} + +// GetTime implements Clocks.GetTime. +func (c *CalibratedClocks) GetTime(id ClockID) (int64, error) { + switch id { + case Monotonic: + return c.monotonic.GetTime() + case Realtime: + return c.realtime.GetTime() + default: + return 0, syserror.EINVAL + } +} diff --git a/pkg/sentry/time/calibrated_clock_test.go b/pkg/sentry/time/calibrated_clock_test.go new file mode 100644 index 000000000..8b6dd5592 --- /dev/null +++ b/pkg/sentry/time/calibrated_clock_test.go @@ -0,0 +1,186 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package time + +import ( + "testing" + "time" +) + +// newTestCalibratedClock returns a CalibratedClock that collects samples from +// the given sample list and cycle counts from the given cycle list. 
+func newTestCalibratedClock(samples []sample, cycles []TSCValue) *CalibratedClock { + return &CalibratedClock{ + ref: newTestSampler(samples, cycles), + } +} + +func TestConstantFrequency(t *testing.T) { + // Perfectly constant frequency. + samples := []sample{ + {before: 100000, after: 100000 + defaultOverheadCycles, ref: 100}, + {before: 200000, after: 200000 + defaultOverheadCycles, ref: 200}, + {before: 300000, after: 300000 + defaultOverheadCycles, ref: 300}, + {before: 400000, after: 400000 + defaultOverheadCycles, ref: 400}, + {before: 500000, after: 500000 + defaultOverheadCycles, ref: 500}, + {before: 600000, after: 600000 + defaultOverheadCycles, ref: 600}, + {before: 700000, after: 700000 + defaultOverheadCycles, ref: 700}, + } + + c := newTestCalibratedClock(samples, nil) + + // Update from all samples. + for range samples { + c.Update() + } + + c.mu.RLock() + if !c.ready { + c.mu.RUnlock() + t.Fatalf("clock not ready") + } + // A bit after the last sample. + now, ok := c.params.ComputeTime(750000) + c.mu.RUnlock() + if !ok { + t.Fatalf("ComputeTime ok got %v want true", ok) + } + + t.Logf("now: %v", now) + + // Time should be between the current sample and where we'd expect the + // next sample. + if now < 700 || now > 800 { + t.Errorf("now got %v want > 700 && < 800", now) + } +} + +func TestErrorCorrection(t *testing.T) { + testCases := []struct { + name string + samples [5]sample + projectedTimeStart int64 + projectedTimeEnd int64 + }{ + // Initial calibration should be ~1MHz for each of these, and + // the reference clock changes in samples[2]. + { + name: "slow-down", + samples: [5]sample{ + {before: 1000000, after: 1000001, ref: ReferenceNS(1 * ApproxUpdateInterval.Nanoseconds())}, + {before: 2000000, after: 2000001, ref: ReferenceNS(2 * ApproxUpdateInterval.Nanoseconds())}, + // Reference clock has slowed down, causing 100ms of error. 
+ {before: 3010000, after: 3010001, ref: ReferenceNS(3 * ApproxUpdateInterval.Nanoseconds())}, + {before: 4020000, after: 4020001, ref: ReferenceNS(4 * ApproxUpdateInterval.Nanoseconds())}, + {before: 5030000, after: 5030001, ref: ReferenceNS(5 * ApproxUpdateInterval.Nanoseconds())}, + }, + projectedTimeStart: 3005 * time.Millisecond.Nanoseconds(), + projectedTimeEnd: 3015 * time.Millisecond.Nanoseconds(), + }, + { + name: "speed-up", + samples: [5]sample{ + {before: 1000000, after: 1000001, ref: ReferenceNS(1 * ApproxUpdateInterval.Nanoseconds())}, + {before: 2000000, after: 2000001, ref: ReferenceNS(2 * ApproxUpdateInterval.Nanoseconds())}, + // Reference clock has sped up, causing 100ms of error. + {before: 2990000, after: 2990001, ref: ReferenceNS(3 * ApproxUpdateInterval.Nanoseconds())}, + {before: 3980000, after: 3980001, ref: ReferenceNS(4 * ApproxUpdateInterval.Nanoseconds())}, + {before: 4970000, after: 4970001, ref: ReferenceNS(5 * ApproxUpdateInterval.Nanoseconds())}, + }, + projectedTimeStart: 2985 * time.Millisecond.Nanoseconds(), + projectedTimeEnd: 2995 * time.Millisecond.Nanoseconds(), + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + c := newTestCalibratedClock(tc.samples[:], nil) + + // Initial calibration takes two updates. + _, ok := c.Update() + if ok { + t.Fatalf("Update ready too early") + } + + params, ok := c.Update() + if !ok { + t.Fatalf("Update not ready") + } + + // Initial calibration is ~1MHz. + hz := params.Frequency + if hz < 990000 || hz > 1010000 { + t.Fatalf("Frequency got %v want > 990kHz && < 1010kHz", hz) + } + + // Project time at the next update. Given the 1MHz + // calibration, it is expected to be ~3.1s/2.9s, not + // the actual 3s. + // + // N.B. the next update time is the "after" time above. 
+ projected, ok := params.ComputeTime(tc.samples[2].after) + if !ok { + t.Fatalf("ComputeTime ok got %v want true", ok) + } + if projected < tc.projectedTimeStart || projected > tc.projectedTimeEnd { + t.Fatalf("ComputeTime(%v) got %v want > %v && < %v", tc.samples[2].after, projected, tc.projectedTimeStart, tc.projectedTimeEnd) + } + + // Update again to see the changed reference clock. + params, ok = c.Update() + if !ok { + t.Fatalf("Update not ready") + } + + // We now know that TSC = tc.samples[2].after -> 3s, + // but with the previous params indicated that TSC + // tc.samples[2].after -> 3.5s/2.5s. We can't allow the + // clock to go backwards, and having the clock jump + // forwards is undesirable. There should be a smooth + // transition that corrects the clock error over time. + // Check that the clock is continuous at TSC = + // tc.samples[2].after. + newProjected, ok := params.ComputeTime(tc.samples[2].after) + if !ok { + t.Fatalf("ComputeTime ok got %v want true", ok) + } + if newProjected != projected { + t.Errorf("Discontinuous time; ComputeTime(%v) got %v want %v", tc.samples[2].after, newProjected, projected) + } + + // As the reference clock stablizes, ensure that the clock error + // decreases. 
+ initialErr := c.errorNS + t.Logf("initial error: %v ns", initialErr) + + _, ok = c.Update() + if !ok { + t.Fatalf("Update not ready") + } + if c.errorNS.Magnitude() > initialErr.Magnitude() { + t.Errorf("errorNS increased, got %v want |%v| <= |%v|", c.errorNS, c.errorNS, initialErr) + } + + _, ok = c.Update() + if !ok { + t.Fatalf("Update not ready") + } + if c.errorNS.Magnitude() > initialErr.Magnitude() { + t.Errorf("errorNS increased, got %v want |%v| <= |%v|", c.errorNS, c.errorNS, initialErr) + } + + t.Logf("final error: %v ns", c.errorNS) + }) + } +} diff --git a/pkg/sentry/time/clock_id.go b/pkg/sentry/time/clock_id.go new file mode 100644 index 000000000..500102e58 --- /dev/null +++ b/pkg/sentry/time/clock_id.go @@ -0,0 +1,40 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package time + +import ( + "strconv" +) + +// ClockID is a Linux clock identifier. +type ClockID int32 + +// These are the supported Linux clock identifiers. +const ( + Realtime ClockID = iota + Monotonic +) + +// String implements fmt.Stringer.String. +func (c ClockID) String() string { + switch c { + case Realtime: + return "Realtime" + case Monotonic: + return "Monotonic" + default: + return strconv.Itoa(int(c)) + } +} diff --git a/pkg/sentry/time/clocks.go b/pkg/sentry/time/clocks.go new file mode 100644 index 000000000..9925b407d --- /dev/null +++ b/pkg/sentry/time/clocks.go @@ -0,0 +1,31 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package time + +// Clocks represents a clock source that contains both a monotonic and realtime +// clock. +type Clocks interface { + // Update performs an update step, keeping the clocks in sync with the + // reference host clocks, and returning the new timekeeping parameters. + // + // Update should be called at approximately ApproxUpdateInterval. + Update() (monotonicParams Parameters, monotonicOk bool, realtimeParam Parameters, realtimeOk bool) + + // GetTime returns the current time in nanoseconds for the given clock. + // + // Clocks implementations must support at least Monotonic and + // Realtime. + GetTime(c ClockID) (int64, error) +} diff --git a/pkg/sentry/time/muldiv_amd64.s b/pkg/sentry/time/muldiv_amd64.s new file mode 100644 index 000000000..291940b1d --- /dev/null +++ b/pkg/sentry/time/muldiv_amd64.s @@ -0,0 +1,44 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "textflag.h" + +// Documentation is available in parameters.go. +// +// func muldiv64(value, multiplier, divisor uint64) (uint64, bool) +TEXT ·muldiv64(SB),NOSPLIT,$0-33 + MOVQ value+0(FP), AX + MOVQ multiplier+8(FP), BX + MOVQ divisor+16(FP), CX + + // Multiply AX*BX and store result in DX:AX. + MULQ BX + + // If divisor <= (value*multiplier) / 2^64, then the division will overflow. + // + // (value*multiplier) / 2^64 is DX:AX >> 64, or simply DX. + CMPQ CX, DX + JLE overflow + + // Divide DX:AX by CX. + DIVQ CX + + MOVQ AX, result+24(FP) + MOVB $1, ok+32(FP) + RET + +overflow: + MOVQ $0, result+24(FP) + MOVB $0, ok+32(FP) + RET diff --git a/pkg/sentry/time/parameters.go b/pkg/sentry/time/parameters.go new file mode 100644 index 000000000..594b4874b --- /dev/null +++ b/pkg/sentry/time/parameters.go @@ -0,0 +1,239 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package time + +import ( + "fmt" + "time" + + "gvisor.googlesource.com/gvisor/pkg/log" +) + +const ( + // ApproxUpdateInterval is the approximate interval that parameters + // should be updated at. + // + // Error correction assumes that the next update will occur after this + // much time. + // + // If an update occurs before ApproxUpdateInterval passes, it has no + // adverse effect on error correction behavior. 
+ // + // If an update occurs after ApproxUpdateInterval passes, the clock + // will overshoot its error correction target and begin accumulating + // error in the other direction. + // + // If updates occur after more than 2*ApproxUpdateInterval passes, the + // clock becomes unstable, accumulating more error than it had + // originally. Repeated updates after more than 2*ApproxUpdateInterval + // will cause unbounded increases in error. + // + // These statements assume that the host clock does not change. Actual + // error will depend upon host clock changes. + // + // TODO: make error correction more robust to delayed + // updates. + ApproxUpdateInterval = 1 * time.Second + + // MaxClockError is the maximum amount of error that the clocks will + // try to correct. + // + // This limit: + // + // * Puts a limit on cases of otherwise unbounded increases in error. + // + // * Avoids unreasonably large frequency adjustments required to + // correct large errors over a single update interval. + MaxClockError = ReferenceNS(ApproxUpdateInterval) / 4 +) + +// Parameters are the timekeeping parameters needed to compute the current +// time. +type Parameters struct { + // BaseCycles was the TSC counter value when the time was BaseRef. + BaseCycles TSCValue + + // BaseRef is the reference clock time in nanoseconds corresponding to + // BaseCycles. + BaseRef ReferenceNS + + // Frequency is the frequency of the cycle clock in Hertz. + Frequency uint64 +} + +// muldiv64 multiplies two 64-bit numbers, then divides the result by another +// 64-bit number. +// +// It requires that the result fit in 64 bits, but doesn't require that +// intermediate values do; in particular, the result of the multiplication may +// require 128 bits. +// +// It returns !ok if divisor is zero or the result does not fit in 64 bits. +func muldiv64(value, multiplier, divisor uint64) (uint64, bool) + +// ComputeTime calculates the current time from a "now" TSC value. 
+// +// time = ref + (now - base) / f +func (p Parameters) ComputeTime(nowCycles TSCValue) (int64, bool) { + diffCycles := nowCycles - p.BaseCycles + if diffCycles < 0 { + log.Warningf("now cycles %v < base cycles %v", nowCycles, p.BaseCycles) + diffCycles = 0 + } + + // Overflow "won't ever happen". If diffCycles is the max value + // (2^63 - 1), then to overflow, + // + // frequency <= ((2^63 - 1) * 10^9) / 2^64 = 500Mhz + // + // A TSC running at 2GHz takes 201 years to reach 2^63-1. 805 years at + // 500MHz. + diffNS, ok := muldiv64(uint64(diffCycles), uint64(time.Second.Nanoseconds()), p.Frequency) + return int64(uint64(p.BaseRef) + diffNS), ok +} + +// errorAdjust returns a new Parameters struct "adjusted" that satisfies: +// +// 1. adjusted.ComputeTime(now) = prevParams.ComputeTime(now) +// * i.e., the current time does not jump. +// +// 2. adjusted.ComputeTime(TSC at next update) = newParams.ComputeTime(TSC at next update) +// * i.e., Any error between prevParams and newParams will be corrected over +// the course of the next update period. +// +// errorAdjust also returns the current clock error. +// +// Preconditions: +// * newParams.BaseCycles >= prevParams.BaseCycles; i.e., TSC must not go +// backwards. +// * newParams.BaseCycles <= now; i.e., the new parameters be computed at or +// before now. +func errorAdjust(prevParams Parameters, newParams Parameters, now TSCValue) (Parameters, ReferenceNS, error) { + if newParams.BaseCycles < prevParams.BaseCycles { + // Oh dear! Something is very wrong. + return Parameters{}, 0, fmt.Errorf("TSC went backwards in updated clock params: %v < %v", newParams.BaseCycles, prevParams.BaseCycles) + } + if newParams.BaseCycles > now { + return Parameters{}, 0, fmt.Errorf("parameters contain base cycles later than now: %v > %v", newParams.BaseCycles, now) + } + + intervalNS := int64(ApproxUpdateInterval.Nanoseconds()) + nsPerSec := uint64(time.Second.Nanoseconds()) + + // Current time as computed by prevParams. 
+ oldNowNS, ok := prevParams.ComputeTime(now) + if !ok { + return Parameters{}, 0, fmt.Errorf("old now time computation overflowed. params = %+v, now = %v", prevParams, now) + } + + // We expect the update ticker to run based on this clock (i.e., it has + // been using prevParams and will use the returned adjusted + // parameters). Hence it will decide to fire intervalNS from the + // current (oldNowNS) "now". + nextNS := oldNowNS + intervalNS + + if nextNS <= int64(newParams.BaseRef) { + // The next update time already passed before the new + // parameters were created! We definitely can't correct the + // error by then. + return Parameters{}, 0, fmt.Errorf("unable to correct error in single period. oldNowNS = %v, nextNS = %v, p = %v", oldNowNS, nextNS, newParams) + } + + // For what TSC value next will newParams.ComputeTime(next) = nextNS? + // + // Solve ComputeTime for next: + // + // next = newParams.Frequency * (nextNS - newParams.BaseRef) + newParams.BaseCycles + c, ok := muldiv64(newParams.Frequency, uint64(nextNS-int64(newParams.BaseRef)), nsPerSec) + if !ok { + return Parameters{}, 0, fmt.Errorf("%v * (%v - %v) / %v overflows", newParams.Frequency, nextNS, newParams.BaseRef, nsPerSec) + } + + cycles := TSCValue(c) + next := cycles + newParams.BaseCycles + + if next <= now { + // The next update time already passed now with the new + // parameters! We can't correct the error in a single period. + return Parameters{}, 0, fmt.Errorf("unable to correct error in single period. oldNowNS = %v, nextNS = %v, now = %v, next = %v", oldNowNS, nextNS, now, next) + } + + // We want to solve for parameters that satisfy: + // + // adjusted.ComputeTime(now) = oldNowNS + // + // adjusted.ComputeTime(next) = nextNS + // + // i.e., the current time does not change, but by the time we reach + // next we reach the same time as newParams. + + // We choose to keep BaseCycles fixed. 
+ adjusted := Parameters{ + BaseCycles: newParams.BaseCycles, + } + + // We want a slope such that time goes from oldNowNS to nextNS when + // we reach next. + // + // In other words, cycles should increase by next - now in the next + // interval. + + cycles = next - now + ns := intervalNS + + // adjusted.Frequency = cycles / ns + adjusted.Frequency, ok = muldiv64(uint64(cycles), nsPerSec, uint64(ns)) + if !ok { + return Parameters{}, 0, fmt.Errorf("(%v - %v) * %v / %v overflows", next, now, nsPerSec, ns) + } + + // Now choose a base reference such that the current time remains the + // same. Note that this is just ComputeTime, solving for BaseRef: + // + // oldNowNS = BaseRef + (now - BaseCycles) / Frequency + // BaseRef = oldNowNS - (now - BaseCycles) / Frequency + diffNS, ok := muldiv64(uint64(now-adjusted.BaseCycles), nsPerSec, adjusted.Frequency) + if !ok { + return Parameters{}, 0, fmt.Errorf("(%v - %v) * %v / %v overflows", now, adjusted.BaseCycles, nsPerSec, adjusted.Frequency) + } + + adjusted.BaseRef = ReferenceNS(oldNowNS - int64(diffNS)) + + // The error is the difference between the current time and what the + // new parameters say the current time should be. + newNowNS, ok := newParams.ComputeTime(now) + if !ok { + return Parameters{}, 0, fmt.Errorf("new now time computation overflowed. params = %+v, now = %v", newParams, now) + } + + errorNS := ReferenceNS(oldNowNS - newNowNS) + + return adjusted, errorNS, nil +} + +// logErrorAdjustment logs the clock error and associated error correction +// frequency adjustment. +// +// The log level is determined by the error severity. 
func logErrorAdjustment(clock ClockID, errorNS ReferenceNS, orig, adjusted Parameters) {
	// Escalate log severity with the magnitude of the error: >1ms is a
	// warning, >10us is informational, anything smaller is debug-only.
	fn := log.Debugf
	if int64(errorNS.Magnitude()) > time.Millisecond.Nanoseconds() {
		fn = log.Warningf
	} else if int64(errorNS.Magnitude()) > 10*time.Microsecond.Nanoseconds() {
		fn = log.Infof
	}

	fn("Clock(%v): error: %v ns, adjusted frequency from %v Hz to %v Hz", clock, errorNS, orig.Frequency, adjusted.Frequency)
}
diff --git a/pkg/sentry/time/parameters_test.go b/pkg/sentry/time/parameters_test.go
new file mode 100644
index 000000000..7394fc5ee
--- /dev/null
+++ b/pkg/sentry/time/parameters_test.go
@@ -0,0 +1,486 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package time

import (
	"math"
	"testing"
	"time"
)

func TestParametersComputeTime(t *testing.T) {
	testCases := []struct {
		name   string
		params Parameters
		now    TSCValue
		want   int64
	}{
		{
			// Now is the same as the base cycles.
			name: "base-cycles",
			params: Parameters{
				BaseCycles: 10000,
				BaseRef:    ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
				Frequency:  10000,
			},
			now:  10000,
			want: 5000 * time.Millisecond.Nanoseconds(),
		},
		{
			// Now is behind the base cycles. Time is frozen.
+ name: "backwards", + params: Parameters{ + BaseCycles: 10000, + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + now: 9000, + want: 5000 * time.Millisecond.Nanoseconds(), + }, + { + // Now is ahead of the base cycles. + name: "ahead", + params: Parameters{ + BaseCycles: 10000, + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + now: 15000, + want: 5500 * time.Millisecond.Nanoseconds(), + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, ok := tc.params.ComputeTime(tc.now) + if !ok { + t.Errorf("ComputeTime ok got %v want true", got) + } + if got != tc.want { + t.Errorf("ComputeTime got %+v want %+v", got, tc.want) + } + }) + } +} + +func TestParametersErrorAdjust(t *testing.T) { + testCases := []struct { + name string + oldParams Parameters + now TSCValue + newParams Parameters + want Parameters + errorNS ReferenceNS + wantErr bool + }{ + { + // newParams are perfectly continuous with oldParams + // and don't need adjustment. + name: "continuous", + oldParams: Parameters{ + BaseCycles: 0, + BaseRef: 0, + Frequency: 10000, + }, + now: 50000, + newParams: Parameters{ + BaseCycles: 50000, + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + want: Parameters{ + BaseCycles: 50000, + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + }, + { + // Same as "continuous", but with now ahead of + // newParams.BaseCycles. The result is the same as + // there is no error to correct. 
+ name: "continuous-nowdiff", + oldParams: Parameters{ + BaseCycles: 0, + BaseRef: 0, + Frequency: 10000, + }, + now: 60000, + newParams: Parameters{ + BaseCycles: 50000, + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + want: Parameters{ + BaseCycles: 50000, + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + }, + { + // errorAdjust bails out if the TSC goes backwards. + name: "tsc-backwards", + oldParams: Parameters{ + BaseCycles: 10000, + BaseRef: ReferenceNS(1000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + now: 9000, + newParams: Parameters{ + BaseCycles: 9000, + BaseRef: ReferenceNS(1100 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + wantErr: true, + }, + { + // errorAdjust bails out if new params are from after now. + name: "params-after-now", + oldParams: Parameters{ + BaseCycles: 10000, + BaseRef: ReferenceNS(1000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + now: 11000, + newParams: Parameters{ + BaseCycles: 12000, + BaseRef: ReferenceNS(1200 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + wantErr: true, + }, + { + // Host clock sped up. + name: "speed-up", + oldParams: Parameters{ + BaseCycles: 0, + BaseRef: 0, + Frequency: 10000, + }, + now: 45000, + // Host frequency changed to 9000 immediately after + // oldParams was returned. + newParams: Parameters{ + BaseCycles: 45000, + // From oldParams, we think ref = 4.5s at cycles = 45000. + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 9000, + }, + want: Parameters{ + BaseCycles: 45000, + BaseRef: ReferenceNS(4500 * time.Millisecond.Nanoseconds()), + // We must decrease the new frequency by 50% to + // correct 0.5s of error in 1s + // (ApproxUpdateInterval). + Frequency: 4500, + }, + errorNS: ReferenceNS(-500 * time.Millisecond.Nanoseconds()), + }, + { + // Host clock sped up, with now ahead of newParams. 
+ name: "speed-up-nowdiff", + oldParams: Parameters{ + BaseCycles: 0, + BaseRef: 0, + Frequency: 10000, + }, + now: 50000, + // Host frequency changed to 9000 immediately after + // oldParams was returned. + newParams: Parameters{ + BaseCycles: 45000, + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 9000, + }, + // nextRef = 6000ms + // nextCycles = 9000 * (6000ms - 5000ms) + 45000 + // nextCycles = 9000 * (1s) + 45000 + // nextCycles = 54000 + // f = (54000 - 50000) / 1s = 4000 + // + // ref = 5000ms - (50000 - 45000) / 4000 + // ref = 3.75s + want: Parameters{ + BaseCycles: 45000, + BaseRef: ReferenceNS(3750 * time.Millisecond.Nanoseconds()), + Frequency: 4000, + }, + // oldNow = 50000 * 10000 = 5s + // newNow = (50000 - 45000) / 9000 + 5s = 5.555s + errorNS: ReferenceNS((5000*time.Millisecond - 5555555555).Nanoseconds()), + }, + { + // Host clock sped up. The new parameters are so far + // ahead that the next update time already passed. + name: "speed-up-uncorrectable-baseref", + oldParams: Parameters{ + BaseCycles: 0, + BaseRef: 0, + Frequency: 10000, + }, + now: 50000, + // Host frequency changed to 5000 immediately after + // oldParams was returned. + newParams: Parameters{ + BaseCycles: 45000, + BaseRef: ReferenceNS(9000 * time.Millisecond.Nanoseconds()), + Frequency: 5000, + }, + // The next update should be at 10s, but newParams + // already passed 6s. Thus it is impossible to correct + // the clock by then. + wantErr: true, + }, + { + // Host clock sped up. The new parameters are moving so + // fast that the next update should be before now. + name: "speed-up-uncorrectable-frequency", + oldParams: Parameters{ + BaseCycles: 0, + BaseRef: 0, + Frequency: 10000, + }, + now: 55000, + // Host frequency changed to 7500 immediately after + // oldParams was returned. 
+ newParams: Parameters{ + BaseCycles: 45000, + BaseRef: ReferenceNS(6000 * time.Millisecond.Nanoseconds()), + Frequency: 7500, + }, + // The next update should be at 6.5s, but newParams are + // so far ahead and fast that they reach 6.5s at cycle + // 48750, which before now! Thus it is impossible to + // correct the clock by then. + wantErr: true, + }, + { + // Host clock slowed down. + name: "slow-down", + oldParams: Parameters{ + BaseCycles: 0, + BaseRef: 0, + Frequency: 10000, + }, + now: 55000, + // Host frequency changed to 11000 immediately after + // oldParams was returned. + newParams: Parameters{ + BaseCycles: 55000, + // From oldParams, we think ref = 5.5s at cycles = 55000. + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 11000, + }, + want: Parameters{ + BaseCycles: 55000, + BaseRef: ReferenceNS(5500 * time.Millisecond.Nanoseconds()), + // We must increase the new frequency by 50% to + // correct 0.5s of error in 1s + // (ApproxUpdateInterval). + Frequency: 16500, + }, + errorNS: ReferenceNS(500 * time.Millisecond.Nanoseconds()), + }, + { + // Host clock slowed down, with now ahead of newParams. + name: "slow-down-nowdiff", + oldParams: Parameters{ + BaseCycles: 0, + BaseRef: 0, + Frequency: 10000, + }, + now: 60000, + // Host frequency changed to 11000 immediately after + // oldParams was returned. 
+ newParams: Parameters{ + BaseCycles: 55000, + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 11000, + }, + // nextRef = 7000ms + // nextCycles = 11000 * (7000ms - 5000ms) + 55000 + // nextCycles = 11000 * (2000ms) + 55000 + // nextCycles = 77000 + // f = (77000 - 60000) / 1s = 17000 + // + // ref = 6000ms - (60000 - 55000) / 17000 + // ref = 5705882353ns + want: Parameters{ + BaseCycles: 55000, + BaseRef: ReferenceNS(5705882353), + Frequency: 17000, + }, + // oldNow = 60000 * 10000 = 6s + // newNow = (60000 - 55000) / 11000 + 5s = 5.4545s + errorNS: ReferenceNS((6*time.Second - 5454545454).Nanoseconds()), + }, + { + // Host time went backwards. + name: "time-backwards", + oldParams: Parameters{ + BaseCycles: 50000, + BaseRef: ReferenceNS(5000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + now: 60000, + newParams: Parameters{ + BaseCycles: 60000, + // From oldParams, we think ref = 6s at cycles = 60000. + BaseRef: ReferenceNS(4000 * time.Millisecond.Nanoseconds()), + Frequency: 10000, + }, + want: Parameters{ + BaseCycles: 60000, + BaseRef: ReferenceNS(6000 * time.Millisecond.Nanoseconds()), + // We must increase the frequency by 200% to + // correct 2s of error in 1s + // (ApproxUpdateInterval). + Frequency: 30000, + }, + errorNS: ReferenceNS(2000 * time.Millisecond.Nanoseconds()), + }, + { + // Host time went backwards, with now ahead of newParams. 
			name: "time-backwards-nowdiff",
			oldParams: Parameters{
				BaseCycles: 50000,
				BaseRef:    ReferenceNS(5000 * time.Millisecond.Nanoseconds()),
				Frequency:  10000,
			},
			now: 65000,
			// nextRef = 7500ms
			// nextCycles = 10000 * (7500ms - 4000ms) + 60000
			// nextCycles = 10000 * (3500ms) + 60000
			// nextCycles = 95000
			// f = (95000 - 65000) / 1s = 30000
			//
			// ref = 6500ms - (65000 - 60000) / 30000
			// ref = 6333333334ns (muldiv64 truncates the division, so
			// the subtraction rounds up)
			newParams: Parameters{
				BaseCycles: 60000,
				BaseRef:    ReferenceNS(4000 * time.Millisecond.Nanoseconds()),
				Frequency:  10000,
			},
			want: Parameters{
				BaseCycles: 60000,
				BaseRef:    ReferenceNS(6333333334),
				Frequency:  30000,
			},
			// oldNow = 65000 / 10000 = 6.5s
			// newNow = (65000 - 60000) / 10000 + 4s = 4.5s
			errorNS: ReferenceNS(2000 * time.Millisecond.Nanoseconds()),
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got, errorNS, err := errorAdjust(tc.oldParams, tc.newParams, tc.now)
			if err != nil && !tc.wantErr {
				t.Errorf("err got %v want nil", err)
			} else if err == nil && tc.wantErr {
				t.Errorf("err got nil want non-nil")
			}

			if got != tc.want {
				t.Errorf("Parameters got %+v want %+v", got, tc.want)
			}
			if errorNS != tc.errorNS {
				t.Errorf("errorNS got %v want %v", errorNS, tc.errorNS)
			}
		})
	}
}

// testMuldiv checks that v * 10^9 / (i * 10^9) == v / i for a range of
// divisors i.
func testMuldiv(t *testing.T, v uint64) {
	for i := uint64(1); i <= 1000000; i++ {
		mult := uint64(1000000000)
		div := i * mult
		res, ok := muldiv64(v, mult, div)
		if !ok {
			t.Errorf("Result of %v * %v / %v ok got false want true", v, mult, div)
		}
		if want := v / i; res != want {
			t.Errorf("Bad result of %v * %v / %v: got %v, want %v", v, mult, div, res, want)
		}
	}
}

func TestMulDiv(t *testing.T) {
	testMuldiv(t, math.MaxUint64)
	for i := int64(-10); i <= 10; i++ {
		testMuldiv(t, uint64(i))
	}
}

func TestMulDivZero(t *testing.T) {
	if r, ok := muldiv64(2, 4, 0); ok {
		t.Errorf("muldiv64(2, 4, 0) got %d, ok want !ok", r)
	}

	if r, ok := muldiv64(0,
0, 0); ok {
		t.Errorf("muldiv64(0, 0, 0) got %d, ok want !ok", r)
	}
}

func TestMulDivOverflow(t *testing.T) {
	testCases := []struct {
		name string
		val  uint64
		mult uint64
		div  uint64
		ok   bool
		ret  uint64
	}{
		{
			name: "2^62",
			val:  1 << 63,
			mult: 4,
			div:  8,
			ok:   true,
			ret:  1 << 62,
		},
		{
			name: "2^64-1",
			val:  0xffffffffffffffff,
			mult: 1,
			div:  1,
			ok:   true,
			ret:  0xffffffffffffffff,
		},
		{
			name: "2^64",
			val:  1 << 63,
			mult: 4,
			div:  2,
			ok:   false,
		},
		{
			name: "2^125",
			val:  1 << 63,
			mult: 1 << 63,
			div:  2,
			ok:   false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			r, ok := muldiv64(tc.val, tc.mult, tc.div)
			if ok != tc.ok {
				t.Errorf("ok got %v want %v", ok, tc.ok)
			}
			if tc.ok && r != tc.ret {
				t.Errorf("ret got %v want %v", r, tc.ret)
			}
		})
	}
}
diff --git a/pkg/sentry/time/sampler.go b/pkg/sentry/time/sampler.go
new file mode 100644
index 000000000..cf581b5fa
--- /dev/null
+++ b/pkg/sentry/time/sampler.go
@@ -0,0 +1,225 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package time

import (
	"errors"

	"gvisor.googlesource.com/gvisor/pkg/log"
)

const (
	// defaultOverheadCycles is the default estimated syscall overhead in
	// TSC cycles. It is further refined as syscalls are made.
	defaultOverheadCycles = 1 * 1000

	// maxOverheadCycles is the maximum allowed syscall overhead in TSC cycles.
	maxOverheadCycles = 100 * defaultOverheadCycles

	// maxSampleLoops is the maximum number of times to try to get a clock sample
	// under the expected overhead.
	maxSampleLoops = 5

	// maxSamples is the maximum number of samples to collect.
	maxSamples = 10
)

// errOverheadTooHigh is returned from sampler.Sample if the syscall
// overhead is too high.
var errOverheadTooHigh = errors.New("time syscall overhead exceeds maximum")

// TSCValue is a value from the TSC.
type TSCValue int64

// Rdtsc reads the TSC.
//
// Implemented in assembly (see tsc_amd64.s).
//
// Intel SDM, Vol 3, Ch 17.15:
// "The RDTSC instruction reads the time-stamp counter and is guaranteed to
// return a monotonically increasing unique value whenever executed, except for
// a 64-bit counter wraparound. Intel guarantees that the time-stamp counter
// will not wraparound within 10 years after being reset."
//
// We use int64, so we have 5 years before wrap-around.
func Rdtsc() TSCValue

// ReferenceNS are nanoseconds in the reference clock domain.
// int64 gives us ~290 years before this overflows.
type ReferenceNS int64

// Magnitude returns the absolute value of r.
func (r ReferenceNS) Magnitude() ReferenceNS {
	if r < 0 {
		return -r
	}
	return r
}

// cycleClock is a TSC-based cycle clock.
type cycleClock interface {
	// Cycles returns a count value from the TSC.
	Cycles() TSCValue
}

// tscCycleClock is a cycleClock that uses the real TSC.
type tscCycleClock struct{}

// Cycles implements cycleClock.Cycles.
func (tscCycleClock) Cycles() TSCValue {
	return Rdtsc()
}

// sample contains a sample from the reference clock, with TSC values from
// before and after the reference clock value was captured.
type sample struct {
	// before is the TSC value read immediately before sampling the
	// reference clock.
	before TSCValue

	// after is the TSC value read immediately after sampling the
	// reference clock.
	after TSCValue

	// ref is the reference clock value, captured between before and after.
	ref ReferenceNS
}

// Overhead returns the sample overhead in TSC cycles.
func (s *sample) Overhead() TSCValue {
	return s.after - s.before
}

// referenceClocks collects individual samples from a reference clock ID and
// TSC.
type referenceClocks interface {
	cycleClock

	// Sample returns a single sample from the reference clock ID.
	Sample(c ClockID) (sample, error)
}

// sampler collects samples from a reference system clock, minimizing
// the overhead in each sample.
type sampler struct {
	// clockID is the reference clock ID (e.g., CLOCK_MONOTONIC).
	clockID ClockID

	// clocks provides raw samples.
	clocks referenceClocks

	// overhead is the estimated sample overhead in TSC cycles.
	overhead TSCValue

	// samples is a ring buffer of the latest samples collected.
	samples []sample
}

// newSampler creates a sampler for clockID.
func newSampler(c ClockID) *sampler {
	return &sampler{
		clockID:  c,
		clocks:   syscallTSCReferenceClocks{},
		overhead: defaultOverheadCycles,
	}
}

// Reset discards previously collected clock samples.
func (s *sampler) Reset() {
	s.overhead = defaultOverheadCycles
	s.samples = []sample{}
}

// lowOverheadSample returns a reference clock sample with minimized syscall overhead.
func (s *sampler) lowOverheadSample() (sample, error) {
	for {
		// Try up to maxSampleLoops times to get a sample within the
		// current overhead budget (s.overhead).
		for i := 0; i < maxSampleLoops; i++ {
			samp, err := s.clocks.Sample(s.clockID)
			if err != nil {
				return sample{}, err
			}

			if samp.before > samp.after {
				log.Warningf("TSC went backwards: %v > %v", samp.before, samp.after)
				continue
			}

			if samp.Overhead() <= s.overhead {
				return samp, nil
			}
		}

		// Couldn't get a sample with the current overhead. Increase it.
		newOverhead := 2 * s.overhead
		if newOverhead > maxOverheadCycles {
			// We'll give it one more shot with the max overhead.

			if s.overhead == maxOverheadCycles {
				// Already tried the max budget; give up.
				return sample{}, errOverheadTooHigh
			}

			newOverhead = maxOverheadCycles
		}

		s.overhead = newOverhead
		log.Debugf("Time: Adjusting syscall overhead up to %v", s.overhead)
	}
}

// Sample collects a reference clock sample.
func (s *sampler) Sample() error {
	sample, err := s.lowOverheadSample()
	if err != nil {
		return err
	}

	s.samples = append(s.samples, sample)
	if len(s.samples) > maxSamples {
		// Evict the oldest sample to bound the buffer size.
		s.samples = s.samples[1:]
	}

	// If the 4 most recent samples all have an overhead less than half the
	// expected overhead, adjust downwards.
	if len(s.samples) < 4 {
		return nil
	}

	for _, sample := range s.samples[len(s.samples)-4:] {
		if sample.Overhead() > s.overhead/2 {
			return nil
		}
	}

	s.overhead -= s.overhead / 8
	log.Debugf("Time: Adjusting syscall overhead down to %v", s.overhead)

	return nil
}

// Syscall returns the current raw reference time without storing TSC
// samples.
func (s *sampler) Syscall() (ReferenceNS, error) {
	sample, err := s.clocks.Sample(s.clockID)
	if err != nil {
		return 0, err
	}

	return sample.ref, nil
}

// Cycles returns a raw TSC value.
func (s *sampler) Cycles() TSCValue {
	return s.clocks.Cycles()
}

// Range returns the widest range of clock samples available, i.e., the
// oldest and newest retained samples. ok is false if fewer than two
// samples have been collected.
func (s *sampler) Range() (sample, sample, bool) {
	if len(s.samples) < 2 {
		return sample{}, sample{}, false
	}

	return s.samples[0], s.samples[len(s.samples)-1], true
}
diff --git a/pkg/sentry/time/sampler_test.go b/pkg/sentry/time/sampler_test.go
new file mode 100644
index 000000000..caf7e5c53
--- /dev/null
+++ b/pkg/sentry/time/sampler_test.go
@@ -0,0 +1,183 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package time + +import ( + "errors" + "testing" +) + +// errNoSamples is returned when testReferenceClocks runs out of samples. +var errNoSamples = errors.New("no samples available") + +// testReferenceClocks returns a preset list of samples and cycle counts. +type testReferenceClocks struct { + samples []sample + cycles []TSCValue +} + +// Sample implements referenceClocks.Sample, returning the next sample in the list. +func (t *testReferenceClocks) Sample(_ ClockID) (sample, error) { + if len(t.samples) == 0 { + return sample{}, errNoSamples + } + + s := t.samples[0] + if len(t.samples) == 1 { + t.samples = nil + } else { + t.samples = t.samples[1:] + } + + return s, nil +} + +// Cycles implements referenceClocks.Cycles, returning the next TSCValue in the list. +func (t *testReferenceClocks) Cycles() TSCValue { + if len(t.cycles) == 0 { + return 0 + } + + c := t.cycles[0] + if len(t.cycles) == 1 { + t.cycles = nil + } else { + t.cycles = t.cycles[1:] + } + + return c +} + +// newTestSampler returns a sampler that collects samples from +// the given sample list and cycle counts from the given cycle list. +func newTestSampler(samples []sample, cycles []TSCValue) *sampler { + return &sampler{ + clocks: &testReferenceClocks{ + samples: samples, + cycles: cycles, + }, + overhead: defaultOverheadCycles, + } +} + +// generateSamples generates n samples with the given overhead. 
+func generateSamples(n int, overhead TSCValue) []sample { + samples := []sample{{before: 1000000, after: 1000000 + overhead, ref: 100}} + for i := 0; i < n-1; i++ { + prev := samples[len(samples)-1] + samples = append(samples, sample{ + before: prev.before + 1000000, + after: prev.after + 1000000, + ref: prev.ref + 100, + }) + } + return samples +} + +// TestSample ensures that samples can be collected. +func TestSample(t *testing.T) { + testCases := []struct { + name string + samples []sample + err error + }{ + { + name: "basic", + samples: []sample{ + {before: 100000, after: 100000 + defaultOverheadCycles, ref: 100}, + }, + err: nil, + }, + { + // Sample with backwards TSC ignored. + // referenceClock should retry and get errNoSamples. + name: "backwards-tsc-ignored", + samples: []sample{ + {before: 100000, after: 90000, ref: 100}, + }, + err: errNoSamples, + }, + { + // Sample far above overhead skipped. + // referenceClock should retry and get errNoSamples. + name: "reject-overhead", + samples: []sample{ + {before: 100000, after: 100000 + 5*defaultOverheadCycles, ref: 100}, + }, + err: errNoSamples, + }, + { + // Maximum overhead allowed is bounded. + name: "over-max-overhead", + // Generate a bunch of samples. The reference clock + // needs a while to ramp up its expected overhead. + samples: generateSamples(100, 2*maxOverheadCycles), + err: errOverheadTooHigh, + }, + { + // Overhead at maximum overhead is allowed. + name: "max-overhead", + // Generate a bunch of samples. The reference clock + // needs a while to ramp up its expected overhead. + samples: generateSamples(100, maxOverheadCycles), + err: nil, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + s := newTestSampler(tc.samples, nil) + err := s.Sample() + if err != tc.err { + t.Errorf("Sample err got %v want %v", err, tc.err) + } + }) + } +} + +// TestOutliersIgnored tests that referenceClock ignores samples with very high +// overhead. 
+func TestOutliersIgnored(t *testing.T) { + s := newTestSampler([]sample{ + {before: 100000, after: 100000 + defaultOverheadCycles, ref: 100}, + {before: 200000, after: 200000 + defaultOverheadCycles, ref: 200}, + {before: 300000, after: 300000 + defaultOverheadCycles, ref: 300}, + {before: 400000, after: 400000 + defaultOverheadCycles, ref: 400}, + {before: 500000, after: 500000 + 5*defaultOverheadCycles, ref: 500}, // Ignored + {before: 600000, after: 600000 + defaultOverheadCycles, ref: 600}, + {before: 700000, after: 700000 + defaultOverheadCycles, ref: 700}, + }, nil) + + // Collect 5 samples. + for i := 0; i < 5; i++ { + err := s.Sample() + if err != nil { + t.Fatalf("Unexpected error while sampling: %v", err) + } + } + + oldest, newest, ok := s.Range() + if !ok { + t.Fatalf("Range not ok") + } + + if oldest.ref != 100 { + t.Errorf("oldest.ref got %v want %v", oldest.ref, 100) + } + + // We skipped the high-overhead sample. + if newest.ref != 600 { + t.Errorf("newest.ref got %v want %v", newest.ref, 600) + } +} diff --git a/pkg/sentry/time/sampler_unsafe.go b/pkg/sentry/time/sampler_unsafe.go new file mode 100644 index 000000000..7ea19d387 --- /dev/null +++ b/pkg/sentry/time/sampler_unsafe.go @@ -0,0 +1,56 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package time

import (
	"syscall"
	"unsafe"
)

// syscallTSCReferenceClocks is the standard referenceClocks, collecting
// samples using CLOCK_GETTIME and RDTSC.
type syscallTSCReferenceClocks struct {
	tscCycleClock
}

// Sample implements sampler.Sample.
func (syscallTSCReferenceClocks) Sample(c ClockID) (sample, error) {
	var s sample

	s.before = Rdtsc()

	// Don't call clockGettime to avoid a call which may call morestack.
	// NOTE(review): presumably a stack growth between the two Rdtsc reads
	// would inflate the measured overhead — confirm before relying on it.
	var ts syscall.Timespec
	_, _, e := syscall.RawSyscall(syscall.SYS_CLOCK_GETTIME, uintptr(c), uintptr(unsafe.Pointer(&ts)), 0)
	if e != 0 {
		return sample{}, e
	}

	s.after = Rdtsc()
	s.ref = ReferenceNS(ts.Nano())

	return s, nil
}

// clockGettime calls SYS_CLOCK_GETTIME, returning time in nanoseconds.
func clockGettime(c ClockID) (ReferenceNS, error) {
	var ts syscall.Timespec
	_, _, e := syscall.RawSyscall(syscall.SYS_CLOCK_GETTIME, uintptr(c), uintptr(unsafe.Pointer(&ts)), 0)
	if e != 0 {
		return 0, e
	}

	return ReferenceNS(ts.Nano()), nil
}
diff --git a/pkg/sentry/time/tsc_amd64.s b/pkg/sentry/time/tsc_amd64.s
new file mode 100644
index 000000000..4cc604392
--- /dev/null
+++ b/pkg/sentry/time/tsc_amd64.s
@@ -0,0 +1,27 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "textflag.h"

TEXT ·Rdtsc(SB),NOSPLIT,$0-8
	// N.B. We need LFENCE on Intel, AMD is more complicated.
+ // Modern AMD CPUs with modern kernels make LFENCE behave like it does + // on Intel with MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT. MFENCE is + // otherwise needed on AMD. + LFENCE + RDTSC + SHLQ $32, DX + ADDQ DX, AX + MOVQ AX, ret+0(FP) + RET diff --git a/pkg/sentry/uniqueid/BUILD b/pkg/sentry/uniqueid/BUILD new file mode 100644 index 000000000..c8ab03c3d --- /dev/null +++ b/pkg/sentry/uniqueid/BUILD @@ -0,0 +1,11 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "uniqueid", + srcs = ["context.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/uniqueid", + visibility = ["//pkg/sentry:internal"], + deps = ["//pkg/sentry/context"], +) diff --git a/pkg/sentry/uniqueid/context.go b/pkg/sentry/uniqueid/context.go new file mode 100644 index 000000000..eeb8c4286 --- /dev/null +++ b/pkg/sentry/uniqueid/context.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package uniqueid defines context.Context keys for obtaining system-wide +// unique identifiers. +package uniqueid + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" +) + +// contextID is the kernel package's type for context.Context.Value keys. +type contextID int + +const ( + // CtxGlobalUniqueID is a Context.Value key for a system-wide + // unique identifier. 
+ CtxGlobalUniqueID contextID = iota + + // CtxInotifyCookie is a Context.Value key for a unique inotify + // event cookie. + CtxInotifyCookie +) + +// GlobalFromContext returns a system-wide unique identifier from ctx. +func GlobalFromContext(ctx context.Context) uint64 { + return ctx.Value(CtxGlobalUniqueID).(uint64) +} + +// InotifyCookie generates a unique inotify event cookie from ctx. +func InotifyCookie(ctx context.Context) uint32 { + return ctx.Value(CtxInotifyCookie).(uint32) +} diff --git a/pkg/sentry/usage/BUILD b/pkg/sentry/usage/BUILD new file mode 100644 index 000000000..a0fe0aa07 --- /dev/null +++ b/pkg/sentry/usage/BUILD @@ -0,0 +1,38 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "usage_state", + srcs = [ + "cpu.go", + "io.go", + "memory.go", + ], + out = "usage_state.go", + package = "usage", +) + +go_library( + name = "usage", + srcs = [ + "cpu.go", + "io.go", + "memory.go", + "memory_unsafe.go", + "usage.go", + "usage_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/usage", + visibility = [ + "//pkg/sentry:internal", + ], + deps = [ + "//pkg/bits", + "//pkg/log", + "//pkg/sentry/memutil", + "//pkg/state", + "@org_golang_x_sys//unix:go_default_library", + ], +) diff --git a/pkg/sentry/usage/cpu.go b/pkg/sentry/usage/cpu.go new file mode 100644 index 000000000..1c2cc90e1 --- /dev/null +++ b/pkg/sentry/usage/cpu.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usage + +import ( + "time" +) + +// CPUStats contains the subset of struct rusage fields that relate to CPU +// scheduling. +type CPUStats struct { + // UserTime is the amount of time spent executing application code. + UserTime time.Duration + + // SysTime is the amount of time spent executing sentry code. + SysTime time.Duration + + // VoluntarySwitches is the number of times control has been voluntarily + // ceded due to blocking, etc. + VoluntarySwitches uint64 + + // InvoluntarySwitches (struct rusage::ru_nivcsw) is unsupported, since + // "preemptive" scheduling is managed by the Go runtime, which doesn't + // provide this information. +} + +// Accumulate adds s2 to s. +func (s *CPUStats) Accumulate(s2 CPUStats) { + s.UserTime += s2.UserTime + s.SysTime += s2.SysTime + s.VoluntarySwitches += s2.VoluntarySwitches +} diff --git a/pkg/sentry/usage/io.go b/pkg/sentry/usage/io.go new file mode 100644 index 000000000..a05053c32 --- /dev/null +++ b/pkg/sentry/usage/io.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package usage + +import ( + "sync/atomic" +) + +// IO contains I/O-related statistics. +type IO struct { + // CharsRead is the number of bytes read by read syscalls. + CharsRead uint64 + + // CharsWritten is the number of bytes written by write syscalls. + CharsWritten uint64 + + // ReadSyscalls is the number of read syscalls. + ReadSyscalls uint64 + + // WriteSyscalls is the number of write syscalls. + WriteSyscalls uint64 + + // The following counter is only meaningful when Sentry has internal + // pagecache. + + // BytesRead is the number of bytes actually read into pagecache. + BytesRead uint64 + + // BytesWritten is the number of bytes actually written from pagecache. + BytesWritten uint64 + + // BytesWriteCancelled is the number of bytes not written out due to + // truncation. + BytesWriteCancelled uint64 +} + +// AccountReadSyscall does the accounting for a read syscall. +func (i *IO) AccountReadSyscall(bytes int64) { + atomic.AddUint64(&i.ReadSyscalls, 1) + if bytes > 0 { + atomic.AddUint64(&i.CharsRead, uint64(bytes)) + } +} + +// AccountWriteSyscall does the accounting for a write syscall. +func (i *IO) AccountWriteSyscall(bytes int64) { + atomic.AddUint64(&i.WriteSyscalls, 1) + if bytes > 0 { + atomic.AddUint64(&i.CharsWritten, uint64(bytes)) + } +} + +// AccountReadIO does the accounting for a read IO into the file system. +func (i *IO) AccountReadIO(bytes int64) { + if bytes > 0 { + atomic.AddUint64(&i.BytesRead, uint64(bytes)) + } +} + +// AccountWriteIO does the accounting for a write IO into the file system. +func (i *IO) AccountWriteIO(bytes int64) { + if bytes > 0 { + atomic.AddUint64(&i.BytesWritten, uint64(bytes)) + } +} + +// Accumulate adds up io usages. 
+func (i *IO) Accumulate(io *IO) { + atomic.AddUint64(&i.CharsRead, atomic.LoadUint64(&io.CharsRead)) + atomic.AddUint64(&i.CharsWritten, atomic.LoadUint64(&io.CharsWritten)) + atomic.AddUint64(&i.ReadSyscalls, atomic.LoadUint64(&io.ReadSyscalls)) + atomic.AddUint64(&i.WriteSyscalls, atomic.LoadUint64(&io.WriteSyscalls)) + atomic.AddUint64(&i.BytesRead, atomic.LoadUint64(&io.BytesRead)) + atomic.AddUint64(&i.BytesWritten, atomic.LoadUint64(&io.BytesWritten)) + atomic.AddUint64(&i.BytesWriteCancelled, atomic.LoadUint64(&io.BytesWriteCancelled)) +} diff --git a/pkg/sentry/usage/memory.go b/pkg/sentry/usage/memory.go new file mode 100644 index 000000000..5d1b3a595 --- /dev/null +++ b/pkg/sentry/usage/memory.go @@ -0,0 +1,282 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usage + +import ( + "fmt" + "os" + "sync" + "sync/atomic" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/bits" + "gvisor.googlesource.com/gvisor/pkg/sentry/memutil" +) + +// MemoryKind represents a type of memory used by the application. +// +// For efficiency reasons, it is assumed that the Memory implementation is +// responsible for specific stats (documented below), and those may be reported +// in aggregate independently. See the platform.Memory interface as well as the +// control.Usage.Collect method for more information. +type MemoryKind int + +const ( + // System represents miscellaneous system memory. 
This may include + // memory that is in the process of being reclaimed, system caches, + // page tables, swap, etc. + // + // This memory kind is backed by platform memory. + System MemoryKind = iota + + // Anonymous represents anonymous application memory. + // + // This memory kind is backed by platform memory. + Anonymous + + // PageCache represents memory allocated to back sandbox-visible files that + // do not have a local fd. The contents of these files are buffered in + // memory to support application mmaps. + // + // This memory kind is backed by platform memory. + PageCache + + // Tmpfs represents memory used by the sandbox-visible tmpfs. + // + // This memory kind is backed by platform memory. + Tmpfs + + // Ramdiskfs represents memory used by the ramdiskfs. + // + // This memory kind is backed by platform memory. + Ramdiskfs + + // Mapped represents memory related to files which have a local fd on the + // host, and thus can be directly mapped. Typically these are files backed + // by gofers with donated-fd support. Note that this value may not track the + // exact amount of memory used by mapping on the host, because we don't have + // any visibility into the host kernel memory management. In particular, + // once we map some part of a host file, the host kernel is free to + // abitrarily populate/decommit the pages, which it may do for various + // reasons (ex. host memory reclaim, NUMA balancing). + // + // This memory kind is backed by the host pagecache, via host mmaps. + Mapped +) + +// MemoryStats tracks application memory usage in bytes. All fields correspond to the +// memory category with the same name. This object is thread-safe if accessed +// through the provided methods. The public fields may be safely accessed +// directly on a copy of the object obtained from Memory.Copy(). +type MemoryStats struct { + System uint64 + Anonymous uint64 + PageCache uint64 + Tmpfs uint64 + // Lazily updated based on the value in RTMapped. 
+ Mapped uint64 + Ramdiskfs uint64 +} + +// RTMemoryStats contains the memory usage values that need to be directly +// exposed through a shared memory file for real-time access. These are +// categories not backed by platform memory. For details about how this works, +// see the memory accounting docs. +// +// N.B. Please keep the struct in sync with the API. Noteably, changes to this +// struct requires a version bump and addition of compatibility logic in the +// control server. As a special-case, adding fields without re-ordering existing +// ones do not require a version bump because the mapped page we use is +// initially zeroed. Any added field will be ignored by an older API and will be +// zero if read by a newer API. +type RTMemoryStats struct { + RTMapped uint64 +} + +// MemoryLocked is Memory with access methods. +type MemoryLocked struct { + mu sync.RWMutex + // MemoryStats records the memory stats. + MemoryStats + // RTMemoryStats records the memory stats that need to be exposed through + // shared page. + *RTMemoryStats + // File is the backing file storing the memory stats. + File *os.File +} + +func newMemoryLocked() MemoryLocked { + name := "memory-usage" + fd, err := memutil.CreateMemFD(name, 0) + if err != nil { + panic("error creating usage file: " + err.Error()) + } + file := os.NewFile(uintptr(fd), name) + if err := file.Truncate(int64(RTMemoryStatsSize)); err != nil { + panic("error truncating usage file: " + err.Error()) + } + // Note: We rely on the returned page being initially zeroed. This will + // always be the case for a newly mapped page from /dev/shm. If we obtain + // the shared memory through some other means in the future, we may have to + // explicitly zero the page. 
+ mmap, err := syscall.Mmap(int(file.Fd()), 0, int(RTMemoryStatsSize), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + panic("error mapping usage file: " + err.Error()) + } + + return MemoryLocked{ + File: file, + RTMemoryStats: RTMemoryStatsPointer(mmap), + } +} + +// MemoryAccounting is the global memory stats. +// +// There is no need to save or restore the global memory accounting object, +// because individual frame kinds are saved and charged only when they become +// resident. +var MemoryAccounting = newMemoryLocked() + +func (m *MemoryLocked) incLocked(val uint64, kind MemoryKind) { + switch kind { + case System: + atomic.AddUint64(&m.System, val) + case Anonymous: + atomic.AddUint64(&m.Anonymous, val) + case PageCache: + atomic.AddUint64(&m.PageCache, val) + case Mapped: + atomic.AddUint64(&m.RTMapped, val) + case Tmpfs: + atomic.AddUint64(&m.Tmpfs, val) + case Ramdiskfs: + atomic.AddUint64(&m.Ramdiskfs, val) + default: + panic(fmt.Sprintf("invalid memory kind: %v", kind)) + } +} + +// Inc adds an additional usage of 'val' bytes to memory category 'kind'. +// +// This method is thread-safe. +func (m *MemoryLocked) Inc(val uint64, kind MemoryKind) { + m.mu.RLock() + m.incLocked(val, kind) + m.mu.RUnlock() +} + +func (m *MemoryLocked) decLocked(val uint64, kind MemoryKind) { + switch kind { + case System: + atomic.AddUint64(&m.System, ^(val - 1)) + case Anonymous: + atomic.AddUint64(&m.Anonymous, ^(val - 1)) + case PageCache: + atomic.AddUint64(&m.PageCache, ^(val - 1)) + case Mapped: + atomic.AddUint64(&m.RTMapped, ^(val - 1)) + case Tmpfs: + atomic.AddUint64(&m.Tmpfs, ^(val - 1)) + case Ramdiskfs: + atomic.AddUint64(&m.Ramdiskfs, ^(val - 1)) + default: + panic(fmt.Sprintf("invalid memory kind: %v", kind)) + } +} + +// Dec remove a usage of 'val' bytes from memory category 'kind'. +// +// This method is thread-safe. 
+func (m *MemoryLocked) Dec(val uint64, kind MemoryKind) { + m.mu.RLock() + m.decLocked(val, kind) + m.mu.RUnlock() +} + +// Move moves a usage of 'val' bytes from 'from' to 'to'. +// +// This method is thread-safe. +func (m *MemoryLocked) Move(val uint64, to MemoryKind, from MemoryKind) { + m.mu.RLock() + // Just call decLocked and incLocked directly. We held the RLock to + // protect against concurrent callers to Total(). + m.decLocked(val, from) + m.incLocked(val, to) + m.mu.RUnlock() +} + +// totalLocked returns a total usage. +// +// Precondition: must be called when locked. +func (m *MemoryLocked) totalLocked() (total uint64) { + total += atomic.LoadUint64(&m.System) + total += atomic.LoadUint64(&m.Anonymous) + total += atomic.LoadUint64(&m.PageCache) + total += atomic.LoadUint64(&m.RTMapped) + total += atomic.LoadUint64(&m.Tmpfs) + total += atomic.LoadUint64(&m.Ramdiskfs) + return +} + +// Total returns a total memory usage. +// +// This method is thread-safe. +func (m *MemoryLocked) Total() uint64 { + m.mu.Lock() + defer m.mu.Unlock() + return m.totalLocked() +} + +// Copy returns a copy of the structure with a total. +// +// This method is thread-safe. +func (m *MemoryLocked) Copy() (MemoryStats, uint64) { + m.mu.Lock() + defer m.mu.Unlock() + ms := m.MemoryStats + ms.Mapped = m.RTMapped + return ms, m.totalLocked() +} + +// MinimumTotalMemoryBytes is the minimum reported total system memory. +var MinimumTotalMemoryBytes uint64 = 2 << 30 // 2 GB + +// TotalMemory returns the "total usable memory" available. +// +// This number doesn't really have a true value so it's based on the following +// inputs and further bounded to be above some minimum guaranteed value (2GB), +// additionally ensuring that total memory reported is always less than used. 
+// +// memSize should be the platform.Memory size reported by platform.Memory.TotalSize() +// used is the total memory reported by MemoryLocked.Total() +func TotalMemory(memSize, used uint64) uint64 { + if memSize < MinimumTotalMemoryBytes { + memSize = MinimumTotalMemoryBytes + } + if memSize < used { + memSize = used + // Bump totalSize to the next largest power of 2, if one exists, so + // that MemFree isn't 0. + if msb := bits.MostSignificantOne64(memSize); msb < 63 { + memSize = uint64(1) << (uint(msb) + 1) + } + } + return memSize +} + +// IncrementalMappedAccounting controls whether host mapped memory is accounted +// incrementally during map translation. This may be modified during early +// initialization, and is read-only afterward. +var IncrementalMappedAccounting = false diff --git a/pkg/sentry/usage/memory_unsafe.go b/pkg/sentry/usage/memory_unsafe.go new file mode 100644 index 000000000..f990a7750 --- /dev/null +++ b/pkg/sentry/usage/memory_unsafe.go @@ -0,0 +1,27 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usage + +import ( + "unsafe" +) + +// RTMemoryStatsSize is the size of the RTMemoryStats struct. +var RTMemoryStatsSize = unsafe.Sizeof(RTMemoryStats{}) + +// RTMemoryStatsPointer casts the address of the byte slice into a RTMemoryStats pointer. 
+func RTMemoryStatsPointer(b []byte) *RTMemoryStats { + return (*RTMemoryStats)(unsafe.Pointer(&b[0])) +} diff --git a/pkg/sentry/usage/usage.go b/pkg/sentry/usage/usage.go new file mode 100644 index 000000000..3b3118659 --- /dev/null +++ b/pkg/sentry/usage/usage.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package usage provides representations of resource usage. +package usage diff --git a/pkg/sentry/usermem/BUILD b/pkg/sentry/usermem/BUILD new file mode 100644 index 000000000..36c0760dd --- /dev/null +++ b/pkg/sentry/usermem/BUILD @@ -0,0 +1,70 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "usermem_state", + srcs = [ + "access_type.go", + "addr.go", + "addr_range.go", + "addr_range_seq_unsafe.go", + ], + out = "usermem_state.go", + package = "usermem", +) + +go_template_instance( + name = "addr_range", + out = "addr_range.go", + package = "usermem", + prefix = "Addr", + template = "//pkg/segment:generic_range", + types = { + "T": "Addr", + }, +) + +go_library( + name = "usermem", + srcs = [ + "access_type.go", + "addr.go", + "addr_range.go", + "addr_range_seq_unsafe.go", + "bytes_io.go", + "bytes_io_unsafe.go", + "usermem.go", + "usermem_state.go", + "usermem_x86.go", + ], + importpath = 
"gvisor.googlesource.com/gvisor/pkg/sentry/usermem", + visibility = ["//pkg/sentry:internal"], + deps = [ + "//pkg/atomicbitops", + "//pkg/binary", + "//pkg/log", + "//pkg/sentry/context", + "//pkg/sentry/safemem", + "//pkg/state", + "//pkg/syserror", + "//pkg/tcpip/buffer", + ], +) + +go_test( + name = "usermem_test", + size = "small", + srcs = [ + "addr_range_seq_test.go", + "usermem_test.go", + ], + embed = [":usermem"], + deps = [ + "//pkg/sentry/context", + "//pkg/sentry/safemem", + "//pkg/syserror", + ], +) diff --git a/pkg/sentry/usermem/README.md b/pkg/sentry/usermem/README.md new file mode 100644 index 000000000..2ebd3bcc1 --- /dev/null +++ b/pkg/sentry/usermem/README.md @@ -0,0 +1,31 @@ +This package defines primitives for sentry access to application memory. + +Major types: + +- The `IO` interface represents a virtual address space and provides I/O methods + on that address space. `IO` is the lowest-level primitive. The primary + implementation of the `IO` interface is `mm.MemoryManager`. + +- `IOSequence` represents a collection of individually-contiguous address ranges + in a `IO` that is operated on sequentially, analogous to Linux's `struct + iov_iter`. + +Major usage patterns: + +- Access to a task's virtual memory, subject to the application's memory + protections and while running on that task's goroutine, from a context that is + at or above the level of the `kernel` package (e.g. most syscall + implementations in `syscalls/linux`); use the `kernel.Task.Copy*` wrappers + defined in `kernel/task_usermem.go`. + +- Access to a task's virtual memory, from a context that is at or above the + level of the `kernel` package, but where any of the above constraints does not + hold (e.g. `PTRACE_POKEDATA`, which ignores application memory protections); + obtain the task's `mm.MemoryManager` by calling `kernel.Task.MemoryManager`, + and call its `IO` methods directly. 
+ +- Access to a task's virtual memory, from a context that is below the level of + the `kernel` package (e.g. filesystem I/O); clients must pass I/O arguments + from higher layers, usually in the form of an `IOSequence`. The + `kernel.Task.SingleIOSequence` and `kernel.Task.IovecsIOSequence` functions in + `kernel/task_usermem.go` are convenience functions for doing so. diff --git a/pkg/sentry/usermem/access_type.go b/pkg/sentry/usermem/access_type.go new file mode 100644 index 000000000..7eabecf30 --- /dev/null +++ b/pkg/sentry/usermem/access_type.go @@ -0,0 +1,117 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usermem + +import ( + "syscall" +) + +// AccessType specifies memory access types. This is used for +// setting mapping permissions, as well as communicating faults. +type AccessType struct { + // Read is read access. + Read bool + + // Write is write access. + Write bool + + // Execute is executable access. + Execute bool +} + +// String returns a pretty representation of access. This looks like the +// familiar r-x, rw-, etc. and can be relied on as such. +func (a AccessType) String() string { + bits := [3]byte{'-', '-', '-'} + if a.Read { + bits[0] = 'r' + } + if a.Write { + bits[1] = 'w' + } + if a.Execute { + bits[2] = 'x' + } + return string(bits[:]) +} + +// Any returns true iff at least one of Read, Write or Execute is true. 
+func (a AccessType) Any() bool { + return a.Read || a.Write || a.Execute +} + +// Prot returns the system prot (syscall.PROT_READ, etc.) for this access. +func (a AccessType) Prot() int { + var prot int + if a.Read { + prot |= syscall.PROT_READ + } + if a.Write { + prot |= syscall.PROT_WRITE + } + if a.Execute { + prot |= syscall.PROT_EXEC + } + return prot +} + +// SupersetOf returns true iff the access types in a are a superset of the +// access types in other. +func (a AccessType) SupersetOf(other AccessType) bool { + if !a.Read && other.Read { + return false + } + if !a.Write && other.Write { + return false + } + if !a.Execute && other.Execute { + return false + } + return true +} + +// Intersect returns the access types set in both a and other. +func (a AccessType) Intersect(other AccessType) AccessType { + return AccessType{ + Read: a.Read && other.Read, + Write: a.Write && other.Write, + Execute: a.Execute && other.Execute, + } +} + +// Effective returns the set of effective access types allowed by a, even if +// some types are not explicitly allowed. +func (a AccessType) Effective() AccessType { + // In Linux, Write and Execute access generally imply Read access. See + // mm/mmap.c:protection_map. + // + // The notable exception is get_user_pages, which only checks against + // the original vma flags. That said, most user memory accesses do not + // use GUP. + if a.Write || a.Execute { + a.Read = true + } + return a +} + +// Convenient access types. +var ( + NoAccess = AccessType{} + Read = AccessType{Read: true} + Write = AccessType{Write: true} + Execute = AccessType{Execute: true} + ReadWrite = AccessType{Read: true, Write: true} + AnyAccess = AccessType{Read: true, Write: true, Execute: true} +) diff --git a/pkg/sentry/usermem/addr.go b/pkg/sentry/usermem/addr.go new file mode 100644 index 000000000..d175fdc74 --- /dev/null +++ b/pkg/sentry/usermem/addr.go @@ -0,0 +1,106 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usermem + +import ( + "fmt" +) + +// Addr represents a generic virtual address. +type Addr uintptr + +// AddLength adds the given length to start and returns the result. ok is true +// iff adding the length did not overflow the range of Addr. +// +// Note: This function is usually used to get the end of an address range +// defined by its start address and length. Since the resulting end is +// exclusive, end == 0 is technically valid, and corresponds to a range that +// extends to the end of the address space, but ok will be false. This isn't +// expected to ever come up in practice. +func (v Addr) AddLength(length uint64) (end Addr, ok bool) { + end = v + Addr(length) + // The second half of the following check is needed in case uintptr is + // smaller than 64 bits. + ok = end >= v && length <= uint64(^Addr(0)) + return +} + +// RoundDown returns the address rounded down to the nearest page boundary. +func (v Addr) RoundDown() Addr { + return v & ^Addr(PageSize-1) +} + +// RoundUp returns the address rounded up to the nearest page boundary. ok is +// true iff rounding up did not wrap around. +func (v Addr) RoundUp() (addr Addr, ok bool) { + addr = Addr(v + PageSize - 1).RoundDown() + ok = addr >= v + return +} + +// MustRoundUp is equivalent to RoundUp, but panics if rounding up wraps +// around. 
+func (v Addr) MustRoundUp() Addr { + addr, ok := v.RoundUp() + if !ok { + panic(fmt.Sprintf("usermem.Addr(%d).RoundUp() wraps", v)) + } + return addr +} + +// HugeRoundDown returns the address rounded down to the nearest huge page +// boundary. +func (v Addr) HugeRoundDown() Addr { + return v & ^Addr(HugePageSize-1) +} + +// HugeRoundUp returns the address rounded up to the nearest huge page boundary. +// ok is true iff rounding up did not wrap around. +func (v Addr) HugeRoundUp() (addr Addr, ok bool) { + addr = Addr(v + HugePageSize - 1).HugeRoundDown() + ok = addr >= v + return +} + +// PageOffset returns the offset of v into the current page. +func (v Addr) PageOffset() uint64 { + return uint64(v & Addr(PageSize-1)) +} + +// IsPageAligned returns true if v.PageOffset() == 0. +func (v Addr) IsPageAligned() bool { + return v.PageOffset() == 0 +} + +// AddrRange is a range of Addrs. +// +// type AddrRange <generated by go_generics> + +// ToRange returns [v, v+length). +func (v Addr) ToRange(length uint64) (AddrRange, bool) { + end, ok := v.AddLength(length) + return AddrRange{v, end}, ok +} + +// IsPageAligned returns true if ar.Start.IsPageAligned() and +// ar.End.IsPageAligned(). +func (ar AddrRange) IsPageAligned() bool { + return ar.Start.IsPageAligned() && ar.End.IsPageAligned() +} + +// String implements fmt.Stringer.String. +func (ar AddrRange) String() string { + return fmt.Sprintf("[%#x, %#x)", ar.Start, ar.End) +} diff --git a/pkg/sentry/usermem/addr_range_seq_test.go b/pkg/sentry/usermem/addr_range_seq_test.go new file mode 100644 index 000000000..cf9d785ed --- /dev/null +++ b/pkg/sentry/usermem/addr_range_seq_test.go @@ -0,0 +1,197 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usermem + +import ( + "testing" +) + +var addrRangeSeqTests = []struct { + desc string + ranges []AddrRange +}{ + { + desc: "Empty sequence", + }, + { + desc: "Single empty AddrRange", + ranges: []AddrRange{ + {0x10, 0x10}, + }, + }, + { + desc: "Single non-empty AddrRange of length 1", + ranges: []AddrRange{ + {0x10, 0x11}, + }, + }, + { + desc: "Single non-empty AddrRange of length 2", + ranges: []AddrRange{ + {0x10, 0x12}, + }, + }, + { + desc: "Multiple non-empty AddrRanges", + ranges: []AddrRange{ + {0x10, 0x11}, + {0x20, 0x22}, + }, + }, + { + desc: "Multiple AddrRanges including empty AddrRanges", + ranges: []AddrRange{ + {0x10, 0x10}, + {0x20, 0x20}, + {0x30, 0x33}, + {0x40, 0x44}, + {0x50, 0x50}, + {0x60, 0x60}, + {0x70, 0x77}, + {0x80, 0x88}, + {0x90, 0x90}, + {0xa0, 0xa0}, + }, + }, +} + +func testAddrRangeSeqEqualityWithTailIteration(t *testing.T, ars AddrRangeSeq, wantRanges []AddrRange) { + var wantLen int64 + for _, ar := range wantRanges { + wantLen += int64(ar.Length()) + } + + var i int + for !ars.IsEmpty() { + if gotLen := ars.NumBytes(); gotLen != wantLen { + t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d", i, ars, gotLen, wantLen) + } + if gotN, wantN := ars.NumRanges(), len(wantRanges)-i; gotN != wantN { + t.Errorf("Iteration %d: %v.NumRanges(): got %d, wanted %d", i, ars, gotN, wantN) + } + got := ars.Head() + if i >= len(wantRanges) { + t.Errorf("Iteration %d: %v.Head(): got %s, wanted <end of sequence>", i, ars, got) + } else if want := wantRanges[i]; got != want { + t.Errorf("Iteration %d: %v.Head(): 
got %s, wanted %s", i, ars, got, want) + } + ars = ars.Tail() + wantLen -= int64(got.Length()) + i++ + } + if gotLen := ars.NumBytes(); gotLen != 0 || wantLen != 0 { + t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d (which should be 0)", i, ars, gotLen, wantLen) + } + if gotN := ars.NumRanges(); gotN != 0 { + t.Errorf("Iteration %d: %v.NumRanges(): got %d, wanted 0", i, ars, gotN) + } +} + +func TestAddrRangeSeqTailIteration(t *testing.T) { + for _, test := range addrRangeSeqTests { + t.Run(test.desc, func(t *testing.T) { + testAddrRangeSeqEqualityWithTailIteration(t, AddrRangeSeqFromSlice(test.ranges), test.ranges) + }) + } +} + +func TestAddrRangeSeqDropFirstEmpty(t *testing.T) { + var ars AddrRangeSeq + if got, want := ars.DropFirst(1), ars; got != want { + t.Errorf("%v.DropFirst(1): got %v, wanted %v", ars, got, want) + } +} + +func TestAddrRangeSeqDropSingleByteIteration(t *testing.T) { + // Tests AddrRangeSeq iteration using Head/DropFirst, simulating + // I/O-per-AddrRange. + for _, test := range addrRangeSeqTests { + t.Run(test.desc, func(t *testing.T) { + // Figure out what AddrRanges we expect to see. + var wantLen int64 + var wantRanges []AddrRange + for _, ar := range test.ranges { + wantLen += int64(ar.Length()) + wantRanges = append(wantRanges, ar) + if ar.Length() == 0 { + // We "do" 0 bytes of I/O and then call DropFirst(0), + // advancing to the next AddrRange. + continue + } + // Otherwise we "do" 1 byte of I/O and then call DropFirst(1), + // advancing the AddrRange by 1 byte, or to the next AddrRange + // if this one is exhausted. 
+ for ar.Start++; ar.Length() != 0; ar.Start++ { + wantRanges = append(wantRanges, ar) + } + } + t.Logf("Expected AddrRanges: %s (%d bytes)", wantRanges, wantLen) + + ars := AddrRangeSeqFromSlice(test.ranges) + var i int + for !ars.IsEmpty() { + if gotLen := ars.NumBytes(); gotLen != wantLen { + t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d", i, ars, gotLen, wantLen) + } + got := ars.Head() + if i >= len(wantRanges) { + t.Errorf("Iteration %d: %v.Head(): got %s, wanted <end of sequence>", i, ars, got) + } else if want := wantRanges[i]; got != want { + t.Errorf("Iteration %d: %v.Head(): got %s, wanted %s", i, ars, got, want) + } + if got.Length() == 0 { + ars = ars.DropFirst(0) + } else { + ars = ars.DropFirst(1) + wantLen-- + } + i++ + } + if gotLen := ars.NumBytes(); gotLen != 0 || wantLen != 0 { + t.Errorf("Iteration %d: %v.NumBytes(): got %d, wanted %d (which should be 0)", i, ars, gotLen, wantLen) + } + }) + } +} + +func TestAddrRangeSeqTakeFirstEmpty(t *testing.T) { + var ars AddrRangeSeq + if got, want := ars.TakeFirst(1), ars; got != want { + t.Errorf("%v.TakeFirst(1): got %v, wanted %v", ars, got, want) + } +} + +func TestAddrRangeSeqTakeFirst(t *testing.T) { + ranges := []AddrRange{ + {0x10, 0x11}, + {0x20, 0x22}, + {0x30, 0x30}, + {0x40, 0x44}, + {0x50, 0x55}, + {0x60, 0x60}, + {0x70, 0x77}, + } + ars := AddrRangeSeqFromSlice(ranges).TakeFirst(5) + want := []AddrRange{ + {0x10, 0x11}, // +1 byte (total 1 byte), not truncated + {0x20, 0x22}, // +2 bytes (total 3 bytes), not truncated + {0x30, 0x30}, // +0 bytes (total 3 bytes), no change + {0x40, 0x42}, // +2 bytes (total 5 bytes), partially truncated + {0x50, 0x50}, // +0 bytes (total 5 bytes), fully truncated + {0x60, 0x60}, // +0 bytes (total 5 bytes), "fully truncated" (no change) + {0x70, 0x70}, // +0 bytes (total 5 bytes), fully truncated + } + testAddrRangeSeqEqualityWithTailIteration(t, ars, want) +} diff --git a/pkg/sentry/usermem/addr_range_seq_unsafe.go 
b/pkg/sentry/usermem/addr_range_seq_unsafe.go new file mode 100644 index 000000000..13b2998b3 --- /dev/null +++ b/pkg/sentry/usermem/addr_range_seq_unsafe.go @@ -0,0 +1,277 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usermem + +import ( + "bytes" + "fmt" + "reflect" + "unsafe" +) + +// An AddrRangeSeq represents a sequence of AddrRanges. +// +// AddrRangeSeqs are immutable and may be copied by value. The zero value of +// AddrRangeSeq represents an empty sequence. +// +// An AddrRangeSeq may contain AddrRanges with a length of 0. This is necessary +// since zero-length AddrRanges are significant to MM bounds checks. +type AddrRangeSeq struct { + // If length is 0, then the AddrRangeSeq represents no AddrRanges. + // Invariants: data == 0; offset == 0; limit == 0. + // + // If length is 1, then the AddrRangeSeq represents the single + // AddrRange{offset, offset+limit}. Invariants: data == 0. + // + // Otherwise, length >= 2, and the AddrRangeSeq represents the `length` + // AddrRanges in the array of AddrRanges starting at address `data`, + // starting at `offset` bytes into the first AddrRange and limited to the + // following `limit` bytes. (AddrRanges after `limit` are still iterated, + // but are truncated to a length of 0.) Invariants: data != 0; offset <= + // data[0].Length(); limit > 0; offset+limit <= the combined length of all + // AddrRanges in the array. 
+ data unsafe.Pointer + length int + offset Addr + limit Addr +} + +// AddrRangeSeqOf returns an AddrRangeSeq representing the single AddrRange ar. +func AddrRangeSeqOf(ar AddrRange) AddrRangeSeq { + return AddrRangeSeq{ + length: 1, + offset: ar.Start, + limit: ar.Length(), + } +} + +// AddrRangeSeqFromSlice returns an AddrRangeSeq representing all AddrRanges in +// slice. +// +// Whether the returned AddrRangeSeq shares memory with slice is unspecified; +// clients should avoid mutating slices passed to AddrRangeSeqFromSlice. +// +// Preconditions: The combined length of all AddrRanges in slice <= +// math.MaxInt64. +func AddrRangeSeqFromSlice(slice []AddrRange) AddrRangeSeq { + var limit int64 + for _, ar := range slice { + len64 := int64(ar.Length()) + if len64 < 0 { + panic(fmt.Sprintf("Length of AddrRange %v overflows int64", ar)) + } + sum := limit + len64 + if sum < limit { + panic(fmt.Sprintf("Total length of AddrRanges %v overflows int64", slice)) + } + limit = sum + } + return addrRangeSeqFromSliceLimited(slice, limit) +} + +// Preconditions: The combined length of all AddrRanges in slice <= limit. +// limit >= 0. If len(slice) != 0, then limit > 0. +func addrRangeSeqFromSliceLimited(slice []AddrRange, limit int64) AddrRangeSeq { + switch len(slice) { + case 0: + return AddrRangeSeq{} + case 1: + return AddrRangeSeq{ + length: 1, + offset: slice[0].Start, + limit: Addr(limit), + } + default: + return AddrRangeSeq{ + data: unsafe.Pointer(&slice[0]), + length: len(slice), + limit: Addr(limit), + } + } +} + +// IsEmpty returns true if ars.NumRanges() == 0. +// +// Note that since AddrRangeSeq may contain AddrRanges with a length of zero, +// an AddrRange representing 0 bytes (AddrRangeSeq.NumBytes() == 0) is not +// necessarily empty. +func (ars AddrRangeSeq) IsEmpty() bool { + return ars.length == 0 +} + +// NumRanges returns the number of AddrRanges in ars. 
+func (ars AddrRangeSeq) NumRanges() int { + return ars.length +} + +// NumBytes returns the number of bytes represented by ars. +func (ars AddrRangeSeq) NumBytes() int64 { + return int64(ars.limit) +} + +// Head returns the first AddrRange in ars. +// +// Preconditions: !ars.IsEmpty(). +func (ars AddrRangeSeq) Head() AddrRange { + if ars.length == 0 { + panic("empty AddrRangeSeq") + } + if ars.length == 1 { + return AddrRange{ars.offset, ars.offset + ars.limit} + } + ar := *(*AddrRange)(ars.data) + ar.Start += ars.offset + if ar.Length() > ars.limit { + ar.End = ar.Start + ars.limit + } + return ar +} + +// Tail returns an AddrRangeSeq consisting of all AddrRanges in ars after the +// first. +// +// Preconditions: !ars.IsEmpty(). +func (ars AddrRangeSeq) Tail() AddrRangeSeq { + if ars.length == 0 { + panic("empty AddrRangeSeq") + } + if ars.length == 1 { + return AddrRangeSeq{} + } + return ars.externalTail() +} + +// Preconditions: ars.length >= 2. +func (ars AddrRangeSeq) externalTail() AddrRangeSeq { + headLen := (*AddrRange)(ars.data).Length() - ars.offset + var tailLimit int64 + if ars.limit > headLen { + tailLimit = int64(ars.limit - headLen) + } + var extSlice []AddrRange + extSliceHdr := (*reflect.SliceHeader)(unsafe.Pointer(&extSlice)) + extSliceHdr.Data = uintptr(ars.data) + extSliceHdr.Len = ars.length + extSliceHdr.Cap = ars.length + return addrRangeSeqFromSliceLimited(extSlice[1:], tailLimit) +} + +// DropFirst returns an AddrRangeSeq equivalent to ars, but with the first n +// bytes omitted. If n > ars.NumBytes(), DropFirst returns an empty +// AddrRangeSeq. +// +// If !ars.IsEmpty() and ars.Head().Length() == 0, DropFirst will always omit +// at least ars.Head(), even if n == 0. This guarantees that the basic pattern +// of: +// +// for !ars.IsEmpty() { +// n, err = doIOWith(ars.Head()) +// if err != nil { +// return err +// } +// ars = ars.DropFirst(n) +// } +// +// works even in the presence of zero-length AddrRanges. 
+// +// Preconditions: n >= 0. +func (ars AddrRangeSeq) DropFirst(n int) AddrRangeSeq { + if n < 0 { + panic(fmt.Sprintf("invalid n: %d", n)) + } + return ars.DropFirst64(int64(n)) +} + +// DropFirst64 is equivalent to DropFirst but takes an int64. +func (ars AddrRangeSeq) DropFirst64(n int64) AddrRangeSeq { + if n < 0 { + panic(fmt.Sprintf("invalid n: %d", n)) + } + if Addr(n) > ars.limit { + return AddrRangeSeq{} + } + // Handle initial empty AddrRange. + switch ars.length { + case 0: + return AddrRangeSeq{} + case 1: + if ars.limit == 0 { + return AddrRangeSeq{} + } + default: + if rawHeadLen := (*AddrRange)(ars.data).Length(); ars.offset == rawHeadLen { + ars = ars.externalTail() + } + } + for n != 0 { + // Calling ars.Head() here is surprisingly expensive, so inline getting + // the head's length. + var headLen Addr + if ars.length == 1 { + headLen = ars.limit + } else { + headLen = (*AddrRange)(ars.data).Length() - ars.offset + } + if Addr(n) < headLen { + // Dropping ends partway through the head AddrRange. + ars.offset += Addr(n) + ars.limit -= Addr(n) + return ars + } + n -= int64(headLen) + ars = ars.Tail() + } + return ars +} + +// TakeFirst returns an AddrRangeSeq equivalent to ars, but iterating at most n +// bytes. TakeFirst never removes AddrRanges from ars; AddrRanges beyond the +// first n bytes are reduced to a length of zero, but will still be iterated. +// +// Preconditions: n >= 0. +func (ars AddrRangeSeq) TakeFirst(n int) AddrRangeSeq { + if n < 0 { + panic(fmt.Sprintf("invalid n: %d", n)) + } + return ars.TakeFirst64(int64(n)) +} + +// TakeFirst64 is equivalent to TakeFirst but takes an int64. +func (ars AddrRangeSeq) TakeFirst64(n int64) AddrRangeSeq { + if n < 0 { + panic(fmt.Sprintf("invalid n: %d", n)) + } + if ars.limit > Addr(n) { + ars.limit = Addr(n) + } + return ars +} + +// String implements fmt.Stringer.String. 
+func (ars AddrRangeSeq) String() string { + // This is deliberately chosen to be the same as fmt's automatic stringer + // for []AddrRange. + var buf bytes.Buffer + buf.WriteByte('[') + var sep string + for !ars.IsEmpty() { + buf.WriteString(sep) + sep = " " + buf.WriteString(ars.Head().String()) + ars = ars.Tail() + } + buf.WriteByte(']') + return buf.String() +} diff --git a/pkg/sentry/usermem/bytes_io.go b/pkg/sentry/usermem/bytes_io.go new file mode 100644 index 000000000..01a746404 --- /dev/null +++ b/pkg/sentry/usermem/bytes_io.go @@ -0,0 +1,126 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usermem + +import ( + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +const maxInt = int(^uint(0) >> 1) + +// BytesIO implements IO using a byte slice. Addresses are interpreted as +// offsets into the slice. Reads and writes beyond the end of the slice return +// EFAULT. +type BytesIO struct { + Bytes []byte +} + +// CopyOut implements IO.CopyOut. +func (b *BytesIO) CopyOut(ctx context.Context, addr Addr, src []byte, opts IOOpts) (int, error) { + rngN, rngErr := b.rangeCheck(addr, len(src)) + if rngN == 0 { + return 0, rngErr + } + return copy(b.Bytes[int(addr):], src[:rngN]), rngErr +} + +// CopyIn implements IO.CopyIn. 
+func (b *BytesIO) CopyIn(ctx context.Context, addr Addr, dst []byte, opts IOOpts) (int, error) { + rngN, rngErr := b.rangeCheck(addr, len(dst)) + if rngN == 0 { + return 0, rngErr + } + return copy(dst[:rngN], b.Bytes[int(addr):]), rngErr +} + +// ZeroOut implements IO.ZeroOut. +func (b *BytesIO) ZeroOut(ctx context.Context, addr Addr, toZero int64, opts IOOpts) (int64, error) { + if toZero > int64(maxInt) { + return 0, syserror.EINVAL + } + rngN, rngErr := b.rangeCheck(addr, int(toZero)) + if rngN == 0 { + return 0, rngErr + } + zeroSlice := b.Bytes[int(addr) : int(addr)+rngN] + for i := range zeroSlice { + zeroSlice[i] = 0 + } + return int64(rngN), rngErr +} + +// CopyOutFrom implements IO.CopyOutFrom. +func (b *BytesIO) CopyOutFrom(ctx context.Context, ars AddrRangeSeq, src safemem.Reader, opts IOOpts) (int64, error) { + dsts, rngErr := b.blocksFromAddrRanges(ars) + n, err := src.ReadToBlocks(dsts) + if err != nil { + return int64(n), err + } + return int64(n), rngErr +} + +// CopyInTo implements IO.CopyInTo. 
+func (b *BytesIO) CopyInTo(ctx context.Context, ars AddrRangeSeq, dst safemem.Writer, opts IOOpts) (int64, error) { + srcs, rngErr := b.blocksFromAddrRanges(ars) + n, err := dst.WriteFromBlocks(srcs) + if err != nil { + return int64(n), err + } + return int64(n), rngErr +} + +func (b *BytesIO) rangeCheck(addr Addr, length int) (int, error) { + if length == 0 { + return 0, nil + } + if length < 0 { + return 0, syserror.EINVAL + } + max := Addr(len(b.Bytes)) + if addr >= max { + return 0, syserror.EFAULT + } + end, ok := addr.AddLength(uint64(length)) + if !ok || end > max { + return int(max - addr), syserror.EFAULT + } + return length, nil +} + +func (b *BytesIO) blocksFromAddrRanges(ars AddrRangeSeq) (safemem.BlockSeq, error) { + blocks := make([]safemem.Block, 0, ars.NumRanges()) + for !ars.IsEmpty() { + ar := ars.Head() + n, err := b.rangeCheck(ar.Start, int(ar.Length())) + if n != 0 { + blocks = append(blocks, safemem.BlockFromSafeSlice(b.Bytes[int(ar.Start):int(ar.Start)+n])) + } + if err != nil { + return safemem.BlockSeqFromSlice(blocks), err + } + ars = ars.Tail() + } + return safemem.BlockSeqFromSlice(blocks), nil +} + +// BytesIOSequence returns an IOSequence representing the given byte slice. +func BytesIOSequence(buf []byte) IOSequence { + return IOSequence{ + IO: &BytesIO{buf}, + Addrs: AddrRangeSeqOf(AddrRange{0, Addr(len(buf))}), + } +} diff --git a/pkg/sentry/usermem/bytes_io_unsafe.go b/pkg/sentry/usermem/bytes_io_unsafe.go new file mode 100644 index 000000000..efd71fcbc --- /dev/null +++ b/pkg/sentry/usermem/bytes_io_unsafe.go @@ -0,0 +1,39 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package usermem + +import ( + "sync/atomic" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/atomicbitops" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" +) + +// SwapUint32 implements IO.SwapUint32. +func (b *BytesIO) SwapUint32(ctx context.Context, addr Addr, new uint32, opts IOOpts) (uint32, error) { + if _, rngErr := b.rangeCheck(addr, 4); rngErr != nil { + return 0, rngErr + } + return atomic.SwapUint32((*uint32)(unsafe.Pointer(&b.Bytes[int(addr)])), new), nil +} + +// CompareAndSwapUint32 implements IO.CompareAndSwapUint32. +func (b *BytesIO) CompareAndSwapUint32(ctx context.Context, addr Addr, old, new uint32, opts IOOpts) (uint32, error) { + if _, rngErr := b.rangeCheck(addr, 4); rngErr != nil { + return 0, rngErr + } + return atomicbitops.CompareAndSwapUint32((*uint32)(unsafe.Pointer(&b.Bytes[int(addr)])), old, new), nil +} diff --git a/pkg/sentry/usermem/usermem.go b/pkg/sentry/usermem/usermem.go new file mode 100644 index 000000000..5d8a1c558 --- /dev/null +++ b/pkg/sentry/usermem/usermem.go @@ -0,0 +1,572 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package usermem governs access to user memory. +package usermem + +import ( + "errors" + "io" + "strconv" + + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// IO provides access to the contents of a virtual memory space. +// +// FIXME: Implementations of IO cannot expect ctx to contain any +// meaningful data. +type IO interface { + // CopyOut copies len(src) bytes from src to the memory mapped at addr. It + // returns the number of bytes copied. If the number of bytes copied is < + // len(src), it returns a non-nil error explaining why. + // + // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or + // any following locks in the lock order. + CopyOut(ctx context.Context, addr Addr, src []byte, opts IOOpts) (int, error) + + // CopyIn copies len(dst) bytes from the memory mapped at addr to dst. + // It returns the number of bytes copied. If the number of bytes copied is + // < len(dst), it returns a non-nil error explaining why. + // + // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or + // any following locks in the lock order. + CopyIn(ctx context.Context, addr Addr, dst []byte, opts IOOpts) (int, error) + + // ZeroOut sets toZero bytes to 0, starting at addr. It returns the number + // of bytes zeroed. If the number of bytes zeroed is < toZero, it returns a + // non-nil error explaining why. + // + // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or + // any following locks in the lock order. toZero >= 0. + ZeroOut(ctx context.Context, addr Addr, toZero int64, opts IOOpts) (int64, error) + + // CopyOutFrom copies ars.NumBytes() bytes from src to the memory mapped at + // ars. 
It returns the number of bytes copied, which may be less than the + // number of bytes read from src if copying fails. CopyOutFrom may return a + // partial copy without an error iff src.ReadToBlocks returns a partial + // read without an error. + // + // CopyOutFrom calls src.ReadToBlocks at most once. + // + // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or + // any following locks in the lock order. src.ReadToBlocks must not block + // on mm.MemoryManager.activeMu or any preceding locks in the lock order. + CopyOutFrom(ctx context.Context, ars AddrRangeSeq, src safemem.Reader, opts IOOpts) (int64, error) + + // CopyInTo copies ars.NumBytes() bytes from the memory mapped at ars to + // dst. It returns the number of bytes copied. CopyInTo may return a + // partial copy without an error iff dst.WriteFromBlocks returns a partial + // write without an error. + // + // CopyInTo calls dst.WriteFromBlocks at most once. + // + // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or + // any following locks in the lock order. dst.WriteFromBlocks must not + // block on mm.MemoryManager.activeMu or any preceding locks in the lock + // order. + CopyInTo(ctx context.Context, ars AddrRangeSeq, dst safemem.Writer, opts IOOpts) (int64, error) + + // TODO: The requirement that CopyOutFrom/CopyInTo call src/dst + // at most once, which is unnecessary in most cases, forces implementations + // to gather safemem.Blocks into a single slice to pass to src/dst. Add + // CopyOutFromIter/CopyInToIter, which relaxes this restriction, to avoid + // this allocation. + + // SwapUint32 atomically sets the uint32 value at addr to new and + // returns the previous value. + // + // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or + // any following locks in the lock order. addr must be aligned to a 4-byte + // boundary. 
+ SwapUint32(ctx context.Context, addr Addr, new uint32, opts IOOpts) (uint32, error) + + // CompareAndSwapUint32 atomically compares the uint32 value at addr to + // old; if they are equal, the value in memory is replaced by new. In + // either case, the previous value stored in memory is returned. + // + // Preconditions: The caller must not hold mm.MemoryManager.mappingMu or + // any following locks in the lock order. addr must be aligned to a 4-byte + // boundary. + CompareAndSwapUint32(ctx context.Context, addr Addr, old, new uint32, opts IOOpts) (uint32, error) +} + +// IOOpts contains options applicable to all IO methods. +type IOOpts struct { + // If IgnorePermissions is true, application-defined memory protections set + // by mmap(2) or mprotect(2) will be ignored. (Memory protections required + // by the target of the mapping are never ignored.) + IgnorePermissions bool + + // If AddressSpaceActive is true, the IO implementation may assume that it + // has an active AddressSpace and can therefore use AddressSpace copying + // without performing activation. See mm/io.go for details. + AddressSpaceActive bool +} + +// IOReadWriter is an io.ReadWriter that reads from / writes to addresses +// starting at addr in IO. The preconditions that apply to IO.CopyIn and +// IO.CopyOut also apply to IOReadWriter.Read and IOReadWriter.Write +// respectively. +type IOReadWriter struct { + Ctx context.Context + IO IO + Addr Addr + Opts IOOpts +} + +// Read implements io.Reader.Read. +// +// Note that an address space does not have an "end of file", so Read can only +// return io.EOF if IO.CopyIn returns io.EOF. Attempts to read unmapped or +// unreadable memory, or beyond the end of the address space, should return +// EFAULT. +func (rw *IOReadWriter) Read(dst []byte) (int, error) { + n, err := rw.IO.CopyIn(rw.Ctx, rw.Addr, dst, rw.Opts) + end, ok := rw.Addr.AddLength(uint64(n)) + if ok { + rw.Addr = end + } else { + // Disallow wraparound. 
+ rw.Addr = ^Addr(0) + if err != nil { + err = syserror.EFAULT + } + } + return n, err +} + +// Writer implements io.Writer.Write. +func (rw *IOReadWriter) Write(src []byte) (int, error) { + n, err := rw.IO.CopyOut(rw.Ctx, rw.Addr, src, rw.Opts) + end, ok := rw.Addr.AddLength(uint64(n)) + if ok { + rw.Addr = end + } else { + // Disallow wraparound. + rw.Addr = ^Addr(0) + if err != nil { + err = syserror.EFAULT + } + } + return n, err +} + +// CopyObjectOut copies a fixed-size value or slice of fixed-size values from +// src to the memory mapped at addr in uio. It returns the number of bytes +// copied. +// +// CopyObjectOut must use reflection to encode src; performance-sensitive +// clients should do encoding manually and use uio.CopyOut directly. +// +// Preconditions: As for IO.CopyOut. +func CopyObjectOut(ctx context.Context, uio IO, addr Addr, src interface{}, opts IOOpts) (int, error) { + w := &IOReadWriter{ + Ctx: ctx, + IO: uio, + Addr: addr, + Opts: opts, + } + return w.Write(binary.Marshal(nil, ByteOrder, src)) +} + +// CopyObjectIn copies a fixed-size value or slice of fixed-size values from +// the memory mapped at addr in uio to dst. It returns the number of bytes +// copied. +// +// CopyObjectIn must use reflection to decode dst; performance-sensitive +// clients should use uio.CopyIn directly and do decoding manually. +// +// Preconditions: As for IO.CopyIn. +func CopyObjectIn(ctx context.Context, uio IO, addr Addr, dst interface{}, opts IOOpts) (int, error) { + r := &IOReadWriter{ + Ctx: ctx, + IO: uio, + Addr: addr, + Opts: opts, + } + buf := make([]byte, binary.Size(dst)) + if _, err := io.ReadFull(r, buf); err != nil { + return 0, err + } + binary.Unmarshal(buf, ByteOrder, dst) + return int(r.Addr - addr), nil +} + +// copyStringIncrement is the maximum number of bytes that are copied from +// virtual memory at a time by CopyStringIn. 
+const copyStringIncrement = 64 + +// CopyStringIn copies a NUL-terminated string of unknown length from the +// memory mapped at addr in uio and returns it as a string (not including the +// trailing NUL). If the length of the string, including the terminating NUL, +// would exceed maxlen, CopyStringIn returns the string truncated to maxlen and +// ENAMETOOLONG. +// +// Preconditions: As for IO.CopyFromUser. maxlen >= 0. +func CopyStringIn(ctx context.Context, uio IO, addr Addr, maxlen int, opts IOOpts) (string, error) { + buf := make([]byte, maxlen) + var done int + for done < maxlen { + start, ok := addr.AddLength(uint64(done)) + if !ok { + // Last page of kernel memory. The application can't use this + // anyway. + return string(buf[:done]), syserror.EFAULT + } + // Read up to copyStringIncrement bytes at a time. + readlen := copyStringIncrement + if readlen > maxlen-done { + readlen = maxlen - done + } + end, ok := start.AddLength(uint64(readlen)) + if !ok { + return string(buf[:done]), syserror.EFAULT + } + // Shorten the read to avoid crossing page boundaries, since faulting + // in a page unnecessarily is expensive. This also ensures that partial + // copies up to the end of application-mappable memory succeed. + if start.RoundDown() != end.RoundDown() { + end = end.RoundDown() + } + n, err := uio.CopyIn(ctx, start, buf[done:done+int(end-start)], opts) + // Look for the terminating zero byte, which may have occurred before + // hitting err. + for i, c := range buf[done : done+n] { + if c == 0 { + return string(buf[:done+i]), nil + } + } + done += n + if err != nil { + return string(buf[:done]), err + } + } + return string(buf), syserror.ENAMETOOLONG +} + +// CopyOutVec copies bytes from src to the memory mapped at ars in uio. The +// maximum number of bytes copied is ars.NumBytes() or len(src), whichever is +// less. CopyOutVec returns the number of bytes copied; if this is less than +// the maximum, it returns a non-nil error explaining why. 
+// +// Preconditions: As for IO.CopyOut. +func CopyOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, src []byte, opts IOOpts) (int, error) { + var done int + for !ars.IsEmpty() && done < len(src) { + ar := ars.Head() + cplen := len(src) - done + if Addr(cplen) >= ar.Length() { + cplen = int(ar.Length()) + } + n, err := uio.CopyOut(ctx, ar.Start, src[done:done+cplen], opts) + done += n + if err != nil { + return done, err + } + ars = ars.DropFirst(n) + } + return done, nil +} + +// CopyInVec copies bytes from the memory mapped at ars in uio to dst. The +// maximum number of bytes copied is ars.NumBytes() or len(dst), whichever is +// less. CopyInVec returns the number of bytes copied; if this is less than the +// maximum, it returns a non-nil error explaining why. +// +// Preconditions: As for IO.CopyIn. +func CopyInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst []byte, opts IOOpts) (int, error) { + var done int + for !ars.IsEmpty() && done < len(dst) { + ar := ars.Head() + cplen := len(dst) - done + if Addr(cplen) >= ar.Length() { + cplen = int(ar.Length()) + } + n, err := uio.CopyIn(ctx, ar.Start, dst[done:done+cplen], opts) + done += n + if err != nil { + return done, err + } + ars = ars.DropFirst(n) + } + return done, nil +} + +// ZeroOutVec writes zeroes to the memory mapped at ars in uio. The maximum +// number of bytes written is ars.NumBytes() or toZero, whichever is less. +// ZeroOutVec returns the number of bytes written; if this is less than the +// maximum, it returns a non-nil error explaining why. +// +// Preconditions: As for IO.ZeroOut. 
+func ZeroOutVec(ctx context.Context, uio IO, ars AddrRangeSeq, toZero int64, opts IOOpts) (int64, error) { + var done int64 + for !ars.IsEmpty() && done < toZero { + ar := ars.Head() + cplen := toZero - done + if Addr(cplen) >= ar.Length() { + cplen = int64(ar.Length()) + } + n, err := uio.ZeroOut(ctx, ar.Start, cplen, opts) + done += n + if err != nil { + return done, err + } + ars = ars.DropFirst64(n) + } + return done, nil +} + +func isASCIIWhitespace(b byte) bool { + // Compare Linux include/linux/ctype.h, lib/ctype.c. + // 9 => horizontal tab '\t' + // 10 => line feed '\n' + // 11 => vertical tab '\v' + // 12 => form feed '\c' + // 13 => carriage return '\r' + return b == ' ' || (b >= 9 && b <= 13) +} + +// CopyInt32StringsInVec copies up to len(dsts) whitespace-separated decimal +// strings from the memory mapped at ars in uio and converts them to int32 +// values in dsts. It returns the number of bytes read. +// +// CopyInt32StringsInVec shares the following properties with Linux's +// kernel/sysctl.c:proc_dointvec(write=1): +// +// - If any read value overflows the range of int32, or any invalid characters +// are encountered during the read, CopyInt32StringsInVec returns EINVAL. +// +// - If, upon reaching the end of ars, fewer than len(dsts) values have been +// read, CopyInt32StringsInVec returns no error if at least 1 value was read +// and EINVAL otherwise. +// +// - Trailing whitespace after the last successfully read value is counted in +// the number of bytes read. +// +// Unlike proc_dointvec(): +// +// - CopyInt32StringsInVec does not implicitly limit ars.NumBytes() to +// PageSize-1; callers that require this must do so explicitly. +// +// - CopyInt32StringsInVec returns EINVAL if ars.NumBytes() == 0. +// +// Preconditions: As for CopyInVec. 
+func CopyInt32StringsInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dsts []int32, opts IOOpts) (int64, error) { + if len(dsts) == 0 { + return 0, nil + } + + buf := make([]byte, ars.NumBytes()) + n, cperr := CopyInVec(ctx, uio, ars, buf, opts) + buf = buf[:n] + + var i, j int + for ; j < len(dsts); j++ { + // Skip leading whitespace. + for i < len(buf) && isASCIIWhitespace(buf[i]) { + i++ + } + if i == len(buf) { + break + } + + // Find the end of the value to be parsed (next whitespace or end of string). + nextI := i + 1 + for nextI < len(buf) && !isASCIIWhitespace(buf[nextI]) { + nextI++ + } + + // Parse a single value. + val, err := strconv.ParseInt(string(buf[i:nextI]), 10, 32) + if err != nil { + return int64(i), syserror.EINVAL + } + dsts[j] = int32(val) + + i = nextI + } + + // Skip trailing whitespace. + for i < len(buf) && isASCIIWhitespace(buf[i]) { + i++ + } + + if cperr != nil { + return int64(i), cperr + } + if j == 0 { + return int64(i), syserror.EINVAL + } + return int64(i), nil +} + +// CopyInt32StringInVec is equivalent to CopyInt32StringsInVec, but copies at +// most one int32. +func CopyInt32StringInVec(ctx context.Context, uio IO, ars AddrRangeSeq, dst *int32, opts IOOpts) (int64, error) { + dsts := [1]int32{*dst} + n, err := CopyInt32StringsInVec(ctx, uio, ars, dsts[:], opts) + *dst = dsts[0] + return n, err +} + +// IOSequence holds arguments to IO methods. +type IOSequence struct { + IO IO + Addrs AddrRangeSeq + Opts IOOpts +} + +// NumBytes returns s.Addrs.NumBytes(). +// +// Note that NumBytes() may return 0 even if !s.Addrs.IsEmpty(), since +// s.Addrs may contain a non-zero number of zero-length AddrRanges. 
+// Many clients of +// IOSequence currently do something like: +// +// if ioseq.NumBytes() == 0 { +// return 0, nil +// } +// if f.availableBytes == 0 { +// return 0, syserror.ErrWouldBlock +// } +// return ioseq.CopyOutFrom(..., reader) +// +// In such cases, using s.Addrs.IsEmpty() will cause them to have the wrong +// behavior for zero-length I/O. However, using s.NumBytes() == 0 instead means +// that we will return success for zero-length I/O in cases where Linux would +// return EFAULT due to a failed access_ok() check, so in the long term we +// should move checks for ErrWouldBlock etc. into the body of +// reader.ReadToBlocks and use s.Addrs.IsEmpty() instead. +func (s IOSequence) NumBytes() int64 { + return s.Addrs.NumBytes() +} + +// DropFirst returns a copy of s with s.Addrs.DropFirst(n). +// +// Preconditions: As for AddrRangeSeq.DropFirst. +func (s IOSequence) DropFirst(n int) IOSequence { + return IOSequence{s.IO, s.Addrs.DropFirst(n), s.Opts} +} + +// DropFirst64 returns a copy of s with s.Addrs.DropFirst64(n). +// +// Preconditions: As for AddrRangeSeq.DropFirst64. +func (s IOSequence) DropFirst64(n int64) IOSequence { + return IOSequence{s.IO, s.Addrs.DropFirst64(n), s.Opts} +} + +// TakeFirst returns a copy of s with s.Addrs.TakeFirst(n). +// +// Preconditions: As for AddrRangeSeq.TakeFirst. +func (s IOSequence) TakeFirst(n int) IOSequence { + return IOSequence{s.IO, s.Addrs.TakeFirst(n), s.Opts} +} + +// TakeFirst64 returns a copy of s with s.Addrs.TakeFirst64(n). +// +// Preconditions: As for AddrRangeSeq.TakeFirst64. +func (s IOSequence) TakeFirst64(n int64) IOSequence { + return IOSequence{s.IO, s.Addrs.TakeFirst64(n), s.Opts} +} + +// CopyOut invokes CopyOutVec over s.Addrs. +// +// As with CopyOutVec, if s.NumBytes() < len(src), the copy will be truncated +// to s.NumBytes(), and a nil error will be returned. +// +// Preconditions: As for CopyOutVec. 
+func (s IOSequence) CopyOut(ctx context.Context, src []byte) (int, error) { + return CopyOutVec(ctx, s.IO, s.Addrs, src, s.Opts) +} + +// CopyIn invokes CopyInVec over s.Addrs. +// +// As with CopyInVec, if s.NumBytes() < len(dst), the copy will be truncated to +// s.NumBytes(), and a nil error will be returned. +// +// Preconditions: As for CopyInVec. +func (s IOSequence) CopyIn(ctx context.Context, dst []byte) (int, error) { + return CopyInVec(ctx, s.IO, s.Addrs, dst, s.Opts) +} + +// ZeroOut invokes ZeroOutVec over s.Addrs. +// +// As with ZeroOutVec, if s.NumBytes() < toZero, the write will be truncated +// to s.NumBytes(), and a nil error will be returned. +// +// Preconditions: As for ZeroOutVec. +func (s IOSequence) ZeroOut(ctx context.Context, toZero int64) (int64, error) { + return ZeroOutVec(ctx, s.IO, s.Addrs, toZero, s.Opts) +} + +// CopyOutFrom invokes s.CopyOutFrom over s.Addrs. +// +// Preconditions: As for IO.CopyOutFrom. +func (s IOSequence) CopyOutFrom(ctx context.Context, src safemem.Reader) (int64, error) { + return s.IO.CopyOutFrom(ctx, s.Addrs, src, s.Opts) +} + +// CopyInTo invokes s.CopyInTo over s.Addrs. +// +// Preconditions: As for IO.CopyInTo. +func (s IOSequence) CopyInTo(ctx context.Context, dst safemem.Writer) (int64, error) { + return s.IO.CopyInTo(ctx, s.Addrs, dst, s.Opts) +} + +// Reader returns an io.Reader that reads from s. Reads beyond the end of s +// return io.EOF. The preconditions that apply to s.CopyIn also apply to the +// returned io.Reader.Read. +func (s IOSequence) Reader(ctx context.Context) io.Reader { + return &ioSequenceReadWriter{ctx, s} +} + +// Writer returns an io.Writer that writes to s. Writes beyond the end of s +// return ErrEndOfIOSequence. The preconditions that apply to s.CopyOut also +// apply to the returned io.Writer.Write. 
+func (s IOSequence) Writer(ctx context.Context) io.Writer { + return &ioSequenceReadWriter{ctx, s} +} + +// ErrEndOfIOSequence is returned by IOSequence.Writer().Write() when +// attempting to write beyond the end of the IOSequence. +var ErrEndOfIOSequence = errors.New("write beyond end of IOSequence") + +type ioSequenceReadWriter struct { + ctx context.Context + s IOSequence +} + +// Read implements io.Reader.Read. +func (rw *ioSequenceReadWriter) Read(dst []byte) (int, error) { + n, err := rw.s.CopyIn(rw.ctx, dst) + rw.s = rw.s.DropFirst(n) + if err == nil && rw.s.NumBytes() == 0 { + err = io.EOF + } + return n, err +} + +// Write implements io.Writer.Write. +func (rw *ioSequenceReadWriter) Write(src []byte) (int, error) { + n, err := rw.s.CopyOut(rw.ctx, src) + rw.s = rw.s.DropFirst(n) + if err == nil && n < len(src) { + err = ErrEndOfIOSequence + } + return n, err +} diff --git a/pkg/sentry/usermem/usermem_test.go b/pkg/sentry/usermem/usermem_test.go new file mode 100644 index 000000000..563560da8 --- /dev/null +++ b/pkg/sentry/usermem/usermem_test.go @@ -0,0 +1,411 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package usermem + +import ( + "bytes" + "encoding/binary" + "fmt" + "reflect" + "strings" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/sentry/context" + "gvisor.googlesource.com/gvisor/pkg/sentry/safemem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// newContext returns a context.Context that we can use in these tests (we +// can't use contexttest because it depends on usermem). +func newContext() context.Context { + return context.Background() +} + +func newBytesIOString(s string) *BytesIO { + return &BytesIO{[]byte(s)} +} + +func TestBytesIOCopyOutSuccess(t *testing.T) { + b := newBytesIOString("ABCDE") + n, err := b.CopyOut(newContext(), 1, []byte("foo"), IOOpts{}) + if wantN := 3; n != wantN || err != nil { + t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if got, want := b.Bytes, []byte("AfooE"); !bytes.Equal(got, want) { + t.Errorf("Bytes: got %q, wanted %q", got, want) + } +} + +func TestBytesIOCopyOutFailure(t *testing.T) { + b := newBytesIOString("ABC") + n, err := b.CopyOut(newContext(), 1, []byte("foo"), IOOpts{}) + if wantN, wantErr := 2, syserror.EFAULT; n != wantN || err != wantErr { + t.Errorf("CopyOut: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr) + } + if got, want := b.Bytes, []byte("Afo"); !bytes.Equal(got, want) { + t.Errorf("Bytes: got %q, wanted %q", got, want) + } +} + +func TestBytesIOCopyInSuccess(t *testing.T) { + b := newBytesIOString("AfooE") + var dst [3]byte + n, err := b.CopyIn(newContext(), 1, dst[:], IOOpts{}) + if wantN := 3; n != wantN || err != nil { + t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if got, want := dst[:], []byte("foo"); !bytes.Equal(got, want) { + t.Errorf("dst: got %q, wanted %q", got, want) + } +} + +func TestBytesIOCopyInFailure(t *testing.T) { + b := newBytesIOString("Afo") + var dst [3]byte + n, err := b.CopyIn(newContext(), 1, dst[:], IOOpts{}) + if wantN, wantErr := 2, syserror.EFAULT; n != wantN || err != wantErr { + 
t.Errorf("CopyIn: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr) + } + if got, want := dst[:], []byte("fo\x00"); !bytes.Equal(got, want) { + t.Errorf("dst: got %q, wanted %q", got, want) + } +} + +func TestBytesIOZeroOutSuccess(t *testing.T) { + b := newBytesIOString("ABCD") + n, err := b.ZeroOut(newContext(), 1, 2, IOOpts{}) + if wantN := int64(2); n != wantN || err != nil { + t.Errorf("ZeroOut: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if got, want := b.Bytes, []byte("A\x00\x00D"); !bytes.Equal(got, want) { + t.Errorf("Bytes: got %q, wanted %q", got, want) + } +} + +func TestBytesIOZeroOutFailure(t *testing.T) { + b := newBytesIOString("ABC") + n, err := b.ZeroOut(newContext(), 1, 3, IOOpts{}) + if wantN, wantErr := int64(2), syserror.EFAULT; n != wantN || err != wantErr { + t.Errorf("ZeroOut: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr) + } + if got, want := b.Bytes, []byte("A\x00\x00"); !bytes.Equal(got, want) { + t.Errorf("Bytes: got %q, wanted %q", got, want) + } +} + +func TestBytesIOCopyOutFromSuccess(t *testing.T) { + b := newBytesIOString("ABCDEFGH") + n, err := b.CopyOutFrom(newContext(), AddrRangeSeqFromSlice([]AddrRange{ + {Start: 4, End: 7}, + {Start: 1, End: 4}, + }), safemem.FromIOReader{bytes.NewBufferString("barfoo")}, IOOpts{}) + if wantN := int64(6); n != wantN || err != nil { + t.Errorf("CopyOutFrom: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if got, want := b.Bytes, []byte("AfoobarH"); !bytes.Equal(got, want) { + t.Errorf("Bytes: got %q, wanted %q", got, want) + } +} + +func TestBytesIOCopyOutFromFailure(t *testing.T) { + b := newBytesIOString("ABCDE") + n, err := b.CopyOutFrom(newContext(), AddrRangeSeqFromSlice([]AddrRange{ + {Start: 1, End: 4}, + {Start: 4, End: 7}, + }), safemem.FromIOReader{bytes.NewBufferString("foobar")}, IOOpts{}) + if wantN, wantErr := int64(4), syserror.EFAULT; n != wantN || err != wantErr { + t.Errorf("CopyOutFrom: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr) + } 
+	if got, want := b.Bytes, []byte("Afoob"); !bytes.Equal(got, want) {
+		t.Errorf("Bytes: got %q, wanted %q", got, want)
+	}
+}
+
+func TestBytesIOCopyInToSuccess(t *testing.T) {
+	b := newBytesIOString("AfoobarH")
+	var dst bytes.Buffer
+	n, err := b.CopyInTo(newContext(), AddrRangeSeqFromSlice([]AddrRange{
+		{Start: 4, End: 7},
+		{Start: 1, End: 4},
+	}), safemem.FromIOWriter{&dst}, IOOpts{})
+	if wantN := int64(6); n != wantN || err != nil {
+		t.Errorf("CopyInTo: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+	}
+	if got, want := dst.Bytes(), []byte("barfoo"); !bytes.Equal(got, want) {
+		t.Errorf("dst.Bytes(): got %q, wanted %q", got, want)
+	}
+}
+
+func TestBytesIOCopyInToFailure(t *testing.T) {
+	b := newBytesIOString("Afoob")
+	var dst bytes.Buffer
+	n, err := b.CopyInTo(newContext(), AddrRangeSeqFromSlice([]AddrRange{
+		{Start: 1, End: 4},
+		{Start: 4, End: 7},
+	}), safemem.FromIOWriter{&dst}, IOOpts{})
+	if wantN, wantErr := int64(4), syserror.EFAULT; n != wantN || err != wantErr {
+		// Fixed: this test exercises CopyInTo, not CopyOutFrom; the message
+		// previously named the wrong method (copy-paste from the test above).
+		t.Errorf("CopyInTo: got (%v, %v), wanted (%v, %v)", n, err, wantN, wantErr)
+	}
+	if got, want := dst.Bytes(), []byte("foob"); !bytes.Equal(got, want) {
+		t.Errorf("dst.Bytes(): got %q, wanted %q", got, want)
+	}
+}
+
+type testStruct struct {
+	Int8   int8
+	Uint8  uint8
+	Int16  int16
+	Uint16 uint16
+	Int32  int32
+	Uint32 uint32
+	Int64  int64
+	Uint64 uint64
+}
+
+func TestCopyObject(t *testing.T) {
+	wantObj := testStruct{1, 2, 3, 4, 5, 6, 7, 8}
+	wantN := binary.Size(wantObj)
+	b := &BytesIO{make([]byte, wantN)}
+	ctx := newContext()
+	if n, err := CopyObjectOut(ctx, b, 0, &wantObj, IOOpts{}); n != wantN || err != nil {
+		t.Fatalf("CopyObjectOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+	}
+	var gotObj testStruct
+	if n, err := CopyObjectIn(ctx, b, 0, &gotObj, IOOpts{}); n != wantN || err != nil {
+		t.Errorf("CopyObjectIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+	}
+	if gotObj != wantObj {
+		t.Errorf("CopyObject round trip: got %+v, wanted %+v", gotObj,
wantObj) + } +} + +func TestCopyStringInShort(t *testing.T) { + want := strings.Repeat("A", copyStringIncrement-2) + mem := want + "\x00" + if got, err := CopyStringIn(newContext(), newBytesIOString(mem), 0, 2*copyStringIncrement, IOOpts{}); got != want || err != nil { + t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, nil)", got, err, want) + } +} + +func TestCopyStringInLong(t *testing.T) { + want := strings.Repeat("A", copyStringIncrement+1) + mem := want + "\x00" + if got, err := CopyStringIn(newContext(), newBytesIOString(mem), 0, 2*copyStringIncrement, IOOpts{}); got != want || err != nil { + t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, nil)", got, err, want) + } +} + +func TestCopyStringInNoTerminatingZeroByte(t *testing.T) { + want := strings.Repeat("A", copyStringIncrement-1) + got, err := CopyStringIn(newContext(), newBytesIOString(want), 0, 2*copyStringIncrement, IOOpts{}) + if wantErr := syserror.EFAULT; got != want || err != wantErr { + t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, %v)", got, err, want, wantErr) + } +} + +func TestCopyStringInTruncatedByMaxlen(t *testing.T) { + got, err := CopyStringIn(newContext(), newBytesIOString(strings.Repeat("A", 10)), 0, 5, IOOpts{}) + if want, wantErr := strings.Repeat("A", 5), syserror.ENAMETOOLONG; got != want || err != wantErr { + t.Errorf("CopyStringIn: got (%q, %v), wanted (%q, %v)", got, err, want, wantErr) + } +} + +func TestCopyInt32StringsInVec(t *testing.T) { + for _, test := range []struct { + str string + n int + initial []int32 + final []int32 + }{ + { + str: "100 200", + n: len("100 200"), + initial: []int32{1, 2}, + final: []int32{100, 200}, + }, + { + // Fewer values ok + str: "100", + n: len("100"), + initial: []int32{1, 2}, + final: []int32{100, 2}, + }, + { + // Extra values ok + str: "100 200 300", + n: len("100 200 "), + initial: []int32{1, 2}, + final: []int32{100, 200}, + }, + { + // Leading and trailing whitespace ok + str: " 100\t200\n", + n: len(" 100\t200\n"), + initial: 
[]int32{1, 2}, + final: []int32{100, 200}, + }, + } { + t.Run(fmt.Sprintf("%q", test.str), func(t *testing.T) { + src := BytesIOSequence([]byte(test.str)) + dsts := append([]int32(nil), test.initial...) + if n, err := CopyInt32StringsInVec(newContext(), src.IO, src.Addrs, dsts, src.Opts); n != int64(test.n) || err != nil { + t.Errorf("CopyInt32StringsInVec: got (%d, %v), wanted (%d, nil)", n, err, test.n) + } + if !reflect.DeepEqual(dsts, test.final) { + t.Errorf("dsts: got %v, wanted %v", dsts, test.final) + } + }) + } +} + +func TestCopyInt32StringsInVecRequiresOneValidValue(t *testing.T) { + for _, s := range []string{"", "\n", "a123"} { + t.Run(fmt.Sprintf("%q", s), func(t *testing.T) { + src := BytesIOSequence([]byte(s)) + initial := []int32{1, 2} + dsts := append([]int32(nil), initial...) + if n, err := CopyInt32StringsInVec(newContext(), src.IO, src.Addrs, dsts, src.Opts); err != syserror.EINVAL { + t.Errorf("CopyInt32StringsInVec: got (%d, %v), wanted (_, %v)", n, err, syserror.EINVAL) + } + if !reflect.DeepEqual(dsts, initial) { + t.Errorf("dsts: got %v, wanted %v", dsts, initial) + } + }) + } +} + +func TestIOSequenceCopyOut(t *testing.T) { + buf := []byte("ABCD") + s := BytesIOSequence(buf) + + // CopyOut limited by len(src). + n, err := s.CopyOut(newContext(), []byte("fo")) + if wantN := 2; n != wantN || err != nil { + t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if want := []byte("foCD"); !bytes.Equal(buf, want) { + t.Errorf("buf: got %q, wanted %q", buf, want) + } + s = s.DropFirst(2) + if got, want := s.NumBytes(), int64(2); got != want { + t.Errorf("NumBytes: got %v, wanted %v", got, want) + } + + // CopyOut limited by s.NumBytes(). 
+ n, err = s.CopyOut(newContext(), []byte("obar")) + if wantN := 2; n != wantN || err != nil { + t.Errorf("CopyOut: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if want := []byte("foob"); !bytes.Equal(buf, want) { + t.Errorf("buf: got %q, wanted %q", buf, want) + } + s = s.DropFirst(2) + if got, want := s.NumBytes(), int64(0); got != want { + t.Errorf("NumBytes: got %v, wanted %v", got, want) + } +} + +func TestIOSequenceCopyIn(t *testing.T) { + s := BytesIOSequence([]byte("foob")) + dst := []byte("ABCDEF") + + // CopyIn limited by len(dst). + n, err := s.CopyIn(newContext(), dst[:2]) + if wantN := 2; n != wantN || err != nil { + t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if want := []byte("foCDEF"); !bytes.Equal(dst, want) { + t.Errorf("dst: got %q, wanted %q", dst, want) + } + s = s.DropFirst(2) + if got, want := s.NumBytes(), int64(2); got != want { + t.Errorf("NumBytes: got %v, wanted %v", got, want) + } + + // CopyIn limited by s.Remaining(). + n, err = s.CopyIn(newContext(), dst[2:]) + if wantN := 2; n != wantN || err != nil { + t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if want := []byte("foobEF"); !bytes.Equal(dst, want) { + t.Errorf("dst: got %q, wanted %q", dst, want) + } + s = s.DropFirst(2) + if got, want := s.NumBytes(), int64(0); got != want { + t.Errorf("NumBytes: got %v, wanted %v", got, want) + } +} + +func TestIOSequenceZeroOut(t *testing.T) { + buf := []byte("ABCD") + s := BytesIOSequence(buf) + + // ZeroOut limited by toZero. + n, err := s.ZeroOut(newContext(), 2) + if wantN := int64(2); n != wantN || err != nil { + t.Errorf("ZeroOut: got (%v, %v), wanted (%v, nil)", n, err, wantN) + } + if want := []byte("\x00\x00CD"); !bytes.Equal(buf, want) { + t.Errorf("buf: got %q, wanted %q", buf, want) + } + s = s.DropFirst(2) + if got, want := s.NumBytes(), int64(2); got != want { + t.Errorf("NumBytes: got %v, wanted %v", got, want) + } + + // ZeroOut limited by s.NumBytes(). 
+	n, err = s.ZeroOut(newContext(), 4)
+	if wantN := int64(2); n != wantN || err != nil {
+		// Fixed: this check follows a ZeroOut call; the message previously
+		// said "CopyOut" (copy-paste from TestIOSequenceCopyOut).
+		t.Errorf("ZeroOut: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+	}
+	if want := []byte("\x00\x00\x00\x00"); !bytes.Equal(buf, want) {
+		t.Errorf("buf: got %q, wanted %q", buf, want)
+	}
+	s = s.DropFirst(2)
+	if got, want := s.NumBytes(), int64(0); got != want {
+		t.Errorf("NumBytes: got %v, wanted %v", got, want)
+	}
+}
+
+func TestIOSequenceTakeFirst(t *testing.T) {
+	s := BytesIOSequence([]byte("foobar"))
+	if got, want := s.NumBytes(), int64(6); got != want {
+		t.Errorf("NumBytes: got %v, wanted %v", got, want)
+	}
+
+	s = s.TakeFirst(3)
+	if got, want := s.NumBytes(), int64(3); got != want {
+		t.Errorf("NumBytes: got %v, wanted %v", got, want)
+	}
+
+	// TakeFirst(n) where n > s.NumBytes() is a no-op.
+	s = s.TakeFirst(9)
+	if got, want := s.NumBytes(), int64(3); got != want {
+		t.Errorf("NumBytes: got %v, wanted %v", got, want)
+	}
+
+	var dst [3]byte
+	n, err := s.CopyIn(newContext(), dst[:])
+	if wantN := 3; n != wantN || err != nil {
+		t.Errorf("CopyIn: got (%v, %v), wanted (%v, nil)", n, err, wantN)
+	}
+	if got, want := dst[:], []byte("foo"); !bytes.Equal(got, want) {
+		t.Errorf("dst: got %q, wanted %q", got, want)
+	}
+	s = s.DropFirst(3)
+	if got, want := s.NumBytes(), int64(0); got != want {
+		t.Errorf("NumBytes: got %v, wanted %v", got, want)
+	}
+}
diff --git a/pkg/sentry/usermem/usermem_x86.go b/pkg/sentry/usermem/usermem_x86.go
new file mode 100644
index 000000000..2484b0d82
--- /dev/null
+++ b/pkg/sentry/usermem/usermem_x86.go
@@ -0,0 +1,38 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 i386 + +package usermem + +import "encoding/binary" + +const ( + // PageSize is the system page size. + PageSize = 1 << PageShift + + // HugePageSize is the system huge page size. + HugePageSize = 1 << HugePageShift + + // PageShift is the binary log of the system page size. + PageShift = 12 + + // HugePageShift is the binary log of the system huge page size. + HugePageShift = 21 +) + +var ( + // ByteOrder is the native byte order (little endian). + ByteOrder = binary.LittleEndian +) diff --git a/pkg/sentry/watchdog/BUILD b/pkg/sentry/watchdog/BUILD new file mode 100644 index 000000000..28fae4490 --- /dev/null +++ b/pkg/sentry/watchdog/BUILD @@ -0,0 +1,17 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "watchdog", + srcs = ["watchdog.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/watchdog", + visibility = ["//:sandbox"], + deps = [ + "//pkg/abi/linux", + "//pkg/log", + "//pkg/metric", + "//pkg/sentry/kernel", + "//pkg/sentry/kernel/time", + ], +) diff --git a/pkg/sentry/watchdog/watchdog.go b/pkg/sentry/watchdog/watchdog.go new file mode 100644 index 000000000..5b620693d --- /dev/null +++ b/pkg/sentry/watchdog/watchdog.go @@ -0,0 +1,279 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package watchdog is responsible for monitoring the sentry for tasks that may
+// potentially be stuck or looping indefinitely, causing hard to debug hangs in
+// the untrusted app.
+//
+// It works by periodically querying all tasks to check whether they are in user
+// mode (RunUser), kernel mode (RunSys), or blocked in the kernel (OffCPU). Tasks
+// that have been running in kernel mode for a long time in the same syscall
+// without blocking are considered stuck and are reported.
+//
+// When a stuck task is detected, the watchdog can take one of the following actions:
+//		1. LogWarning: Logs a warning message followed by a stack dump of all goroutines.
+//			 If a task continues to be stuck, the message will repeat every minute, unless
+//			 a new stuck task is detected.
+//		2. Panic: same as above, followed by panic()
+//
+package watchdog
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+	"time"
+
+	"gvisor.googlesource.com/gvisor/pkg/abi/linux"
+	"gvisor.googlesource.com/gvisor/pkg/log"
+	"gvisor.googlesource.com/gvisor/pkg/metric"
+	"gvisor.googlesource.com/gvisor/pkg/sentry/kernel"
+	ktime "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/time"
+)
+
+// DefaultTimeout is a reasonable timeout value for most applications.
+const DefaultTimeout = 3 * time.Minute
+
+// descheduleThreshold is the amount of time scheduling needs to be off before the entire wait period
+// is discounted from task's last update time. It's set high enough that small scheduling delays won't
+// trigger it.
+const descheduleThreshold = 1 * time.Second + +var stuckTasks = metric.MustCreateNewUint64Metric("/watchdog/stuck_tasks_detected", true /* sync */, "Cumulative count of stuck tasks detected") + +// Amount of time to wait before dumping the stack to the log again when the same task(s) remains stuck. +var stackDumpSameTaskPeriod = time.Minute + +// Action defines what action to take when a stuck task is detected. +type Action int + +const ( + // LogWarning logs warning message followed by stack trace. + LogWarning Action = iota + // Panic will do the same logging as LogWarning and panic(). + Panic +) + +// String returns Action's string representation. +func (a Action) String() string { + switch a { + case LogWarning: + return "LogWarning" + case Panic: + return "Panic" + default: + panic(fmt.Sprintf("Invalid action: %d", a)) + } +} + +// Watchdog is the main watchdog class. It controls a goroutine that periodically +// analyses all tasks and reports if any of them appear to be stuck. +type Watchdog struct { + // period indicates how often to check all tasks. It's calculated based on + // 'taskTimeout'. + period time.Duration + + // taskTimeout is the amount of time to allow a task to execute the same syscall + // without blocking before it's declared stuck. + taskTimeout time.Duration + + // timeoutAction indicates what action to take when a stuck tasks is detected. + timeoutAction Action + + // k is where the tasks come from. + k *kernel.Kernel + + // stop is used to notify to watchdog should stop. + stop chan struct{} + + // done is used to notify when the watchdog has stopped. + done chan struct{} + + // offenders map contains all tasks that are currently stuck. + offenders map[*kernel.Task]*offender + + // lastStackDump tracks the last time a stack dump was generated to prevent + // spamming the log. + lastStackDump time.Time + + // lastRun is set to the last time the watchdog executed a monitoring loop. + lastRun ktime.Time + + // mu protects the fields below. 
+ mu sync.Mutex + + // started is true if the watchdog has been started before. + started bool +} + +type offender struct { + lastUpdateTime ktime.Time +} + +// New creates a new watchdog. +func New(k *kernel.Kernel, taskTimeout time.Duration, a Action) *Watchdog { + // 4 is arbitrary, just don't want to prolong 'taskTimeout' too much. + period := taskTimeout / 4 + return &Watchdog{ + k: k, + period: period, + taskTimeout: taskTimeout, + timeoutAction: a, + offenders: make(map[*kernel.Task]*offender), + stop: make(chan struct{}), + done: make(chan struct{}), + } +} + +// Start starts the watchdog. +func (w *Watchdog) Start() { + if w.taskTimeout == 0 { + log.Infof("Watchdog disabled") + return + } + + w.mu.Lock() + defer w.mu.Unlock() + if w.started { + return + } + + w.lastRun = w.k.MonotonicClock().Now() + + log.Infof("Starting watchdog, period: %v, timeout: %v, action: %v", w.period, w.taskTimeout, w.timeoutAction) + go w.loop() // S/R-SAFE: watchdog is stopped during save and restarted after restore. + w.started = true +} + +// Stop requests the watchdog to stop and wait for it. +func (w *Watchdog) Stop() { + if w.taskTimeout == 0 { + return + } + + w.mu.Lock() + defer w.mu.Unlock() + if !w.started { + return + } + log.Infof("Stopping watchdog") + w.stop <- struct{}{} + <-w.done + w.started = false + log.Infof("Watchdog stopped") +} + +// loop is the main watchdog routine. It only returns when 'Stop()' is called. +func (w *Watchdog) loop() { + // Loop until someone stops it. + for { + select { + case <-w.stop: + w.done <- struct{}{} + return + case <-time.After(w.period): + w.runTurn() + } + } +} + +// runTurn runs a single pass over all tasks and reports anything it finds. 
+func (w *Watchdog) runTurn() { + tasks := w.k.TaskSet().Root.Tasks() + + newOffenders := make(map[*kernel.Task]*offender) + newTaskFound := false + now := ktime.FromNanoseconds(int64(w.k.CPUClockNow() * uint64(linux.ClockTick))) + + // The process may be running with low CPU limit making tasks appear stuck because + // are starved of CPU cycles. An estimate is that Tasks could have been starved + // since the last time the watchdog run. If the watchdog detects that scheduling + // is off, it will discount the entire duration since last run from 'lastUpdateTime'. + discount := time.Duration(0) + if now.Sub(w.lastRun.Add(w.period)) > descheduleThreshold { + discount = now.Sub(w.lastRun) + } + w.lastRun = now + + log.Infof("Watchdog starting loop, tasks: %d, discount: %v", len(tasks), discount) + for _, t := range tasks { + tsched := t.TaskGoroutineSchedInfo() + + // An offender is a task running inside the kernel for longer than the specified timeout. + if tsched.State == kernel.TaskGoroutineRunningSys { + lastUpdateTime := ktime.FromNanoseconds(int64(tsched.Timestamp * uint64(linux.ClockTick))) + elapsed := now.Sub(lastUpdateTime) - discount + if elapsed > w.taskTimeout { + tc, ok := w.offenders[t] + if !ok { + // New stuck task detected. + // + // TODO: Tasks blocked doing IO may be considered stuck in kernel. + tc = &offender{lastUpdateTime: lastUpdateTime} + stuckTasks.Increment() + newTaskFound = true + } + newOffenders[t] = tc + } + } + } + if len(newOffenders) > 0 { + w.report(newOffenders, newTaskFound, now) + } + + // Remember which tasks have been reported. + w.offenders = newOffenders +} + +// report takes appropriate action when a stuck task is detected. 
+// report takes appropriate action when a stuck task is detected: it builds a
+// human-readable summary of the offenders and either logs it (LogWarning) or
+// dumps all stacks and panics (Panic).
+func (w *Watchdog) report(offenders map[*kernel.Task]*offender, newTaskFound bool, now ktime.Time) {
+	var buf bytes.Buffer
+	buf.WriteString(fmt.Sprintf("Sentry detected %d stuck task(s):\n", len(offenders)))
+	for t, o := range offenders {
+		tid := w.k.TaskSet().Root.IDOfTask(t)
+		buf.WriteString(fmt.Sprintf("\tTask tid: %v (%#x), entered RunSys state %v ago.\n", tid, uint64(tid), now.Sub(o.lastUpdateTime)))
+	}
+	buf.WriteString("Search for '(*Task).run(0x..., 0x<tid>)' in the stack dump to find the offending goroutine")
+
+	switch w.timeoutAction {
+	case LogWarning:
+		// Dump stack only if a new task is detected or if some time has passed since
+		// the last time a stack dump was generated.
+		if !newTaskFound && time.Since(w.lastStackDump) < stackDumpSameTaskPeriod {
+			buf.WriteString("\n...[stack dump skipped]...")
+			// Fixed: pass the message as an argument, not as the format string;
+			// a stray '%' in the buffer would otherwise garble the log output
+			// (and this fails the go vet printf check).
+			log.Warningf("%s", buf.String())
+		} else {
+			log.TracebackAll(buf.String())
+			w.lastStackDump = time.Now()
+		}
+
+	case Panic:
+		// Panic will skip over running tasks, which is likely the culprit here. So manually
+		// dump all stacks before panic'ing.
+		log.TracebackAll(buf.String())
+
+		// Attempt to flush metrics, timeout and move on in case metrics are stuck as well.
+		metricsEmitted := make(chan struct{}, 1)
+		go func() { // S/R-SAFE: watchdog is stopped during save and restarted after restore.
+			// Flush metrics before killing process.
+			metric.EmitMetricUpdate()
+			metricsEmitted <- struct{}{}
+		}()
+		select {
+		case <-metricsEmitted:
+		case <-time.After(1 * time.Second):
+		}
+		panic("Sentry detected stuck task(s). 
See stack trace and message above for more details") + } +} diff --git a/pkg/sleep/BUILD b/pkg/sleep/BUILD new file mode 100644 index 000000000..ab3f9ad99 --- /dev/null +++ b/pkg/sleep/BUILD @@ -0,0 +1,24 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "sleep", + srcs = [ + "commit_amd64.s", + "commit_asm.go", + "commit_noasm.go", + "sleep_unsafe.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sleep", + visibility = ["//:sandbox"], +) + +go_test( + name = "sleep_test", + size = "medium", + srcs = [ + "sleep_test.go", + ], + embed = [":sleep"], +) diff --git a/pkg/sleep/commit_amd64.s b/pkg/sleep/commit_amd64.s new file mode 100644 index 000000000..a5b620bf8 --- /dev/null +++ b/pkg/sleep/commit_amd64.s @@ -0,0 +1,21 @@ +#include "textflag.h" + +#define preparingG 1 + +// See commit_noasm.go for a description of commitSleep. +// +// func commitSleep(g uintptr, waitingG *uintptr) bool +TEXT ·commitSleep(SB),NOSPLIT,$0-24 + MOVQ waitingG+8(FP), CX + MOVQ g+0(FP), DX + + // Store the G in waitingG if it's still preparingG. If it's anything + // else it means a waker has aborted the sleep. + MOVQ $preparingG, AX + LOCK + CMPXCHGQ DX, 0(CX) + + SETEQ AX + MOVB AX, ret+16(FP) + + RET diff --git a/pkg/sleep/commit_arm64.s b/pkg/sleep/commit_arm64.s new file mode 100644 index 000000000..9d351d00a --- /dev/null +++ b/pkg/sleep/commit_arm64.s @@ -0,0 +1,5 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Empty assembly file so empty func definitions work. diff --git a/pkg/sleep/commit_asm.go b/pkg/sleep/commit_asm.go new file mode 100644 index 000000000..b7589dfef --- /dev/null +++ b/pkg/sleep/commit_asm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64
+
+package sleep
+
+// See commit_noasm.go for a description of commitSleep.
+func commitSleep(g uintptr, waitingG *uintptr) bool
diff --git a/pkg/sleep/commit_noasm.go b/pkg/sleep/commit_noasm.go
new file mode 100644
index 000000000..22c734e1d
--- /dev/null
+++ b/pkg/sleep/commit_noasm.go
@@ -0,0 +1,32 @@
+// Copyright 2017 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+// +build !amd64
+
+package sleep
+
+import "sync/atomic"
+
+// commitSleep signals to wakers that the given g is now sleeping. Wakers can
+// then fetch it and wake it.
+//
+// The commit may fail if wakers have been asserted after our last check, in
+// which case they will have set s.waitingG to zero.
+//
+// An assembly version is used on amd64 because commitSleep is called from
+// g0, so it doesn't have a race context; this Go version is the fallback.
+func commitSleep(g uintptr, waitingG *uintptr) bool {
+	for {
+		// Check if the wait was aborted.
+		if atomic.LoadUintptr(waitingG) == 0 {
+			return false
+		}
+
+		// Try to store the G so that wakers know who to wake.
+		if atomic.CompareAndSwapUintptr(waitingG, preparingG, g) {
+			return true
+		}
+	}
+}
diff --git a/pkg/sleep/sleep_test.go b/pkg/sleep/sleep_test.go
new file mode 100644
index 000000000..eba18014c
--- /dev/null
+++ b/pkg/sleep/sleep_test.go
@@ -0,0 +1,528 @@
+package sleep
+
+import (
+	"math/rand"
+	"runtime"
+	"testing"
+	"time"
+)
+
+// TestZeroWakerNotAsserted tests that a zero-value waker is in non-asserted state.
+func TestZeroWakerNotAsserted(t *testing.T) {
+	var w Waker
+	if w.IsAsserted() {
+		t.Fatalf("Zero waker is asserted")
+	}
+
+	if w.Clear() {
+		t.Fatalf("Zero waker is asserted")
+	}
+}
+
+// TestAssertedWakerAfterAssert tests that a waker properly reports its state as
+// asserted once its Assert() method is called.
+func TestAssertedWakerAfterAssert(t *testing.T) {
+	var w Waker
+	w.Assert()
+	if !w.IsAsserted() {
+		t.Fatalf("Asserted waker is not reported as such")
+	}
+
+	if !w.Clear() {
+		t.Fatalf("Asserted waker is not reported as such")
+	}
+}
+
+// TestAssertedWakerAfterTwoAsserts tests that a waker properly reports its state as
+// asserted once its Assert() method is called twice.
+func TestAssertedWakerAfterTwoAsserts(t *testing.T) {
+	var w Waker
+	w.Assert()
+	w.Assert()
+	if !w.IsAsserted() {
+		t.Fatalf("Asserted waker is not reported as such")
+	}
+
+	if !w.Clear() {
+		t.Fatalf("Asserted waker is not reported as such")
+	}
+}
+
+// TestNotAssertedWakerWithSleeper tests that a waker properly reports its state as
+// not asserted after a sleeper is associated with it.
+func TestNotAssertedWakerWithSleeper(t *testing.T) {
+	var w Waker
+	var s Sleeper
+	s.AddWaker(&w, 0)
+	if w.IsAsserted() {
+		t.Fatalf("Non-asserted waker is reported as asserted")
+	}
+
+	if w.Clear() {
+		t.Fatalf("Non-asserted waker is reported as asserted")
+	}
+}
+
+// TestNotAssertedWakerAfterWake tests that a waker properly reports its state as
+// not asserted after a previous assert is consumed by a sleeper. That is, tests
+// the "edge-triggered" behavior.
+func TestNotAssertedWakerAfterWake(t *testing.T) {
+	var w Waker
+	var s Sleeper
+	s.AddWaker(&w, 0)
+	w.Assert()
+	s.Fetch(true)
+	if w.IsAsserted() {
+		t.Fatalf("Consumed waker is reported as asserted")
+	}
+
+	if w.Clear() {
+		t.Fatalf("Consumed waker is reported as asserted")
+	}
+}
+
+// TestAssertedWakerBeforeAdd tests that a waker causes a sleeper to not sleep if
+// it's already asserted before being added.
+func TestAssertedWakerBeforeAdd(t *testing.T) {
+	var w Waker
+	var s Sleeper
+	w.Assert()
+	s.AddWaker(&w, 0)
+
+	if _, ok := s.Fetch(false); !ok {
+		t.Fatalf("Fetch failed even though asserted waker was added")
+	}
+}
+
+// TestClearedWaker tests that a waker properly reports its state as not asserted
+// after it is cleared.
+func TestClearedWaker(t *testing.T) {
+	var w Waker
+	w.Assert()
+	w.Clear()
+	if w.IsAsserted() {
+		t.Fatalf("Cleared waker is reported as asserted")
+	}
+
+	if w.Clear() {
+		t.Fatalf("Cleared waker is reported as asserted")
+	}
+}
+
+// TestClearedWakerWithSleeper tests that a waker properly reports its state as
+// not asserted when it is cleared while it has a sleeper associated with it.
+func TestClearedWakerWithSleeper(t *testing.T) {
+	var w Waker
+	var s Sleeper
+	s.AddWaker(&w, 0)
+	w.Clear()
+	if w.IsAsserted() {
+		t.Fatalf("Cleared waker is reported as asserted")
+	}
+
+	if w.Clear() {
+		t.Fatalf("Cleared waker is reported as asserted")
+	}
+}
+
+// TestClearedWakerAssertedWithSleeper tests that a waker properly reports its state
+// as not asserted when it is cleared while it has a sleeper associated with it
+// and has been asserted.
+func TestClearedWakerAssertedWithSleeper(t *testing.T) {
+	var w Waker
+	var s Sleeper
+	s.AddWaker(&w, 0)
+	w.Assert()
+	w.Clear()
+	if w.IsAsserted() {
+		t.Fatalf("Cleared waker is reported as asserted")
+	}
+
+	if w.Clear() {
+		t.Fatalf("Cleared waker is reported as asserted")
+	}
+}
+
+// TestBlock tests that a sleeper actually blocks waiting for the waker to
+// assert its state.
+func TestBlock(t *testing.T) {
+	var w Waker
+	var s Sleeper
+
+	s.AddWaker(&w, 0)
+
+	// Assert waker after one second.
+	before := time.Now()
+	go func() {
+		time.Sleep(1 * time.Second)
+		w.Assert()
+	}()
+
+	// Fetch the result and make sure it took at least 500ms.
+	if _, ok := s.Fetch(true); !ok {
+		t.Fatalf("Fetch failed unexpectedly")
+	}
+	if d := time.Now().Sub(before); d < 500*time.Millisecond {
+		t.Fatalf("Duration was too short: %v", d)
+	}
+
+	// Check that already-asserted waker completes inline.
+	w.Assert()
+	if _, ok := s.Fetch(true); !ok {
+		t.Fatalf("Fetch failed unexpectedly")
+	}
+
+	// Check that fetch sleeps if waker had been asserted but was reset
+	// before Fetch is called.
+ w.Assert() + w.Clear() + before = time.Now() + go func() { + time.Sleep(1 * time.Second) + w.Assert() + }() + if _, ok := s.Fetch(true); !ok { + t.Fatalf("Fetch failed unexpectedly") + } + if d := time.Now().Sub(before); d < 500*time.Millisecond { + t.Fatalf("Duration was too short: %v", d) + } +} + +// TestNonBlock checks that a sleeper won't block if waker isn't asserted. +func TestNonBlock(t *testing.T) { + var w Waker + var s Sleeper + + // Don't block when there's no waker. + if _, ok := s.Fetch(false); ok { + t.Fatalf("Fetch succeeded when there is no waker") + } + + // Don't block when waker isn't asserted. + s.AddWaker(&w, 0) + if _, ok := s.Fetch(false); ok { + t.Fatalf("Fetch succeeded when waker was not asserted") + } + + // Don't block when waker was asserted, but isn't anymore. + w.Assert() + w.Clear() + if _, ok := s.Fetch(false); ok { + t.Fatalf("Fetch succeeded when waker was not asserted anymore") + } + + // Don't block when waker was consumed by previous Fetch(). + w.Assert() + if _, ok := s.Fetch(false); !ok { + t.Fatalf("Fetch failed even though waker was asserted") + } + + if _, ok := s.Fetch(false); ok { + t.Fatalf("Fetch succeeded when waker had been consumed") + } +} + +// TestMultiple checks that a sleeper can wait for and receives notifications +// from multiple wakers. +func TestMultiple(t *testing.T) { + s := Sleeper{} + w1 := Waker{} + w2 := Waker{} + + s.AddWaker(&w1, 0) + s.AddWaker(&w2, 1) + + w1.Assert() + w2.Assert() + + v, ok := s.Fetch(false) + if !ok { + t.Fatalf("Fetch failed when there are asserted wakers") + } + + if v != 0 && v != 1 { + t.Fatalf("Unexpected waker id: %v", v) + } + + want := 1 - v + v, ok = s.Fetch(false) + if !ok { + t.Fatalf("Fetch failed when there is an asserted waker") + } + + if v != want { + t.Fatalf("Unexpected waker id, got %v, want %v", v, want) + } +} + +// TestDoneFunction tests if calling Done() on a sleeper works properly. +func TestDoneFunction(t *testing.T) { + // Trivial case of no waker. 
+ s := Sleeper{} + s.Done() + + // Cases when the sleeper has n wakers, but none are asserted. + for n := 1; n < 20; n++ { + s := Sleeper{} + w := make([]Waker, n) + for j := 0; j < n; j++ { + s.AddWaker(&w[j], j) + } + s.Done() + } + + // Cases when the sleeper has n wakers, and only the i-th one is + // asserted. + for n := 1; n < 20; n++ { + for i := 0; i < n; i++ { + s := Sleeper{} + w := make([]Waker, n) + for j := 0; j < n; j++ { + s.AddWaker(&w[j], j) + } + w[i].Assert() + s.Done() + } + } + + // Cases when the sleeper has n wakers, and the i-th one is asserted + // and cleared. + for n := 1; n < 20; n++ { + for i := 0; i < n; i++ { + s := Sleeper{} + w := make([]Waker, n) + for j := 0; j < n; j++ { + s.AddWaker(&w[j], j) + } + w[i].Assert() + w[i].Clear() + s.Done() + } + } + + // Cases when the sleeper has n wakers, with a random number of them + // asserted. + for n := 1; n < 20; n++ { + for iters := 0; iters < 1000; iters++ { + s := Sleeper{} + w := make([]Waker, n) + for j := 0; j < n; j++ { + s.AddWaker(&w[j], j) + } + + // Pick the number of asserted elements, then assert + // random wakers. + asserted := rand.Int() % (n + 1) + for j := 0; j < asserted; j++ { + w[rand.Int()%n].Assert() + } + s.Done() + } + } +} + +// TestRace tests that multiple wakers can continuously send wake requests to +// the sleeper. +func TestRace(t *testing.T) { + const wakers = 100 + const wakeRequests = 10000 + + counts := make([]int, wakers) + w := make([]Waker, wakers) + s := Sleeper{} + + // Associate each waker and start goroutines that will assert them. + for i := range w { + s.AddWaker(&w[i], i) + go func(w *Waker) { + n := 0 + for n < wakeRequests { + if !w.IsAsserted() { + w.Assert() + n++ + } else { + runtime.Gosched() + } + } + }(&w[i]) + } + + // Wait for all wake up notifications from all wakers. + for i := 0; i < wakers*wakeRequests; i++ { + v, _ := s.Fetch(true) + counts[v]++ + } + + // Check that we got the right number for each. 
+ for i, v := range counts { + if v != wakeRequests { + t.Errorf("Waker %v only got %v wakes", i, v) + } + } +} + +// BenchmarkSleeperMultiSelect measures how long it takes to fetch a wake up +// from 4 wakers when at least one is already asserted. +func BenchmarkSleeperMultiSelect(b *testing.B) { + const count = 4 + s := Sleeper{} + w := make([]Waker, count) + for i := range w { + s.AddWaker(&w[i], i) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + w[count-1].Assert() + s.Fetch(true) + } +} + +// BenchmarkGoMultiSelect measures how long it takes to fetch a zero-length +// struct from one of 4 channels when at least one is ready. +func BenchmarkGoMultiSelect(b *testing.B) { + const count = 4 + ch := make([]chan struct{}, count) + for i := range ch { + ch[i] = make(chan struct{}, 1) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ch[count-1] <- struct{}{} + select { + case <-ch[0]: + case <-ch[1]: + case <-ch[2]: + case <-ch[3]: + } + } +} + +// BenchmarkSleeperSingleSelect measures how long it takes to fetch a wake up +// from one waker that is already asserted. +func BenchmarkSleeperSingleSelect(b *testing.B) { + s := Sleeper{} + w := Waker{} + s.AddWaker(&w, 0) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + w.Assert() + s.Fetch(true) + } +} + +// BenchmarkGoSingleSelect measures how long it takes to fetch a zero-length +// struct from a channel that already has it buffered. +func BenchmarkGoSingleSelect(b *testing.B) { + ch := make(chan struct{}, 1) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ch <- struct{}{} + <-ch + } +} + +// BenchmarkSleeperAssertNonWaiting measures how long it takes to assert a +// channel that is already asserted. +func BenchmarkSleeperAssertNonWaiting(b *testing.B) { + w := Waker{} + w.Assert() + for i := 0; i < b.N; i++ { + w.Assert() + } + +} + +// BenchmarkGoAssertNonWaiting measures how long it takes to write to a channel +// that has already something written to it. 
+func BenchmarkGoAssertNonWaiting(b *testing.B) { + ch := make(chan struct{}, 1) + ch <- struct{}{} + for i := 0; i < b.N; i++ { + select { + case ch <- struct{}{}: + default: + } + } +} + +// BenchmarkSleeperWaitOnSingleSelect measures how long it takes to wait on one +// waker channel while another goroutine wakes up the sleeper. This assumes that +// a new goroutine doesn't run immediately (i.e., the creator of a new goroutine +// is allowed to go to sleep before the new goroutine has a chance to run). +func BenchmarkSleeperWaitOnSingleSelect(b *testing.B) { + s := Sleeper{} + w := Waker{} + s.AddWaker(&w, 0) + for i := 0; i < b.N; i++ { + go func() { + w.Assert() + }() + s.Fetch(true) + } + +} + +// BenchmarkGoWaitOnSingleSelect measures how long it takes to wait on one +// channel while another goroutine wakes up the sleeper. This assumes that a new +// goroutine doesn't run immediately (i.e., the creator of a new goroutine is +// allowed to go to sleep before the new goroutine has a chance to run). +func BenchmarkGoWaitOnSingleSelect(b *testing.B) { + ch := make(chan struct{}, 1) + for i := 0; i < b.N; i++ { + go func() { + ch <- struct{}{} + }() + <-ch + } +} + +// BenchmarkSleeperWaitOnMultiSelect measures how long it takes to wait on 4 +// wakers while another goroutine wakes up the sleeper. This assumes that a new +// goroutine doesn't run immediately (i.e., the creator of a new goroutine is +// allowed to go to sleep before the new goroutine has a chance to run). +func BenchmarkSleeperWaitOnMultiSelect(b *testing.B) { + const count = 4 + s := Sleeper{} + w := make([]Waker, count) + for i := range w { + s.AddWaker(&w[i], i) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + go func() { + w[count-1].Assert() + }() + s.Fetch(true) + } +} + +// BenchmarkGoWaitOnMultiSelect measures how long it takes to wait on 4 channels +// while another goroutine wakes up the sleeper. 
This assumes that a new +// goroutine doesn't run immediately (i.e., the creator of a new goroutine is +// allowed to go to sleep before the new goroutine has a chance to run). +func BenchmarkGoWaitOnMultiSelect(b *testing.B) { + const count = 4 + ch := make([]chan struct{}, count) + for i := range ch { + ch[i] = make(chan struct{}, 1) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + go func() { + ch[count-1] <- struct{}{} + }() + select { + case <-ch[0]: + case <-ch[1]: + case <-ch[2]: + case <-ch[3]: + } + } +} diff --git a/pkg/sleep/sleep_unsafe.go b/pkg/sleep/sleep_unsafe.go new file mode 100644 index 000000000..5ecb7a3ac --- /dev/null +++ b/pkg/sleep/sleep_unsafe.go @@ -0,0 +1,385 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sleep allows goroutines to efficiently sleep on multiple sources of +// notifications (wakers). It offers O(1) complexity, which is different from +// multi-channel selects which have O(n) complexity (where n is the number of +// channels) and a considerable constant factor. +// +// It is similar to edge-triggered epoll waits, where the user registers each +// object of interest once, and then can repeatedly wait on all of them. +// +// A Waker object is used to wake a sleeping goroutine (G) up, or prevent it +// from going to sleep next. A Sleeper object is used to receive notifications +// from wakers, and if no notifications are available, to optionally sleep until +// one becomes available. +// +// A Waker can be associated with at most one Sleeper, but a Sleeper can be +// associated with multiple Wakers. A Sleeper has a list of asserted (ready) +// wakers; when Fetch() is called repeatedly, elements from this list are +// returned until the list becomes empty in which case the goroutine goes to +// sleep. 
When Assert() is called on a Waker, it adds itself to the Sleeper's +// asserted list and wakes the G up from its sleep if needed. +// +// Sleeper objects are expected to be used as follows, with just one goroutine +// executing this code: +// +// // One time set-up. +// s := sleep.Sleeper{} +// s.AddWaker(&w1, constant1) +// s.AddWaker(&w2, constant2) +// +// // Called repeatedly. +// for { +// switch id, _ := s.Fetch(true); id { +// case constant1: +// // Do work triggered by w1 being asserted. +// case constant2: +// // Do work triggered by w2 being asserted. +// } +// } +// +// And Waker objects are expected to call w.Assert() when they want the sleeper +// to wake up and perform work. +// +// The notifications are edge-triggered, which means that if a Waker calls +// Assert() several times before the sleeper has the chance to wake up, it will +// only be notified once and should perform all pending work (alternatively, it +// can also call Assert() on the waker, to ensure that it will wake up again). +// +// The "unsafeness" here is in the casts to/from unsafe.Pointer, which is safe +// when only one type is used for each unsafe.Pointer (which is the case here), +// we should just make sure that this remains the case in the future. The usage +// of unsafe package could be confined to sharedWaker and sharedSleeper types +// that would hold pointers in atomic.Pointers, but the go compiler currently +// can't optimize these as well (it won't inline their method calls), which +// reduces performance. +package sleep + +import ( + "sync/atomic" + "unsafe" +) + +const ( + // preparingG is stored in sleepers to indicate that they're preparing + // to sleep. + preparingG = 1 +) + +var ( + // assertedSleeper is a sentinel sleeper. A pointer to it is stored in + // wakers that are asserted. 
+ assertedSleeper Sleeper +) + +//go:linkname gopark runtime.gopark +func gopark(unlockf func(uintptr, *uintptr) bool, wg *uintptr, reason string, traceEv byte, traceskip int) + +//go:linkname goready runtime.goready +func goready(g uintptr, traceskip int) + +// Sleeper allows a goroutine to sleep and receive wake up notifications from +// Wakers in an efficient way. +// +// This is similar to edge-triggered epoll in that wakers are added to the +// sleeper once and the sleeper can then repeatedly sleep in O(1) time while +// waiting on all wakers. +// +// None of the methods in a Sleeper can be called concurrently. Wakers that have +// been added to a sleeper A can only be added to another sleeper after A.Done() +// returns. These restrictions allow this to be implemented lock-free. +// +// This struct is thread-compatible. +type Sleeper struct { + // sharedList is a "stack" of asserted wakers. They atomically add + // themselves to the front of this list as they become asserted. + sharedList unsafe.Pointer + + // localList is a list of asserted wakers that is only accessible to the + // waiter, and thus doesn't have to be accessed atomically. When + // fetching more wakers, the waiter will first go through this list, and + // only when it's empty will it atomically fetch wakers from + // sharedList. + localList *Waker + + // allWakers is a list with all wakers that have been added to this + // sleeper. It is used during cleanup to remove associations. + allWakers *Waker + + // waitingG holds the G that is sleeping, if any. It is used by wakers + // to determine which G, if any, they should wake. + waitingG uintptr +} + +// AddWaker associates the given waker to the sleeper. id is the value to be +// returned when the sleeper is woken by the given waker. +func (s *Sleeper) AddWaker(w *Waker, id int) { + // Add the waker to the list of all wakers. + w.allWakersNext = s.allWakers + s.allWakers = w + w.id = id + + // Try to associate the waker with the sleeper. 
If it's already + // asserted, we simply enqueue it in the "ready" list. + for { + p := (*Sleeper)(atomic.LoadPointer(&w.s)) + if p == &assertedSleeper { + s.enqueueAssertedWaker(w) + return + } + + if atomic.CompareAndSwapPointer(&w.s, usleeper(p), usleeper(s)) { + return + } + } +} + +// nextWaker returns the next waker in the notification list, blocking if +// needed. +func (s *Sleeper) nextWaker(block bool) *Waker { + // Attempt to replenish the local list if it's currently empty. + if s.localList == nil { + for atomic.LoadPointer(&s.sharedList) == nil { + // Fail request if caller requested that we + // don't block. + if !block { + return nil + } + + // Indicate to wakers that we're about to sleep, + // this allows them to abort the wait by setting + // waitingG back to zero (which we'll notice + // before committing the sleep). + atomic.StoreUintptr(&s.waitingG, preparingG) + + // Check if something was queued while we were + // preparing to sleep. We need this interleaving + // to avoid missing wake ups. + if atomic.LoadPointer(&s.sharedList) != nil { + atomic.StoreUintptr(&s.waitingG, 0) + break + } + + // Try to commit the sleep and report it to the + // tracer as a select. + // + // gopark puts the caller to sleep and calls + // commitSleep to decide whether to immediately + // wake the caller up or to leave it sleeping. + const traceEvGoBlockSelect = 24 + gopark(commitSleep, &s.waitingG, "sleeper", traceEvGoBlockSelect, 0) + } + + // Pull the shared list out and reverse it in the local + // list. Given that wakers push themselves in reverse + // order, we fix things here. + v := (*Waker)(atomic.SwapPointer(&s.sharedList, nil)) + for v != nil { + cur := v + v = v.next + + cur.next = s.localList + s.localList = cur + } + } + + // Remove the waker in the front of the list. + w := s.localList + s.localList = w.next + + return w +} + +// Fetch fetches the next wake-up notification. If a notification is immediately +// available, it is returned right away. 
Otherwise, the behavior depends on the +// value of 'block': if true, the current goroutine blocks until a notification +// arrives, then returns it; if false, returns 'ok' as false. +// +// When 'ok' is true, the value of 'id' corresponds to the id associated with +// the waker; when 'ok' is false, 'id' is undefined. +// +// N.B. This method is *not* thread-safe. Only one goroutine at a time is +// allowed to call this method. +func (s *Sleeper) Fetch(block bool) (id int, ok bool) { + for { + w := s.nextWaker(block) + if w == nil { + return -1, false + } + + // Reassociate the waker with the sleeper. If the waker was + // still asserted we can return it, otherwise try the next one. + old := (*Sleeper)(atomic.SwapPointer(&w.s, usleeper(s))) + if old == &assertedSleeper { + return w.id, true + } + } +} + +// Done is used to indicate that the caller won't use this Sleeper anymore. It +// removes the association with all wakers so that they can be safely reused +// by another sleeper after Done() returns. +func (s *Sleeper) Done() { + // Remove all associations that we can, and build a list of the ones + // we could not. An association can be removed right away from waker w + // if w.s has a pointer to the sleeper, that is, the waker is not + // asserted yet. By atomically switching w.s to nil, we guarantee that + // subsequent calls to Assert() on the waker will not result in it being + // queued to this sleeper. + var pending *Waker + w := s.allWakers + for w != nil { + next := w.allWakersNext + for { + t := atomic.LoadPointer(&w.s) + if t != usleeper(s) { + w.allWakersNext = pending + pending = w + break + } + + if atomic.CompareAndSwapPointer(&w.s, t, nil) { + break + } + } + w = next + } + + // The associations that we could not remove are either asserted, or in + // the process of being asserted, or have been asserted and cleared + // before being pulled from the sleeper lists. 
We must wait for them all + // to make it to the sleeper lists, so that we know that the wakers + // won't do any more work towards waking this sleeper up. + for pending != nil { + pulled := s.nextWaker(true) + + // Remove the waker we just pulled from the list of associated + // wakers. + prev := &pending + for w := *prev; w != nil; w = *prev { + if pulled == w { + *prev = w.allWakersNext + break + } + prev = &w.allWakersNext + } + } + s.allWakers = nil +} + +// enqueueAssertedWaker enqueues an asserted waker to the "ready" circular list +// of wakers that want to notify the sleeper. +func (s *Sleeper) enqueueAssertedWaker(w *Waker) { + // Add the new waker to the front of the list. + for { + v := (*Waker)(atomic.LoadPointer(&s.sharedList)) + w.next = v + if atomic.CompareAndSwapPointer(&s.sharedList, uwaker(v), uwaker(w)) { + break + } + } + + for { + // Nothing to do if there isn't a G waiting. + g := atomic.LoadUintptr(&s.waitingG) + if g == 0 { + return + } + + // Signal to the sleeper that a waker has been asserted. + if atomic.CompareAndSwapUintptr(&s.waitingG, g, 0) { + if g != preparingG { + // We managed to get a G. Wake it up. + goready(g, 0) + } + } + } +} + +// Waker represents a source of wake-up notifications to be sent to sleepers. A +// waker can be associated with at most one sleeper at a time, and at any given +// time is either in asserted or non-asserted state. +// +// Once asserted, the waker remains so until it is manually cleared or a sleeper +// consumes its assertion (i.e., a sleeper wakes up or is prevented from going +// to sleep due to the waker). +// +// This struct is thread-safe, that is, its methods can be called concurrently +// by multiple goroutines. +type Waker struct { + // s is the sleeper that this waker can wake up. Only one sleeper at a + // time is allowed. 
This field can have three classes of values: + // nil -- the waker is not asserted: it either is not associated with + // a sleeper, or is queued to a sleeper due to being previously + // asserted. This is the zero value. + // &assertedSleeper -- the waker is asserted. + // otherwise -- the waker is not asserted, and is associated with the + // given sleeper. Once it transitions to asserted state, the + // associated sleeper will be woken. + s unsafe.Pointer + + // next is used to form a linked list of asserted wakers in a sleeper. + next *Waker + + // allWakersNext is used to form a linked list of all wakers associated + // to a given sleeper. + allWakersNext *Waker + + // id is the value to be returned to sleepers when they wake up due to + // this waker being asserted. + id int +} + +// Assert moves the waker to an asserted state, if it isn't asserted yet. When +// asserted, the waker will cause its matching sleeper to wake up. +func (w *Waker) Assert() { + // Nothing to do if the waker is already asserted. This check allows us + // to complete this case (already asserted) without any interlocked + // operations on x86. + if atomic.LoadPointer(&w.s) == usleeper(&assertedSleeper) { + return + } + + // Mark the waker as asserted, and wake up a sleeper if there is one. + switch s := (*Sleeper)(atomic.SwapPointer(&w.s, usleeper(&assertedSleeper))); s { + case nil: + case &assertedSleeper: + default: + s.enqueueAssertedWaker(w) + } +} + +// Clear moves the waker to then non-asserted state and returns whether it was +// asserted before being cleared. +// +// N.B. The waker isn't removed from the "ready" list of a sleeper (if it +// happens to be in one), but the sleeper will notice that it is not asserted +// anymore and won't return it to the caller. +func (w *Waker) Clear() bool { + // Nothing to do if the waker is not asserted. This check allows us to + // complete this case (already not asserted) without any interlocked + // operations on x86. 
+ if atomic.LoadPointer(&w.s) != usleeper(&assertedSleeper) { + return false + } + + // Try to store nil in the sleeper, which indicates that the waker is + // not asserted. + return atomic.CompareAndSwapPointer(&w.s, usleeper(&assertedSleeper), nil) +} + +// IsAsserted returns whether the waker is currently asserted (i.e., if it's +// currently in a state that would cause its matching sleeper to wake up). +func (w *Waker) IsAsserted() bool { + return (*Sleeper)(atomic.LoadPointer(&w.s)) == &assertedSleeper +} + +func usleeper(s *Sleeper) unsafe.Pointer { + return unsafe.Pointer(s) +} + +func uwaker(w *Waker) unsafe.Pointer { + return unsafe.Pointer(w) +} diff --git a/pkg/state/BUILD b/pkg/state/BUILD new file mode 100644 index 000000000..bb6415d9b --- /dev/null +++ b/pkg/state/BUILD @@ -0,0 +1,77 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") + +go_template_instance( + name = "addr_range", + out = "addr_range.go", + package = "state", + prefix = "addr", + template = "//pkg/segment:generic_range", + types = { + "T": "uintptr", + }, +) + +go_template_instance( + name = "addr_set", + out = "addr_set.go", + consts = { + "minDegree": "10", + }, + imports = { + "reflect": "reflect", + }, + package = "state", + prefix = "addr", + template = "//pkg/segment:generic_set", + types = { + "Key": "uintptr", + "Range": "addrRange", + "Value": "reflect.Value", + "Functions": "addrSetFunctions", + }, +) + +go_library( + name = "state", + srcs = [ + "addr_range.go", + "addr_set.go", + "decode.go", + "encode.go", + "encode_unsafe.go", + "map.go", + "printer.go", + "state.go", + "stats.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/state", + visibility = ["//:sandbox"], + deps = [ + ":object_go_proto", + "@com_github_golang_protobuf//proto:go_default_library", + ], +) + 
+proto_library( + name = "object_proto", + srcs = ["object.proto"], + visibility = ["//:sandbox"], +) + +go_proto_library( + name = "object_go_proto", + importpath = "gvisor.googlesource.com/gvisor/pkg/state/object_go_proto", + proto = ":object_proto", + visibility = ["//:sandbox"], +) + +go_test( + name = "state_test", + size = "small", + srcs = ["state_test.go"], + embed = [":state"], +) diff --git a/pkg/state/decode.go b/pkg/state/decode.go new file mode 100644 index 000000000..05758495b --- /dev/null +++ b/pkg/state/decode.go @@ -0,0 +1,594 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "reflect" + "sort" + + "github.com/golang/protobuf/proto" + pb "gvisor.googlesource.com/gvisor/pkg/state/object_go_proto" +) + +// objectState represents an object that may be in the process of being +// decoded. Specifically, it represents either a decoded object, or an an +// interest in a future object that will be decoded. When that interest is +// registered (via register), the storage for the object will be created, but +// it will not be decoded until the object is encountered in the stream. +type objectState struct { + // id is the id for this object. + // + // If this field is zero, then this is an anonymous (unregistered, + // non-reference primitive) object. This is immutable. + id uint64 + + // obj is the object. 
This may or may not be valid yet, depending on + // whether complete returns true. However, regardless of whether the + // object is valid, obj contains a final storage location for the + // object. This is immutable. + // + // Note that this must be addressable (obj.Addr() must not panic). + // + // The obj passed to the decode methods below will equal this obj only + // in the case of decoding the top-level object. However, the passed + // obj may represent individual fields, elements of a slice, etc. that + // are effectively embedded within the reflect.Value below but with + // distinct types. + obj reflect.Value + + // blockedBy is the number of dependencies this object has. + blockedBy int + + // blocking is a list of the objects blocked by this one. + blocking []*objectState + + // callbacks is a set of callbacks to execute on load. + callbacks []func() + + // path is the decoding path to the object. + path recoverable +} + +// complete indicates the object is complete. +func (os *objectState) complete() bool { + return os.blockedBy == 0 && len(os.callbacks) == 0 +} + +// checkComplete checks for completion. If the object is complete, pending +// callbacks will be executed and checkComplete will be called on downstream +// objects (those depending on this one). +func (os *objectState) checkComplete(stats *Stats) { + if os.blockedBy > 0 { + return + } + + // Fire all callbacks. + for _, fn := range os.callbacks { + stats.Start(os.obj) + fn() + stats.Done() + } + os.callbacks = nil + + // Clear all blocked objects. + for _, other := range os.blocking { + other.blockedBy-- + other.checkComplete(stats) + } + os.blocking = nil +} + +// waitFor queues a dependency on the given object. 
+func (os *objectState) waitFor(other *objectState, callback func()) { + os.blockedBy++ + other.blocking = append(other.blocking, os) + if callback != nil { + other.callbacks = append(other.callbacks, callback) + } +} + +// findCycleFor returns when the given object is found in the blocking set. +func (os *objectState) findCycleFor(target *objectState) []*objectState { + for _, other := range os.blocking { + if other == target { + return []*objectState{target} + } else if childList := other.findCycleFor(target); childList != nil { + return append(childList, other) + } + } + return nil +} + +// findCycle finds a dependency cycle. +func (os *objectState) findCycle() []*objectState { + return append(os.findCycleFor(os), os) +} + +// decodeState is a graph of objects in the process of being decoded. +// +// The decode process involves loading the breadth-first graph generated by +// encode. This graph is read in it's entirety, ensuring that all object +// storage is complete. +// +// As the graph is being serialized, a set of completion callbacks are +// executed. These completion callbacks should form a set of acyclic subgraphs +// over the original one. After decoding is complete, the objects are scanned +// to ensure that all callbacks are executed, otherwise the callback graph was +// not acyclic. +type decodeState struct { + // objectByID is the set of objects in progress. + objectsByID map[uint64]*objectState + + // deferred are objects that have been read, by no interest has been + // registered yet. These will be decoded once interest in registered. + deferred map[uint64]*pb.Object + + // outstanding is the number of outstanding objects. + outstanding uint32 + + // r is the input stream. + r io.Reader + + // stats is the passed stats object. + stats *Stats + + // recoverable is the panic recover facility. + recoverable +} + +// lookup looks up an object in decodeState or returns nil if no such object +// has been previously registered. 
func (ds *decodeState) lookup(id uint64) *objectState {
	return ds.objectsByID[id]
}

// wait registers a dependency on an object, executing callback (if non-nil)
// once the dependency is satisfied.
//
// As a special case, we always allow _useable_ references back to the first
// decoding object because it may have fields that are already decoded. We also
// allow trivial self reference, since they can be handled internally.
func (ds *decodeState) wait(waiter *objectState, id uint64, callback func()) {
	switch id {
	case 0:
		// Nil pointer; nothing to wait for.
		fallthrough
	case waiter.id:
		// Trivial self reference.
		fallthrough
	case 1:
		// Root object; see above.
		// In all three cases there is no dependency to record; just run
		// the callback (if any) immediately.
		if callback != nil {
			callback()
		}
		return
	}

	// No nil can be returned here.
	// NOTE(review): this relies on the object for id having already been
	// registered (lookup returning non-nil) -- confirm against callers.
	waiter.waitFor(ds.lookup(id), callback)
}

// waitObject notes a blocking relationship: os must wait for whatever object
// p (transitively) references before os can be considered complete.
func (ds *decodeState) waitObject(os *objectState, p *pb.Object, callback func()) {
	if rv, ok := p.Value.(*pb.Object_RefValue); ok {
		// Refs can encode pointers and maps.
		ds.wait(os, rv.RefValue, callback)
	} else if sv, ok := p.Value.(*pb.Object_SliceValue); ok {
		// See decodeObject; we need to wait for the array (if non-nil).
		ds.wait(os, sv.SliceValue.RefValue, callback)
	} else if iv, ok := p.Value.(*pb.Object_InterfaceValue); ok {
		// It's an interface (wait recursively on the wrapped value).
		ds.waitObject(os, iv.InterfaceValue.Value, callback)
	} else if callback != nil {
		// Nothing to wait for: execute the callback immediately.
		callback()
	}
}

// register registers a decode with a type, returning the (possibly
// pre-existing) objectState for the given id.
//
// This type is only used to instantiate a new object if it has not been
// registered previously.
func (ds *decodeState) register(id uint64, typ reflect.Type) *objectState {
	os, ok := ds.objectsByID[id]
	if ok {
		return os
	}

	// Record in the object index. Maps need MakeMap (a new'd map value is
	// nil and unusable); everything else gets addressable zero storage.
	if typ.Kind() == reflect.Map {
		os = &objectState{id: id, obj: reflect.MakeMap(typ), path: ds.recoverable.copy()}
	} else {
		os = &objectState{id: id, obj: reflect.New(typ).Elem(), path: ds.recoverable.copy()}
	}
	ds.objectsByID[id] = os

	if o, ok := ds.deferred[id]; ok {
		// There is a deferred object: it was already read from the
		// stream before interest was registered, so decode it now.
		delete(ds.deferred, id) // Free memory.
		ds.decodeObject(os, os.obj, o, "", nil)
	} else {
		// There is no deferred object; it has yet to appear in the
		// stream.
		ds.outstanding++
	}

	return os
}

// decodeStruct decodes a struct value, dispatching to the type's registered
// load function.
func (ds *decodeState) decodeStruct(os *objectState, obj reflect.Value, s *pb.Struct) {
	// Set the fields.
	m := Map{newInternalMap(nil, ds, os)}
	defer internalMapPool.Put(m.internalMap)
	for _, field := range s.Fields {
		m.data = append(m.data, entry{
			name:   field.Name,
			object: field.Value,
		})
	}

	// Sort the fields for efficient searching.
	//
	// Technically, these should already appear in sorted order in the
	// state ordering, so this cost is effectively a single scan to ensure
	// that the order is correct.
	if len(m.data) > 1 {
		sort.Slice(m.data, func(i, j int) bool {
			return m.data[i].name < m.data[j].name
		})
	}

	// Invoke the load; this will recursively decode other objects.
	fns, ok := registeredTypes.lookupFns(obj.Addr().Type())
	if ok {
		// Invoke the loader.
		fns.invokeLoad(obj.Addr(), m)
	} else if obj.NumField() == 0 {
		// Allow anonymous empty structs.
		return
	} else {
		// Propagate an error.
		panic(fmt.Errorf("unregistered type %s", obj.Type()))
	}
}

// decodeMap decodes a map value, allocating the map if needed and decoding
// each key/value pair into it.
func (ds *decodeState) decodeMap(os *objectState, obj reflect.Value, m *pb.Map) {
	if obj.IsNil() {
		obj.Set(reflect.MakeMap(obj.Type()))
	}
	for i := 0; i < len(m.Keys); i++ {
		// Decode the objects.
		kv := reflect.New(obj.Type().Key()).Elem()
		vv := reflect.New(obj.Type().Elem()).Elem()
		ds.decodeObject(os, kv, m.Keys[i], ".(key %d)", i)
		ds.decodeObject(os, vv, m.Values[i], "[%#v]", kv.Interface())
		ds.waitObject(os, m.Keys[i], nil)
		ds.waitObject(os, m.Values[i], nil)

		// Set in the map.
		obj.SetMapIndex(kv, vv)
	}
}

// decodeArray decodes an array value. The encoded length must match the
// (fixed) length of the destination array.
func (ds *decodeState) decodeArray(os *objectState, obj reflect.Value, a *pb.Array) {
	if len(a.Contents) != obj.Len() {
		panic(fmt.Errorf("mismatching array length expect=%d, actual=%d", obj.Len(), len(a.Contents)))
	}
	// Decode the contents into the array.
	for i := 0; i < len(a.Contents); i++ {
		ds.decodeObject(os, obj.Index(i), a.Contents[i], "[%d]", i)
		ds.waitObject(os, a.Contents[i], nil)
	}
}

// decodeInterface decodes an interface value, resolving the concrete type by
// its registered name.
func (ds *decodeState) decodeInterface(os *objectState, obj reflect.Value, i *pb.Interface) {
	// Is this a nil value?
	if i.Type == "" {
		return // Just leave obj alone.
	}

	// Get the dispatchable type. This may not be used if the given
	// reference has already been resolved, but if not we need to know the
	// type to create.
	t, ok := registeredTypes.lookupType(i.Type)
	if !ok {
		panic(fmt.Errorf("no valid type for %q", i.Type))
	}

	if obj.Kind() != reflect.Map {
		// Set the obj to be the given typed value; this actually sets
		// obj to be a non-zero value -- namely, it inserts type
		// information. There's no need to do this for maps.
		obj.Set(reflect.Zero(t))
	}

	// Decode the dereferenced element; there is no need to wait here, as
	// the interface object shares the current object state.
	ds.decodeObject(os, obj, i.Value, ".(%s)", i.Type)
}

// decodeObject decodes an object value.
+func (ds *decodeState) decodeObject(os *objectState, obj reflect.Value, object *pb.Object, format string, param interface{}) { + ds.push(false, format, param) + ds.stats.Start(obj) + + switch x := object.GetValue().(type) { + case *pb.Object_BoolValue: + obj.SetBool(x.BoolValue) + case *pb.Object_StringValue: + obj.SetString(x.StringValue) + case *pb.Object_Int64Value: + obj.SetInt(x.Int64Value) + if obj.Int() != x.Int64Value { + panic(fmt.Errorf("signed integer truncated in %v for %s", object, obj.Type())) + } + case *pb.Object_Uint64Value: + obj.SetUint(x.Uint64Value) + if obj.Uint() != x.Uint64Value { + panic(fmt.Errorf("unsigned integer truncated in %v for %s", object, obj.Type())) + } + case *pb.Object_DoubleValue: + obj.SetFloat(x.DoubleValue) + if obj.Float() != x.DoubleValue { + panic(fmt.Errorf("float truncated in %v for %s", object, obj.Type())) + } + case *pb.Object_RefValue: + // Resolve the pointer itself, even though the object may not + // be decoded yet. You need to use wait() in order to ensure + // that is the case. See wait above, and Map.Barrier. + if id := x.RefValue; id != 0 { + // Decoding the interface should have imparted type + // information, so from this point it's safe to resolve + // and use this dynamic information for actually + // creating the object in register. + // + // (For non-interfaces this is a no-op). + dyntyp := reflect.TypeOf(obj.Interface()) + if dyntyp.Kind() == reflect.Map { + obj.Set(ds.register(id, dyntyp).obj) + } else if dyntyp.Kind() == reflect.Ptr { + ds.push(true /* dereference */, "", nil) + obj.Set(ds.register(id, dyntyp.Elem()).obj.Addr()) + ds.pop() + } else { + obj.Set(ds.register(id, dyntyp.Elem()).obj.Addr()) + } + } else { + // We leave obj alone here. That's because if obj + // represents an interface, it may have been embued + // with type information in decodeInterface, and we + // don't want to destroy that information. 
+ } + case *pb.Object_SliceValue: + // It's okay to slice the array here, since the contents will + // still be provided later on. These semantics are a bit + // strange but they are handled in the Map.Barrier properly. + // + // The special semantics of zero ref apply here too. + if id := x.SliceValue.RefValue; id != 0 && x.SliceValue.Capacity > 0 { + v := reflect.ArrayOf(int(x.SliceValue.Capacity), obj.Type().Elem()) + obj.Set(ds.register(id, v).obj.Slice3(0, int(x.SliceValue.Length), int(x.SliceValue.Capacity))) + } + case *pb.Object_ArrayValue: + ds.decodeArray(os, obj, x.ArrayValue) + case *pb.Object_StructValue: + ds.decodeStruct(os, obj, x.StructValue) + case *pb.Object_MapValue: + ds.decodeMap(os, obj, x.MapValue) + case *pb.Object_InterfaceValue: + ds.decodeInterface(os, obj, x.InterfaceValue) + case *pb.Object_ByteArrayValue: + copyArray(obj, reflect.ValueOf(x.ByteArrayValue)) + case *pb.Object_Uint16ArrayValue: + // 16-bit slices are serialized as 32-bit slices. + // See object.proto for details. + s := x.Uint16ArrayValue.Values + t := obj.Slice(0, obj.Len()).Interface().([]uint16) + if len(t) != len(s) { + panic(fmt.Errorf("mismatching array length expect=%d, actual=%d", len(t), len(s))) + } + for i := range s { + t[i] = uint16(s[i]) + } + case *pb.Object_Uint32ArrayValue: + copyArray(obj, reflect.ValueOf(x.Uint32ArrayValue.Values)) + case *pb.Object_Uint64ArrayValue: + copyArray(obj, reflect.ValueOf(x.Uint64ArrayValue.Values)) + case *pb.Object_UintptrArrayValue: + copyArray(obj, castSlice(reflect.ValueOf(x.UintptrArrayValue.Values), reflect.TypeOf(uintptr(0)))) + case *pb.Object_Int8ArrayValue: + copyArray(obj, castSlice(reflect.ValueOf(x.Int8ArrayValue.Values), reflect.TypeOf(int8(0)))) + case *pb.Object_Int16ArrayValue: + // 16-bit slices are serialized as 32-bit slices. + // See object.proto for details. 
+ s := x.Int16ArrayValue.Values + t := obj.Slice(0, obj.Len()).Interface().([]int16) + if len(t) != len(s) { + panic(fmt.Errorf("mismatching array length expect=%d, actual=%d", len(t), len(s))) + } + for i := range s { + t[i] = int16(s[i]) + } + case *pb.Object_Int32ArrayValue: + copyArray(obj, reflect.ValueOf(x.Int32ArrayValue.Values)) + case *pb.Object_Int64ArrayValue: + copyArray(obj, reflect.ValueOf(x.Int64ArrayValue.Values)) + case *pb.Object_BoolArrayValue: + copyArray(obj, reflect.ValueOf(x.BoolArrayValue.Values)) + case *pb.Object_Float64ArrayValue: + copyArray(obj, reflect.ValueOf(x.Float64ArrayValue.Values)) + case *pb.Object_Float32ArrayValue: + copyArray(obj, reflect.ValueOf(x.Float32ArrayValue.Values)) + default: + // Shoud not happen, not propagated as an error. + panic(fmt.Sprintf("unknown object %v for %s", object, obj.Type())) + } + + ds.stats.Done() + ds.pop() +} + +func copyArray(dest reflect.Value, src reflect.Value) { + if dest.Len() != src.Len() { + panic(fmt.Errorf("mismatching array length expect=%d, actual=%d", dest.Len(), src.Len())) + } + reflect.Copy(dest, castSlice(src, dest.Type().Elem())) +} + +// Deserialize deserializes the object state. +// +// This function may panic and should be run in safely(). +func (ds *decodeState) Deserialize(obj reflect.Value) { + ds.objectsByID[1] = &objectState{id: 1, obj: obj, path: ds.recoverable.copy()} + ds.outstanding = 1 // The root object. + + // Decode all objects in the stream. + // + // See above, we never process objects while we have no outstanding + // interests (other than the very first object). + for id := uint64(1); ds.outstanding > 0; id++ { + o, err := ds.readObject() + if err != nil { + panic(err) + } + + os := ds.lookup(id) + if os != nil { + // Decode the object. + ds.from = &os.path + ds.decodeObject(os, os.obj, o, "", nil) + ds.outstanding-- + } else { + // If an object hasn't had interest registered + // previously, we deferred decoding until interest is + // registered. 
+ ds.deferred[id] = o + } + } + + // Check the zero-length header at the end. + length, object, err := ReadHeader(ds.r) + if err != nil { + panic(err) + } + if length != 0 { + panic(fmt.Sprintf("expected zero-length terminal, got %d", length)) + } + if object { + panic("expected non-object terminal") + } + + // Check if we have any deferred objects. + if count := len(ds.deferred); count > 0 { + // Shoud not happen, not propagated as an error. + panic(fmt.Sprintf("still have %d deferred objects", count)) + } + + // Scan and fire all callbacks. + for _, os := range ds.objectsByID { + os.checkComplete(ds.stats) + } + + // Check if we have any remaining dependency cycles. + for _, os := range ds.objectsByID { + if !os.complete() { + // This must be the result of a dependency cycle. + cycle := os.findCycle() + var buf bytes.Buffer + buf.WriteString("dependency cycle: {") + for i, cycleOS := range cycle { + if i > 0 { + buf.WriteString(" => ") + } + buf.WriteString(fmt.Sprintf("%s", cycleOS.obj.Type())) + } + buf.WriteString("}") + // Panic as an error; propagate to the caller. + panic(errors.New(string(buf.Bytes()))) + } + } +} + +type byteReader struct { + io.Reader +} + +// ReadByte implements io.ByteReader. +func (br byteReader) ReadByte() (byte, error) { + var b [1]byte + n, err := br.Reader.Read(b[:]) + if n > 0 { + return b[0], nil + } else if err != nil { + return 0, err + } else { + return 0, io.ErrUnexpectedEOF + } +} + +// ReadHeader reads an object header. +// +// Each object written to the statefile is prefixed with a header. See +// WriteHeader for more information; these functions are exported to allow +// non-state writes to the file to play nice with debugging tools. +func ReadHeader(r io.Reader) (length uint64, object bool, err error) { + // Read the header. + length, err = binary.ReadUvarint(byteReader{r}) + if err != nil { + return + } + + // Decode whether the object is valid. 
+ object = length&0x1 != 0 + length = length >> 1 + return +} + +// readObject reads an object from the stream. +func (ds *decodeState) readObject() (*pb.Object, error) { + // Read the header. + length, object, err := ReadHeader(ds.r) + if err != nil { + return nil, err + } + if !object { + return nil, fmt.Errorf("invalid object header") + } + + // Read the object. + buf := make([]byte, length) + for done := 0; done < len(buf); { + n, err := ds.r.Read(buf[done:]) + done += n + if n == 0 && err != nil { + return nil, err + } + } + + // Unmarshal. + obj := new(pb.Object) + if err := proto.Unmarshal(buf, obj); err != nil { + return nil, err + } + + return obj, nil +} diff --git a/pkg/state/encode.go b/pkg/state/encode.go new file mode 100644 index 000000000..eb6527afc --- /dev/null +++ b/pkg/state/encode.go @@ -0,0 +1,454 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "container/list" + "encoding/binary" + "fmt" + "io" + "reflect" + "sort" + + "github.com/golang/protobuf/proto" + pb "gvisor.googlesource.com/gvisor/pkg/state/object_go_proto" +) + +// queuedObject is an object queued for encoding. +type queuedObject struct { + id uint64 + obj reflect.Value + path recoverable +} + +// encodeState is state used for encoding. +// +// The encoding process is a breadth-first traversal of the object graph. The +// inherent races and dependencies are much simpler than the decode case. 
+type encodeState struct { + // lastID is the last object ID. + // + // See idsByObject for context. Because of the special zero encoding + // used for reference values, the first ID must be 1. + lastID uint64 + + // idsByObject is a set of objects, indexed via: + // + // reflect.ValueOf(x).UnsafeAddr + // + // This provides IDs for objects. + idsByObject map[uintptr]uint64 + + // values stores values that span the addresses. + // + // addrSet is a a generated type which efficiently stores ranges of + // addresses. When encoding pointers, these ranges are filled in and + // used to check for overlapping or conflicting pointers. This would + // indicate a pointer to an field, or a non-type safe value, neither of + // which are currently decodable. + // + // See the usage of values below for more context. + values addrSet + + // w is the output stream. + w io.Writer + + // pending is the list of objects to be serialized. + // + // This is a set of queuedObjects. + pending list.List + + // done is the a list of finished objects. + // + // This is kept to prevent garbage collection and address reuse. + done list.List + + // stats is the passed stats object. + stats *Stats + + // recoverable is the panic recover facility. + recoverable +} + +// register looks up an ID, registering if necessary. +// +// If the object was not previosly registered, it is enqueued to be serialized. +// See the documentation for idsByObject for more information. +func (es *encodeState) register(obj reflect.Value) uint64 { + // It is not legal to call register for any non-pointer objects (see + // below), so we panic with a recoverable error if this is a mismatch. + if obj.Kind() != reflect.Ptr && obj.Kind() != reflect.Map { + panic(fmt.Errorf("non-pointer %#v registered", obj.Interface())) + } + + addr := obj.Pointer() + if obj.Kind() == reflect.Ptr && obj.Elem().Type().Size() == 0 { + // For zero-sized objects, we always provide a unique ID. 
+ // That's because the runtime internally multiplexes pointers + // to the same address. We can't be certain what the intent is + // with pointers to zero-sized objects, so we just give them + // all unique identities. + } else if id, ok := es.idsByObject[addr]; ok { + // Already registered. + return id + } + + // Ensure that the first ID given out is one. See note on lastID. The + // ID zero is used to indicate nil values. + es.lastID++ + id := es.lastID + es.idsByObject[addr] = id + if obj.Kind() == reflect.Ptr { + // Dereference and treat as a pointer. + es.pending.PushBack(queuedObject{id: id, obj: obj.Elem(), path: es.recoverable.copy()}) + + // Register this object at all addresses. + typ := obj.Elem().Type() + if size := typ.Size(); size > 0 { + r := addrRange{addr, addr + size} + if !es.values.IsEmptyRange(r) { + panic(fmt.Errorf("overlapping objects: [new object] %#v [existing object] %#v", obj.Interface(), es.values.FindSegment(addr).Value().Elem().Interface())) + } + es.values.Add(r, obj) + } + } else { + // Push back the map itself; when maps are encoded from the + // top-level, forceMap will be equal to true. + es.pending.PushBack(queuedObject{id: id, obj: obj, path: es.recoverable.copy()}) + } + + return id +} + +// encodeMap encodes a map. +func (es *encodeState) encodeMap(obj reflect.Value) *pb.Map { + var ( + keys []*pb.Object + values []*pb.Object + ) + for i, k := range obj.MapKeys() { + v := obj.MapIndex(k) + kp := es.encodeObject(k, false, ".(key %d)", i) + vp := es.encodeObject(v, false, "[%#v]", k.Interface()) + keys = append(keys, kp) + values = append(values, vp) + } + return &pb.Map{Keys: keys, Values: values} +} + +// encodeStruct encodes a composite object. +func (es *encodeState) encodeStruct(obj reflect.Value) *pb.Struct { + // Invoke the save. + m := Map{newInternalMap(es, nil, nil)} + defer internalMapPool.Put(m.internalMap) + if !obj.CanAddr() { + // Force it to a * type of the above; this involves a copy. 
+ localObj := reflect.New(obj.Type()) + localObj.Elem().Set(obj) + obj = localObj.Elem() + } + fns, ok := registeredTypes.lookupFns(obj.Addr().Type()) + if ok { + // Invoke the provided saver. + fns.invokeSave(obj.Addr(), m) + } else if obj.NumField() == 0 { + // Allow unregistered anonymous, empty structs. + return &pb.Struct{} + } else { + // Propagate an error. + panic(fmt.Errorf("unregistered type %T", obj.Interface())) + } + + // Sort the underlying slice, and check for duplicates. This is done + // once instead of on each add, because performing this sort once is + // far more efficient. + if len(m.data) > 1 { + sort.Slice(m.data, func(i, j int) bool { + return m.data[i].name < m.data[j].name + }) + for i := range m.data { + if i > 0 && m.data[i-1].name == m.data[i].name { + panic(fmt.Errorf("duplicate name %s", m.data[i].name)) + } + } + } + + // Encode the resulting fields. + fields := make([]*pb.Field, 0, len(m.data)) + for _, e := range m.data { + fields = append(fields, &pb.Field{ + Name: e.name, + Value: e.object, + }) + } + + // Return the encoded object. + return &pb.Struct{Fields: fields} +} + +// encodeArray encodes an array. +func (es *encodeState) encodeArray(obj reflect.Value) *pb.Array { + var ( + contents []*pb.Object + ) + for i := 0; i < obj.Len(); i++ { + entry := es.encodeObject(obj.Index(i), false, "[%d]", i) + contents = append(contents, entry) + } + return &pb.Array{Contents: contents} +} + +// encodeInterface encodes an interface. +// +// Precondition: the value is not nil. +func (es *encodeState) encodeInterface(obj reflect.Value) *pb.Interface { + // Check for the nil interface. + obj = reflect.ValueOf(obj.Interface()) + if !obj.IsValid() { + return &pb.Interface{ + Type: "", // left alone in decode. + Value: &pb.Object{Value: &pb.Object_RefValue{0}}, + } + } + // We have an interface value here. How do we save that? We + // resolve the underlying type and save it as a dispatchable. 
+ typName, ok := registeredTypes.lookupName(obj.Type()) + if !ok { + panic(fmt.Errorf("type %s is not registered", obj.Type())) + } + + // Encode the object again. + return &pb.Interface{ + Type: typName, + Value: es.encodeObject(obj, false, ".(%s)", typName), + } +} + +// encodeObject encodes an object. +// +// If mapAsValue is true, then a map will be encoded directly. +func (es *encodeState) encodeObject(obj reflect.Value, mapAsValue bool, format string, param interface{}) (object *pb.Object) { + es.push(false, format, param) + es.stats.Start(obj) + + switch obj.Kind() { + case reflect.Bool: + object = &pb.Object{Value: &pb.Object_BoolValue{obj.Bool()}} + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + object = &pb.Object{Value: &pb.Object_Int64Value{obj.Int()}} + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + object = &pb.Object{Value: &pb.Object_Uint64Value{obj.Uint()}} + case reflect.Float32, reflect.Float64: + object = &pb.Object{Value: &pb.Object_DoubleValue{obj.Float()}} + case reflect.Array: + switch obj.Type().Elem().Kind() { + case reflect.Uint8: + object = &pb.Object{Value: &pb.Object_ByteArrayValue{pbSlice(obj).Interface().([]byte)}} + case reflect.Uint16: + // 16-bit slices are serialized as 32-bit slices. + // See object.proto for details. 
+ s := pbSlice(obj).Interface().([]uint16) + t := make([]uint32, len(s)) + for i := range s { + t[i] = uint32(s[i]) + } + object = &pb.Object{Value: &pb.Object_Uint16ArrayValue{&pb.Uint16S{Values: t}}} + case reflect.Uint32: + object = &pb.Object{Value: &pb.Object_Uint32ArrayValue{&pb.Uint32S{Values: pbSlice(obj).Interface().([]uint32)}}} + case reflect.Uint64: + object = &pb.Object{Value: &pb.Object_Uint64ArrayValue{&pb.Uint64S{Values: pbSlice(obj).Interface().([]uint64)}}} + case reflect.Uintptr: + object = &pb.Object{Value: &pb.Object_UintptrArrayValue{&pb.Uintptrs{Values: pbSlice(obj).Interface().([]uint64)}}} + case reflect.Int8: + object = &pb.Object{Value: &pb.Object_Int8ArrayValue{&pb.Int8S{Values: pbSlice(obj).Interface().([]byte)}}} + case reflect.Int16: + // 16-bit slices are serialized as 32-bit slices. + // See object.proto for details. + s := pbSlice(obj).Interface().([]int16) + t := make([]int32, len(s)) + for i := range s { + t[i] = int32(s[i]) + } + object = &pb.Object{Value: &pb.Object_Int16ArrayValue{&pb.Int16S{Values: t}}} + case reflect.Int32: + object = &pb.Object{Value: &pb.Object_Int32ArrayValue{&pb.Int32S{Values: pbSlice(obj).Interface().([]int32)}}} + case reflect.Int64: + object = &pb.Object{Value: &pb.Object_Int64ArrayValue{&pb.Int64S{Values: pbSlice(obj).Interface().([]int64)}}} + case reflect.Bool: + object = &pb.Object{Value: &pb.Object_BoolArrayValue{&pb.Bools{Values: pbSlice(obj).Interface().([]bool)}}} + case reflect.Float32: + object = &pb.Object{Value: &pb.Object_Float32ArrayValue{&pb.Float32S{Values: pbSlice(obj).Interface().([]float32)}}} + case reflect.Float64: + object = &pb.Object{Value: &pb.Object_Float64ArrayValue{&pb.Float64S{Values: pbSlice(obj).Interface().([]float64)}}} + default: + object = &pb.Object{Value: &pb.Object_ArrayValue{es.encodeArray(obj)}} + } + case reflect.Slice: + if obj.IsNil() || obj.Cap() == 0 { + // Handled specially in decode; store as nil value. 
+ object = &pb.Object{Value: &pb.Object_RefValue{0}} + } else { + // Serialize a slice as the array plus length and capacity. + object = &pb.Object{Value: &pb.Object_SliceValue{&pb.Slice{ + Capacity: uint32(obj.Cap()), + Length: uint32(obj.Len()), + RefValue: es.register(arrayFromSlice(obj)), + }}} + } + case reflect.String: + object = &pb.Object{Value: &pb.Object_StringValue{obj.String()}} + case reflect.Ptr: + if obj.IsNil() { + // Handled specially in decode; store as a nil value. + object = &pb.Object{Value: &pb.Object_RefValue{0}} + } else { + es.push(true /* dereference */, "", nil) + object = &pb.Object{Value: &pb.Object_RefValue{es.register(obj)}} + es.pop() + } + case reflect.Interface: + // We don't check for IsNil here, as we want to encode type + // information. The case of the empty interface (no type, no + // value) is handled by encodeInteface. + object = &pb.Object{Value: &pb.Object_InterfaceValue{es.encodeInterface(obj)}} + case reflect.Struct: + object = &pb.Object{Value: &pb.Object_StructValue{es.encodeStruct(obj)}} + case reflect.Map: + if obj.IsNil() { + // Handled specially in decode; store as a nil value. + object = &pb.Object{Value: &pb.Object_RefValue{0}} + } else if mapAsValue { + // Encode the map directly. + object = &pb.Object{Value: &pb.Object_MapValue{es.encodeMap(obj)}} + } else { + // Encode a reference to the map. + object = &pb.Object{Value: &pb.Object_RefValue{es.register(obj)}} + } + default: + panic(fmt.Errorf("unknown primitive %#v", obj.Interface())) + } + + es.stats.Done() + es.pop() + return +} + +// Serialize serializes the object state. +// +// This function may panic and should be run in safely(). +func (es *encodeState) Serialize(obj reflect.Value) { + es.register(obj.Addr()) + + // Pop off the list until we're done. + for es.pending.Len() > 0 { + e := es.pending.Front() + es.pending.Remove(e) + + // Extract the queued object. 
+ qo := e.Value.(queuedObject) + es.from = &qo.path + o := es.encodeObject(qo.obj, true, "", nil) + + // Emit to our output stream. + if err := es.writeObject(qo.id, o); err != nil { + panic(err) + } + + // Mark as done. + es.done.PushBack(e) + } + + // Write a zero-length terminal at the end; this is a sanity check + // applied at decode time as well (see decode.go). + if err := WriteHeader(es.w, 0, false); err != nil { + panic(err) + } +} + +// WriteHeader writes a header. +// +// Each object written to the statefile should be prefixed with a header. In +// order to generate statefiles that play nicely with debugging tools, raw +// writes should be prefixed with a header with object set to false and the +// appropriate length. This will allow tools to skip these regions. +func WriteHeader(w io.Writer, length uint64, object bool) error { + // The lowest-order bit encodes whether this is a valid object. This is + // a purely internal convention, but allows the object flag to be + // returned from ReadHeader. + length = length << 1 + if object { + length |= 0x1 + } + + // Write a header. + var hdr [32]byte + encodedLen := binary.PutUvarint(hdr[:], length) + for done := 0; done < encodedLen; { + n, err := w.Write(hdr[done:encodedLen]) + done += n + if n == 0 && err != nil { + return err + } + } + + return nil +} + +// writeObject writes an object to the stream. +func (es *encodeState) writeObject(id uint64, obj *pb.Object) error { + // Marshal the proto. + buf, err := proto.Marshal(obj) + if err != nil { + return err + } + + // Write the object header. + if err := WriteHeader(es.w, uint64(len(buf)), true); err != nil { + return err + } + + // Write the object. + for done := 0; done < len(buf); { + n, err := es.w.Write(buf[done:]) + done += n + if n == 0 && err != nil { + return err + } + } + + return nil +} + +// addrSetFunctions is used by addrSet. 
+type addrSetFunctions struct{} + +func (addrSetFunctions) MinKey() uintptr { + return 0 +} + +func (addrSetFunctions) MaxKey() uintptr { + return ^uintptr(0) +} + +func (addrSetFunctions) ClearValue(val *reflect.Value) { +} + +func (addrSetFunctions) Merge(_ addrRange, val1 reflect.Value, _ addrRange, val2 reflect.Value) (reflect.Value, bool) { + return val1, val1 == val2 +} + +func (addrSetFunctions) Split(_ addrRange, val reflect.Value, _ uintptr) (reflect.Value, reflect.Value) { + return val, val +} diff --git a/pkg/state/encode_unsafe.go b/pkg/state/encode_unsafe.go new file mode 100644 index 000000000..d96ba56d4 --- /dev/null +++ b/pkg/state/encode_unsafe.go @@ -0,0 +1,81 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "reflect" + "unsafe" +) + +// arrayFromSlice constructs a new pointer to the slice data. +// +// It would be similar to the following: +// +// x := make([]Foo, l, c) +// a := ([l]Foo*)(unsafe.Pointer(x[0])) +// +func arrayFromSlice(obj reflect.Value) reflect.Value { + return reflect.NewAt( + reflect.ArrayOf(obj.Cap(), obj.Type().Elem()), + unsafe.Pointer(obj.Pointer())) +} + +// pbSlice returns a protobuf-supported slice of the array and erase the +// original element type (which could be a defined type or non-supported type). 
+func pbSlice(obj reflect.Value) reflect.Value { + var typ reflect.Type + switch obj.Type().Elem().Kind() { + case reflect.Uint8: + typ = reflect.TypeOf(byte(0)) + case reflect.Uint16: + typ = reflect.TypeOf(uint16(0)) + case reflect.Uint32: + typ = reflect.TypeOf(uint32(0)) + case reflect.Uint64: + typ = reflect.TypeOf(uint64(0)) + case reflect.Uintptr: + typ = reflect.TypeOf(uint64(0)) + case reflect.Int8: + typ = reflect.TypeOf(byte(0)) + case reflect.Int16: + typ = reflect.TypeOf(int16(0)) + case reflect.Int32: + typ = reflect.TypeOf(int32(0)) + case reflect.Int64: + typ = reflect.TypeOf(int64(0)) + case reflect.Bool: + typ = reflect.TypeOf(bool(false)) + case reflect.Float32: + typ = reflect.TypeOf(float32(0)) + case reflect.Float64: + typ = reflect.TypeOf(float64(0)) + default: + panic("slice element is not of basic value type") + } + return reflect.NewAt( + reflect.ArrayOf(obj.Len(), typ), + unsafe.Pointer(obj.Slice(0, obj.Len()).Pointer()), + ).Elem().Slice(0, obj.Len()) +} + +func castSlice(obj reflect.Value, elemTyp reflect.Type) reflect.Value { + if obj.Type().Elem().Size() != elemTyp.Size() { + panic("cannot cast slice into other element type of different size") + } + return reflect.NewAt( + reflect.ArrayOf(obj.Len(), elemTyp), + unsafe.Pointer(obj.Slice(0, obj.Len()).Pointer()), + ).Elem() +} diff --git a/pkg/state/map.go b/pkg/state/map.go new file mode 100644 index 000000000..c3d165501 --- /dev/null +++ b/pkg/state/map.go @@ -0,0 +1,221 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "fmt" + "reflect" + "sort" + "sync" + + pb "gvisor.googlesource.com/gvisor/pkg/state/object_go_proto" +) + +// entry is a single map entry. +type entry struct { + name string + object *pb.Object +} + +// internalMap is the internal Map state. +// +// These are recycled via a pool to avoid churn. +type internalMap struct { + // es is encodeState. + es *encodeState + + // ds is decodeState. + ds *decodeState + + // os is current object being decoded. + // + // This will always be nil during encode. + os *objectState + + // data stores the encoded values. + data []entry +} + +var internalMapPool = sync.Pool{ + New: func() interface{} { + return new(internalMap) + }, +} + +// newInternalMap returns a cached map. +func newInternalMap(es *encodeState, ds *decodeState, os *objectState) *internalMap { + m := internalMapPool.Get().(*internalMap) + m.es = es + m.ds = ds + m.os = os + if m.data != nil { + m.data = m.data[:0] + } + return m +} + +// Map is a generic state container. +// +// This is the object passed to Save and Load in order to store their state. +// +// Detailed documentation is available in individual methods. +type Map struct { + *internalMap +} + +// Save adds the given object to the map. +// +// You should pass always pointers to the object you are saving. For example: +// +// type X struct { +// A int +// B *int +// } +// +// func (x *X) Save(m Map) { +// m.Save("A", &x.A) +// m.Save("B", &x.B) +// } +// +// func (x *X) Load(m Map) { +// m.Load("A", &x.A) +// m.Load("B", &x.B) +// } +func (m Map) Save(name string, objPtr interface{}) { + m.save(name, reflect.ValueOf(objPtr).Elem(), ".%s") +} + +// SaveValue adds the given object value to the map. +// +// This should be used for values where pointers are not available, or casts +// are required during Save/Load. 
+// +// For example, if we want to cast external package type P.Foo to int64: +// +// type X struct { +// A P.Foo +// } +// +// func (x *X) Save(m Map) { +// m.SaveValue("A", int64(x.A)) +// } +// +// func (x *X) Load(m Map) { +// m.LoadValue("A", new(int64), func(x interface{}) { +// x.A = P.Foo(x.(int64)) +// }) +// } +func (m Map) SaveValue(name string, obj interface{}) { + m.save(name, reflect.ValueOf(obj), ".(value %s)") +} + +// save is helper for the above. It takes the name of value to save the field +// to, the field object (obj), and a format string that specifies how the +// field's saving logic is dispatched from the struct (normal, value, etc.). The +// format string should expect one string parameter, which is the name of the +// field. +func (m Map) save(name string, obj reflect.Value, format string) { + if m.es == nil { + // Not currently encoding. + m.Failf("no encode state for %q", name) + } + + // Attempt the encode. + // + // These are sorted at the end, after all objects are added and will be + // sorted and checked for duplicates (see encodeStruct). + m.data = append(m.data, entry{ + name: name, + object: m.es.encodeObject(obj, false, format, name), + }) +} + +// Load loads the given object from the map. +// +// See Save for an example. +func (m Map) Load(name string, objPtr interface{}) { + m.load(name, reflect.ValueOf(objPtr), false, nil, ".%s") +} + +// LoadWait loads the given objects from the map, and marks it as requiring all +// AfterLoad executions to complete prior to running this object's AfterLoad. +// +// See Save for an example. +func (m Map) LoadWait(name string, objPtr interface{}) { + m.load(name, reflect.ValueOf(objPtr), true, nil, ".(wait %s)") +} + +// LoadValue loads the given object value from the map. +// +// See SaveValue for an example. 
+func (m Map) LoadValue(name string, objPtr interface{}, fn func(interface{})) { + o := reflect.ValueOf(objPtr) + m.load(name, o, true, func() { fn(o.Elem().Interface()) }, ".(value %s)") +} + +// load is helper for the above. It takes the name of value to load the field +// from, the target field pointer (objPtr), whether load completion of the +// struct depends on the field's load completion (wait), the load completion +// logic (fn), and a format string that specifies how the field's loading logic +// is dispatched from the struct (normal, wait, value, etc.). The format string +// should expect one string parameter, which is the name of the field. +func (m Map) load(name string, objPtr reflect.Value, wait bool, fn func(), format string) { + if m.ds == nil { + // Not currently decoding. + m.Failf("no decode state for %q", name) + } + + // Find the object. + // + // These are sorted up front (and should appear in the state file + // sorted as well), so we can do a binary search here to ensure that + // large structs don't behave badly. + i := sort.Search(len(m.data), func(i int) bool { + return m.data[i].name >= name + }) + if i >= len(m.data) || m.data[i].name != name { + // There is no data for this name? + m.Failf("no data found for %q", name) + } + + // Perform the decode. + m.ds.decodeObject(m.os, objPtr.Elem(), m.data[i].object, format, name) + if wait { + // Mark this individual object a blocker. + m.ds.waitObject(m.os, m.data[i].object, fn) + } +} + +// Failf fails the save or restore with the provided message. Processing will +// stop after calling Failf, as the state package uses a panic & recover +// mechanism for state errors. You should defer any cleanup required. +func (m Map) Failf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} + +// AfterLoad schedules a function execution when all objects have been allocated +// and their automated loading and customized load logic have been executed. 
fn +// will not be executed until all of current object's dependencies' AfterLoad() +// logic, if exist, have been executed. +func (m Map) AfterLoad(fn func()) { + if m.ds == nil { + // Not currently decoding. + m.Failf("not decoding") + } + + // Queue the local callback; this will execute when all of the above + // data dependencies have been cleared. + m.os.callbacks = append(m.os.callbacks, fn) +} diff --git a/pkg/state/object.proto b/pkg/state/object.proto new file mode 100644 index 000000000..6595c5519 --- /dev/null +++ b/pkg/state/object.proto @@ -0,0 +1,140 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package gvisor.state.statefile; + +// Slice is a slice value. +message Slice { + uint32 length = 1; + uint32 capacity = 2; + uint64 ref_value = 3; +} + +// Array is an array value. +message Array { + repeated Object contents = 1; +} + +// Map is a map value. +message Map { + repeated Object keys = 1; + repeated Object values = 2; +} + +// Interface is an interface value. +message Interface { + string type = 1; + Object value = 2; +} + +// Struct is a basic composite value. +message Struct { + repeated Field fields = 1; +} + +// Field encodes a single field. +message Field { + string name = 1; + Object value = 2; +} + +// Uint16s encodes an uint16 array. To be used inside oneof structure. +message Uint16s { + // There is no 16-bit type in protobuf so we use variable length 32-bit here. 
+ repeated uint32 values = 1; +} + +// Uint32s encodes an uint32 array. To be used inside oneof structure. +message Uint32s { + repeated fixed32 values = 1; +} + +// Uint64s encodes an uint64 array. To be used inside oneof structure. +message Uint64s { + repeated fixed64 values = 1; +} + +// Uintptrs encodes an uintptr array. To be used inside oneof structure. +message Uintptrs { + repeated fixed64 values = 1; +} + +// Int8s encodes an int8 array. To be used inside oneof structure. +message Int8s { + bytes values = 1; +} + +// Int16s encodes an int16 array. To be used inside oneof structure. +message Int16s { + // There is no 16-bit type in protobuf so we use variable length 32-bit here. + repeated int32 values = 1; +} + +// Int32s encodes an int32 array. To be used inside oneof structure. +message Int32s { + repeated sfixed32 values = 1; +} + +// Int64s encodes an int64 array. To be used inside oneof structure. +message Int64s { + repeated sfixed64 values = 1; +} + +// Bools encodes a boolean array. To be used inside oneof structure. +message Bools { + repeated bool values = 1; +} + +// Float64s encodes a float64 array. To be used inside oneof structure. +message Float64s { + repeated double values = 1; +} + +// Float32s encodes a float32 array. To be used inside oneof structure. +message Float32s { + repeated float values = 1; +} + +// Object are primitive encodings. +// +// Note that ref_value references an Object.id, below. 
+message Object { + oneof value { + bool bool_value = 1; + string string_value = 2; + int64 int64_value = 3; + uint64 uint64_value = 4; + double double_value = 5; + uint64 ref_value = 6; + Slice slice_value = 7; + Array array_value = 8; + Interface interface_value = 9; + Struct struct_value = 10; + Map map_value = 11; + bytes byte_array_value = 12; + Uint16s uint16_array_value = 13; + Uint32s uint32_array_value = 14; + Uint64s uint64_array_value = 15; + Uintptrs uintptr_array_value = 16; + Int8s int8_array_value = 17; + Int16s int16_array_value = 18; + Int32s int32_array_value = 19; + Int64s int64_array_value = 20; + Bools bool_array_value = 21; + Float64s float64_array_value = 22; + Float32s float32_array_value = 23; + } +} diff --git a/pkg/state/printer.go b/pkg/state/printer.go new file mode 100644 index 000000000..c61ec4a26 --- /dev/null +++ b/pkg/state/printer.go @@ -0,0 +1,188 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/golang/protobuf/proto" + pb "gvisor.googlesource.com/gvisor/pkg/state/object_go_proto" +) + +// format formats a single object, for pretty-printing. 
+func format(graph uint64, depth int, object *pb.Object, html bool) (string, bool) { + switch x := object.GetValue().(type) { + case *pb.Object_BoolValue: + return fmt.Sprintf("%t", x.BoolValue), x.BoolValue != false + case *pb.Object_StringValue: + return fmt.Sprintf("\"%s\"", x.StringValue), x.StringValue != "" + case *pb.Object_Int64Value: + return fmt.Sprintf("%d", x.Int64Value), x.Int64Value != 0 + case *pb.Object_Uint64Value: + return fmt.Sprintf("%du", x.Uint64Value), x.Uint64Value != 0 + case *pb.Object_DoubleValue: + return fmt.Sprintf("%f", x.DoubleValue), x.DoubleValue != 0.0 + case *pb.Object_RefValue: + if x.RefValue == 0 { + return "nil", false + } + ref := fmt.Sprintf("g%dr%d", graph, x.RefValue) + if html { + ref = fmt.Sprintf("<a href=#%s>%s</a>", ref, ref) + } + return ref, true + case *pb.Object_SliceValue: + if x.SliceValue.RefValue == 0 { + return "nil", false + } + ref := fmt.Sprintf("g%dr%d", graph, x.SliceValue.RefValue) + if html { + ref = fmt.Sprintf("<a href=#%s>%s</a>", ref, ref) + } + return fmt.Sprintf("%s[:%d:%d]", ref, x.SliceValue.Length, x.SliceValue.Capacity), true + case *pb.Object_ArrayValue: + if len(x.ArrayValue.Contents) == 0 { + return "[]", false + } + items := make([]string, 0, len(x.ArrayValue.Contents)+2) + zeros := make([]string, 0) // used to eliminate zero entries. + items = append(items, "[") + tabs := "\n" + strings.Repeat("\t", depth) + for i := 0; i < len(x.ArrayValue.Contents); i++ { + item, ok := format(graph, depth+1, x.ArrayValue.Contents[i], html) + if ok { + if len(zeros) > 0 { + items = append(items, zeros...) + zeros = nil + } + items = append(items, fmt.Sprintf("\t%s,", item)) + } else { + zeros = append(zeros, fmt.Sprintf("\t%s,", item)) + } + } + if len(zeros) > 0 { + items = append(items, fmt.Sprintf("\t... 
(%d zero),", len(zeros))) + } + items = append(items, "]") + return strings.Join(items, tabs), len(zeros) < len(x.ArrayValue.Contents) + case *pb.Object_StructValue: + if len(x.StructValue.Fields) == 0 { + return "struct{}", false + } + items := make([]string, 0, len(x.StructValue.Fields)+2) + items = append(items, "struct{") + tabs := "\n" + strings.Repeat("\t", depth) + allZero := true + for _, field := range x.StructValue.Fields { + element, ok := format(graph, depth+1, field.Value, html) + allZero = allZero && !ok + items = append(items, fmt.Sprintf("\t%s: %s,", field.Name, element)) + } + items = append(items, "}") + return strings.Join(items, tabs), !allZero + case *pb.Object_MapValue: + if len(x.MapValue.Keys) == 0 { + return "map{}", false + } + items := make([]string, 0, len(x.MapValue.Keys)+2) + items = append(items, "map{") + tabs := "\n" + strings.Repeat("\t", depth) + for i := 0; i < len(x.MapValue.Keys); i++ { + key, _ := format(graph, depth+1, x.MapValue.Keys[i], html) + value, _ := format(graph, depth+1, x.MapValue.Values[i], html) + items = append(items, fmt.Sprintf("\t%s: %s,", key, value)) + } + items = append(items, "}") + return strings.Join(items, tabs), true + case *pb.Object_InterfaceValue: + if x.InterfaceValue.Type == "" { + return "interface(nil){}", false + } + element, _ := format(graph, depth+1, x.InterfaceValue.Value, html) + return fmt.Sprintf("interface(\"%s\"){%s}", x.InterfaceValue.Type, element), true + } + + // Should not happen, but tolerate. + return fmt.Sprintf("(unknown proto type: %T)", object.GetValue()), true +} + +// PrettyPrint reads the state stream from r, and pretty prints to w. +func PrettyPrint(w io.Writer, r io.Reader, html bool) error { + var ( + // current graph ID. + graph uint64 + + // current object ID. + id uint64 + ) + + if html { + fmt.Fprintf(w, "<pre>") + defer fmt.Fprintf(w, "</pre>") + } + + for { + // Find the first object to begin generation. 
+ length, object, err := ReadHeader(r) + if err == io.EOF { + // Nothing else to do. + break + } else if err != nil { + return err + } + if !object { + // Increment the graph number & reset the ID. + graph++ + id = 0 + if length > 0 { + fmt.Fprintf(w, "(%d bytes non-object data)\n", length) + io.Copy(ioutil.Discard, &io.LimitedReader{ + R: r, + N: int64(length), + }) + } + continue + } + + // Read & unmarshal the object. + buf := make([]byte, length) + for done := 0; done < len(buf); { + n, err := r.Read(buf[done:]) + done += n + if n == 0 && err != nil { + return err + } + } + obj := new(pb.Object) + if err := proto.Unmarshal(buf, obj); err != nil { + return err + } + + id++ // First object must be one. + str, _ := format(graph, 0, obj, html) + tag := fmt.Sprintf("g%dr%d", graph, id) + if html { + tag = fmt.Sprintf("<a name=%s>%s</a>", tag, tag) + } + if _, err := fmt.Fprintf(w, "%s = %s\n", tag, str); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/state/state.go b/pkg/state/state.go new file mode 100644 index 000000000..23a0b5922 --- /dev/null +++ b/pkg/state/state.go @@ -0,0 +1,349 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package state provides functionality related to saving and loading object +// graphs. For most types, it provides a set of default saving / loading logic +// that will be invoked automatically if custom logic is not defined. 
+// +// Kind Support +// ---- ------- +// Bool default +// Int default +// Int8 default +// Int16 default +// Int32 default +// Int64 default +// Uint default +// Uint8 default +// Uint16 default +// Uint32 default +// Uint64 default +// Float32 default +// Float64 default +// Complex64 custom +// Complex128 custom +// Array default +// Chan custom +// Func custom +// Interface custom +// Map default (*) +// Ptr default +// Slice default +// String default +// Struct custom +// UnsafePointer custom +// +// (*) Maps are treated as value types by this package, even if they are +// pointers internally. If you want to save two independent references +// to the same map value, you must explicitly use a pointer to a map. +package state + +import ( + "fmt" + "io" + "reflect" + "runtime" + + pb "gvisor.googlesource.com/gvisor/pkg/state/object_go_proto" +) + +// ErrState is returned when an error is encountered during encode/decode. +type ErrState struct { + // Err is the underlying error. + Err error + + // path is the visit path from root to the current object. + path string + + // trace is the stack trace. + trace string +} + +// Error returns a sensible description of the state error. +func (e *ErrState) Error() string { + return fmt.Sprintf("%v:\nstate path: %s\n%s", e.Err, e.path, e.trace) +} + +// Save saves the given object state. +func Save(w io.Writer, rootPtr interface{}, stats *Stats) error { + // Create the encoding state. + es := &encodeState{ + idsByObject: make(map[uintptr]uint64), + w: w, + stats: stats, + } + + // Perform the encoding. + return es.safely(func() { + es.Serialize(reflect.ValueOf(rootPtr).Elem()) + }) +} + +// Load loads a checkpoint. +func Load(r io.Reader, rootPtr interface{}, stats *Stats) error { + // Create the decoding state. + ds := &decodeState{ + objectsByID: make(map[uint64]*objectState), + deferred: make(map[uint64]*pb.Object), + r: r, + stats: stats, + } + + // Attempt our decode. 
+ return ds.safely(func() { + ds.Deserialize(reflect.ValueOf(rootPtr).Elem()) + }) +} + +// Fns are the state dispatch functions. +type Fns struct { + // Save is a function like Save(concreteType, Map). + Save interface{} + + // Load is a function like Load(concreteType, Map). + Load interface{} +} + +// Save executes the save function. +func (fns *Fns) invokeSave(obj reflect.Value, m Map) { + reflect.ValueOf(fns.Save).Call([]reflect.Value{obj, reflect.ValueOf(m)}) +} + +// Load executes the load function. +func (fns *Fns) invokeLoad(obj reflect.Value, m Map) { + reflect.ValueOf(fns.Load).Call([]reflect.Value{obj, reflect.ValueOf(m)}) +} + +// validateStateFn ensures types are correct. +func validateStateFn(fn interface{}, typ reflect.Type) bool { + fnTyp := reflect.TypeOf(fn) + if fnTyp.Kind() != reflect.Func { + return false + } + if fnTyp.NumIn() != 2 { + return false + } + if fnTyp.NumOut() != 0 { + return false + } + if fnTyp.In(0) != typ { + return false + } + if fnTyp.In(1) != reflect.TypeOf(Map{}) { + return false + } + return true +} + +// Validate validates all state functions. +func (fns *Fns) Validate(typ reflect.Type) bool { + return validateStateFn(fns.Save, typ) && validateStateFn(fns.Load, typ) +} + +type typeDatabase struct { + // nameToType is a forward lookup table. + nameToType map[string]reflect.Type + + // typeToName is the reverse lookup table. + typeToName map[reflect.Type]string + + // typeToFns is the function lookup table. + typeToFns map[reflect.Type]Fns +} + +// registeredTypes is a database used for SaveInterface and LoadInterface. +var registeredTypes = typeDatabase{ + nameToType: make(map[string]reflect.Type), + typeToName: make(map[reflect.Type]string), + typeToFns: make(map[reflect.Type]Fns), +} + +// register registers a type under the given name. This will generally be +// called via init() methods, and therefore uses panic to propagate errors. 
+func (t *typeDatabase) register(name string, typ reflect.Type, fns Fns) { + // We can't allow name collisions. + if ot, ok := t.nameToType[name]; ok { + panic(fmt.Sprintf("type %q can't use name %q, already in use by type %q", typ.Name(), name, ot.Name())) + } + + // Or multiple registrations. + if on, ok := t.typeToName[typ]; ok { + panic(fmt.Sprintf("type %q can't be registered as %q, already registered as %q", typ.Name(), name, on)) + } + + t.nameToType[name] = typ + t.typeToName[typ] = name + t.typeToFns[typ] = fns +} + +// lookupType finds a type given a name. +func (t *typeDatabase) lookupType(name string) (reflect.Type, bool) { + typ, ok := t.nameToType[name] + return typ, ok +} + +// lookupName finds a name given a type. +func (t *typeDatabase) lookupName(typ reflect.Type) (string, bool) { + name, ok := t.typeToName[typ] + return name, ok +} + +// lookupFns finds functions given a type. +func (t *typeDatabase) lookupFns(typ reflect.Type) (Fns, bool) { + fns, ok := t.typeToFns[typ] + return fns, ok +} + +// Register must be called for any interface implementation types that +// implements Loader. +// +// Register should be called either immediately after startup or via init() +// methods. Double registration of either names or types will result in a panic. +// +// No synchronization is provided; this should only be called in init. +// +// Example usage: +// +// state.Register("Foo", (*Foo)(nil), state.Fns{ +// Save: (*Foo).Save, +// Load: (*Foo).Load, +// }) +// +func Register(name string, instance interface{}, fns Fns) { + registeredTypes.register(name, reflect.TypeOf(instance), fns) +} + +// IsZeroValue checks if the given value is the zero value. +// +// This function is used by the stateify tool. +func IsZeroValue(val interface{}) bool { + if val == nil { + return true + } + return reflect.DeepEqual(val, reflect.Zero(reflect.TypeOf(val)).Interface()) +} + +// step captures one encoding / decoding step. 
On each step, there is up to one +// choice made, which is captured by non-nil param. We intentionally do not +// eagerly create the final path string, as that will only be needed upon panic. +type step struct { + // dereference indicate if the current object is obtained by + // dereferencing a pointer. + dereference bool + + // format is the formatting string that takes param below, if + // non-nil. For example, in array indexing case, we have "[%d]". + format string + + // param stores the choice made at the current encoding / decoding step. + // For eaxmple, in array indexing case, param stores the index. When no + // choice is made, e.g. dereference, param should be nil. + param interface{} +} + +// recoverable is the state encoding / decoding panic recovery facility. It is +// also used to store encoding / decoding steps as well as the reference to the +// original queued object from which the current object is dispatched. The +// complete encoding / decoding path is synthesised from the steps in all queued +// objects leading to the current object. +type recoverable struct { + from *recoverable + steps []step +} + +// push enters a new context level. +func (sr *recoverable) push(dereference bool, format string, param interface{}) { + sr.steps = append(sr.steps, step{dereference, format, param}) +} + +// pop exits the current context level. +func (sr *recoverable) pop() { + if len(sr.steps) <= 1 { + return + } + sr.steps = sr.steps[:len(sr.steps)-1] +} + +// path returns the complete encoding / decoding path from root. This is only +// called upon panic. 
+func (sr *recoverable) path() string { + if sr.from == nil { + return "root" + } + p := sr.from.path() + for _, s := range sr.steps { + if s.dereference { + p = fmt.Sprintf("*(%s)", p) + } + if s.param == nil { + p += s.format + } else { + p += fmt.Sprintf(s.format, s.param) + } + } + return p +} + +func (sr *recoverable) copy() recoverable { + return recoverable{from: sr.from, steps: append([]step(nil), sr.steps...)} +} + +// safely executes the given function, catching a panic and unpacking as an error. +// +// The error flow through the state package uses panic and recover. There are +// two important reasons for this: +// +// 1) Many of the reflection methods will already panic with invalid data or +// violated assumptions. We would want to recover anyways here. +// +// 2) It allows us to eliminate boilerplate within Save() and Load() functions. +// In nearly all cases, when the low-level serialization functions fail, you +// will want the checkpoint to fail anyways. Plumbing errors through every +// method doesn't add a lot of value. If there are specific error conditions +// that you'd like to handle, you should add appropriate functionality to +// objects themselves prior to calling Save() and Load(). +func (sr *recoverable) safely(fn func()) (err error) { + defer func() { + if r := recover(); r != nil { + es := new(ErrState) + if e, ok := r.(error); ok { + es.Err = e + } else { + es.Err = fmt.Errorf("%v", r) + } + + es.path = sr.path() + + // Make a stack. We don't know how big it will be ahead + // of time, but want to make sure we get the whole + // thing. So we just do a stupid brute force approach. + var stack []byte + for sz := 1024; ; sz *= 2 { + stack = make([]byte, sz) + n := runtime.Stack(stack, false) + if n < sz { + es.trace = string(stack[:n]) + break + } + } + + // Set the error. + err = es + } + }() + + // Execute the function. 
+ fn() + return nil +} diff --git a/pkg/state/state_test.go b/pkg/state/state_test.go new file mode 100644 index 000000000..d5a739f18 --- /dev/null +++ b/pkg/state/state_test.go @@ -0,0 +1,719 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "bytes" + "io/ioutil" + "math" + "reflect" + "testing" +) + +// TestCase is used to define a single success/failure testcase of +// serialization of a set of objects. +type TestCase struct { + // Name is the name of the test case. + Name string + + // Objects is the list of values to serialize. + Objects []interface{} + + // Fail is whether the test case is supposed to fail or not. + Fail bool +} + +// runTest runs all testcases. +func runTest(t *testing.T, tests []TestCase) { + for _, test := range tests { + t.Logf("TEST %s:", test.Name) + for i, root := range test.Objects { + t.Logf(" case#%d: %#v", i, root) + + // Save the passed object. + saveBuffer := &bytes.Buffer{} + saveObjectPtr := reflect.New(reflect.TypeOf(root)) + saveObjectPtr.Elem().Set(reflect.ValueOf(root)) + if err := Save(saveBuffer, saveObjectPtr.Interface(), nil); err != nil && !test.Fail { + t.Errorf(" FAIL: Save failed unexpectedly: %v", err) + continue + } else if err != nil { + t.Logf(" PASS: Save failed as expected: %v", err) + continue + } + + // Load a new copy of the object. 
+ loadObjectPtr := reflect.New(reflect.TypeOf(root)) + if err := Load(bytes.NewReader(saveBuffer.Bytes()), loadObjectPtr.Interface(), nil); err != nil && !test.Fail { + t.Errorf(" FAIL: Load failed unexpectedly: %v", err) + continue + } else if err != nil { + t.Logf(" PASS: Load failed as expected: %v", err) + continue + } + + // Compare the values. + loadedValue := loadObjectPtr.Elem().Interface() + if eq := reflect.DeepEqual(root, loadedValue); !eq && !test.Fail { + t.Errorf(" FAIL: Objects differs; got %#v", loadedValue) + continue + } else if !eq { + t.Logf(" PASS: Object different as expected.") + continue + } + + // Everything went okay. Is that good? + if test.Fail { + t.Errorf(" FAIL: Unexpected success.") + } else { + t.Logf(" PASS: Success.") + } + } + } +} + +// dumbStruct is a struct which does not implement the loader/saver interface. +// We expect that serialization of this struct will fail. +type dumbStruct struct { + A int + B int +} + +// smartStruct is a struct which does implement the loader/saver interface. +// We expect that serialization of this struct will succeed. +type smartStruct struct { + A int + B int +} + +func (s *smartStruct) save(m Map) { + m.Save("A", &s.A) + m.Save("B", &s.B) +} + +func (s *smartStruct) load(m Map) { + m.Load("A", &s.A) + m.Load("B", &s.B) +} + +// valueLoadStruct uses a value load. +type valueLoadStruct struct { + v int +} + +func (v *valueLoadStruct) save(m Map) { + m.SaveValue("v", v.v) +} + +func (v *valueLoadStruct) load(m Map) { + m.LoadValue("v", new(int), func(value interface{}) { + v.v = value.(int) + }) +} + +// afterLoadStruct has an AfterLoad function. +type afterLoadStruct struct { + v int +} + +func (a *afterLoadStruct) save(m Map) { +} + +func (a *afterLoadStruct) load(m Map) { + m.AfterLoad(func() { + a.v++ + }) +} + +// genericContainer is a generic dispatcher. 
+type genericContainer struct { + v interface{} +} + +func (g *genericContainer) save(m Map) { + m.Save("v", &g.v) +} + +func (g *genericContainer) load(m Map) { + m.Load("v", &g.v) +} + +// sliceContainer is a generic slice. +type sliceContainer struct { + v []interface{} +} + +func (s *sliceContainer) save(m Map) { + m.Save("v", &s.v) +} + +func (s *sliceContainer) load(m Map) { + m.Load("v", &s.v) +} + +// mapContainer is a generic map. +type mapContainer struct { + v map[int]interface{} +} + +func (mc *mapContainer) save(m Map) { + m.Save("v", &mc.v) +} + +func (mc *mapContainer) load(m Map) { + // Some of the test cases below assume legacy behavior wherein maps + // will automatically inherit dependencies. + m.LoadWait("v", &mc.v) +} + +// dumbMap is a map which does not implement the loader/saver interface. +// Serialization of this map will default to the standard encode/decode logic. +type dumbMap map[string]int + +// pointerStruct contains various pointers, shared and non-shared, and pointers +// to pointers. We expect that serialization will respect the structure. +type pointerStruct struct { + A *int + B *int + C *int + D *int + + AA **int + BB **int +} + +func (p *pointerStruct) save(m Map) { + m.Save("A", &p.A) + m.Save("B", &p.B) + m.Save("C", &p.C) + m.Save("D", &p.D) + m.Save("AA", &p.AA) + m.Save("BB", &p.BB) +} + +func (p *pointerStruct) load(m Map) { + m.Load("A", &p.A) + m.Load("B", &p.B) + m.Load("C", &p.C) + m.Load("D", &p.D) + m.Load("AA", &p.AA) + m.Load("BB", &p.BB) +} + +// testInterface is a trivial interface example. +type testInterface interface { + Foo() +} + +// testImpl is a trivial implementation of testInterface. +type testImpl struct { +} + +// Foo satisfies testInterface. +func (t *testImpl) Foo() { +} + +// testImpl is trivially serializable. +func (t *testImpl) save(m Map) { +} + +// testImpl is trivially serializable. +func (t *testImpl) load(m Map) { +} + +// testI demonstrates interface dispatching. 
+type testI struct { + I testInterface +} + +func (t *testI) save(m Map) { + m.Save("I", &t.I) +} + +func (t *testI) load(m Map) { + m.Load("I", &t.I) +} + +// cycleStruct is used to implement basic cycles. +type cycleStruct struct { + c *cycleStruct +} + +func (c *cycleStruct) save(m Map) { + m.Save("c", &c.c) +} + +func (c *cycleStruct) load(m Map) { + m.Load("c", &c.c) +} + +// badCycleStruct actually has deadlocking dependencies. +// +// This should pass if b.b = {nil|b} and fail otherwise. +type badCycleStruct struct { + b *badCycleStruct +} + +func (b *badCycleStruct) save(m Map) { + m.Save("b", &b.b) +} + +func (b *badCycleStruct) load(m Map) { + m.LoadWait("b", &b.b) + m.AfterLoad(func() { + // This is not executable, since AfterLoad requires that the + // object and all dependencies are complete. This should cause + // a deadlock error during load. + }) +} + +// emptyStructPointer points to an empty struct. +type emptyStructPointer struct { + nothing *struct{} +} + +func (e *emptyStructPointer) save(m Map) { + m.Save("nothing", &e.nothing) +} + +func (e *emptyStructPointer) load(m Map) { + m.Load("nothing", &e.nothing) +} + +// truncateInteger truncates an integer. +type truncateInteger struct { + v int64 + v2 int32 +} + +func (t *truncateInteger) save(m Map) { + t.v2 = int32(t.v) + m.Save("v", &t.v) +} + +func (t *truncateInteger) load(m Map) { + m.Load("v", &t.v2) + t.v = int64(t.v2) +} + +// truncateUnsignedInteger truncates an unsigned integer. +type truncateUnsignedInteger struct { + v uint64 + v2 uint32 +} + +func (t *truncateUnsignedInteger) save(m Map) { + t.v2 = uint32(t.v) + m.Save("v", &t.v) +} + +func (t *truncateUnsignedInteger) load(m Map) { + m.Load("v", &t.v2) + t.v = uint64(t.v2) +} + +// truncateFloat truncates a floating point number. 
+type truncateFloat struct { + v float64 + v2 float32 +} + +func (t *truncateFloat) save(m Map) { + t.v2 = float32(t.v) + m.Save("v", &t.v) +} + +func (t *truncateFloat) load(m Map) { + m.Load("v", &t.v2) + t.v = float64(t.v2) +} + +func TestTypes(t *testing.T) { + // x and y are basic integers, while xp points to x. + x := 1 + y := 2 + xp := &x + + // cs is a single object cycle. + cs := cycleStruct{nil} + cs.c = &cs + + // cs1 and cs2 are in a two object cycle. + cs1 := cycleStruct{nil} + cs2 := cycleStruct{nil} + cs1.c = &cs2 + cs2.c = &cs1 + + // bs is a single object cycle. + bs := badCycleStruct{nil} + bs.b = &bs + + // bs2 and bs2 are in a deadlocking cycle. + bs1 := badCycleStruct{nil} + bs2 := badCycleStruct{nil} + bs1.b = &bs2 + bs2.b = &bs1 + + // regular nils. + var ( + nilmap dumbMap + nilslice []byte + ) + + // embed points to embedded fields. + embed1 := pointerStruct{} + embed1.AA = &embed1.A + embed2 := pointerStruct{} + embed2.BB = &embed2.B + + // es1 contains two structs pointing to the same empty struct. 
+ es := emptyStructPointer{new(struct{})} + es1 := []emptyStructPointer{es, es} + + tests := []TestCase{ + { + Name: "bool", + Objects: []interface{}{ + true, + false, + }, + }, + { + Name: "integers", + Objects: []interface{}{ + int(0), + int(1), + int(-1), + int8(0), + int8(1), + int8(-1), + int16(0), + int16(1), + int16(-1), + int32(0), + int32(1), + int32(-1), + int64(0), + int64(1), + int64(-1), + }, + }, + { + Name: "unsigned integers", + Objects: []interface{}{ + uint(0), + uint(1), + uint8(0), + uint8(1), + uint16(0), + uint16(1), + uint32(1), + uint64(0), + uint64(1), + }, + }, + { + Name: "strings", + Objects: []interface{}{ + "", + "foo", + "bar", + }, + }, + { + Name: "slices", + Objects: []interface{}{ + []int{-1, 0, 1}, + []*int{&x, &x, &x}, + []int{1, 2, 3}[0:1], + []int{1, 2, 3}[1:2], + make([]byte, 32), + make([]byte, 32)[:16], + make([]byte, 32)[:16:20], + nilslice, + }, + }, + { + Name: "arrays", + Objects: []interface{}{ + &[1048576]bool{false, true, false, true}, + &[1048576]uint8{0, 1, 2, 3}, + &[1048576]byte{0, 1, 2, 3}, + &[1048576]uint16{0, 1, 2, 3}, + &[1048576]uint{0, 1, 2, 3}, + &[1048576]uint32{0, 1, 2, 3}, + &[1048576]uint64{0, 1, 2, 3}, + &[1048576]uintptr{0, 1, 2, 3}, + &[1048576]int8{0, -1, -2, -3}, + &[1048576]int16{0, -1, -2, -3}, + &[1048576]int32{0, -1, -2, -3}, + &[1048576]int64{0, -1, -2, -3}, + &[1048576]float32{0, 1.1, 2.2, 3.3}, + &[1048576]float64{0, 1.1, 2.2, 3.3}, + }, + }, + { + Name: "pointers", + Objects: []interface{}{ + &pointerStruct{A: &x, B: &x, C: &y, D: &y, AA: &xp, BB: &xp}, + &pointerStruct{}, + }, + }, + { + Name: "empty struct", + Objects: []interface{}{ + struct{}{}, + }, + }, + { + Name: "unenlightened structs", + Objects: []interface{}{ + &dumbStruct{A: 1, B: 2}, + }, + Fail: true, + }, + { + Name: "enlightened structs", + Objects: []interface{}{ + &smartStruct{A: 1, B: 2}, + }, + }, + { + Name: "load-hooks", + Objects: []interface{}{ + &afterLoadStruct{v: 1}, + &valueLoadStruct{v: 1}, + 
&genericContainer{v: &afterLoadStruct{v: 1}}, + &genericContainer{v: &valueLoadStruct{v: 1}}, + &sliceContainer{v: []interface{}{&afterLoadStruct{v: 1}}}, + &sliceContainer{v: []interface{}{&valueLoadStruct{v: 1}}}, + &mapContainer{v: map[int]interface{}{0: &afterLoadStruct{v: 1}}}, + &mapContainer{v: map[int]interface{}{0: &valueLoadStruct{v: 1}}}, + }, + }, + { + Name: "maps", + Objects: []interface{}{ + dumbMap{"a": -1, "b": 0, "c": 1}, + map[smartStruct]int{{}: 0, {A: 1}: 1}, + nilmap, + &mapContainer{v: map[int]interface{}{0: &smartStruct{A: 1}}}, + }, + }, + { + Name: "interfaces", + Objects: []interface{}{ + &testI{&testImpl{}}, + &testI{nil}, + &testI{(*testImpl)(nil)}, + }, + }, + { + Name: "unregistered-interfaces", + Objects: []interface{}{ + &genericContainer{v: afterLoadStruct{v: 1}}, + &genericContainer{v: valueLoadStruct{v: 1}}, + &sliceContainer{v: []interface{}{afterLoadStruct{v: 1}}}, + &sliceContainer{v: []interface{}{valueLoadStruct{v: 1}}}, + &mapContainer{v: map[int]interface{}{0: afterLoadStruct{v: 1}}}, + &mapContainer{v: map[int]interface{}{0: valueLoadStruct{v: 1}}}, + }, + Fail: true, + }, + { + Name: "cycles", + Objects: []interface{}{ + &cs, + &cs1, + &cycleStruct{&cs1}, + &cycleStruct{&cs}, + &badCycleStruct{nil}, + &bs, + }, + }, + { + Name: "deadlock", + Objects: []interface{}{ + &bs1, + }, + Fail: true, + }, + { + Name: "embed", + Objects: []interface{}{ + &embed1, + &embed2, + }, + Fail: true, + }, + { + Name: "empty structs", + Objects: []interface{}{ + new(struct{}), + es, + es1, + }, + }, + { + Name: "truncated okay", + Objects: []interface{}{ + &truncateInteger{v: 1}, + &truncateUnsignedInteger{v: 1}, + &truncateFloat{v: 1.0}, + }, + }, + { + Name: "truncated bad", + Objects: []interface{}{ + &truncateInteger{v: math.MaxInt32 + 1}, + &truncateUnsignedInteger{v: math.MaxUint32 + 1}, + &truncateFloat{v: math.MaxFloat32 * 2}, + }, + Fail: true, + }, + } + + runTest(t, tests) +} + +// benchStruct is used for benchmarking. 
+type benchStruct struct { + b *benchStruct + + // Dummy data is included to ensure that these objects are large. + // This is to detect possible regression when registering objects. + _ [4096]byte +} + +func (b *benchStruct) save(m Map) { + m.Save("b", &b.b) +} + +func (b *benchStruct) load(m Map) { + m.LoadWait("b", &b.b) + m.AfterLoad(b.afterLoad) +} + +func (b *benchStruct) afterLoad() { + // Do nothing, just force scheduling. +} + +// buildObject builds a benchmark object. +func buildObject(n int) (b *benchStruct) { + for i := 0; i < n; i++ { + b = &benchStruct{b: b} + } + return +} + +func BenchmarkEncoding(b *testing.B) { + b.StopTimer() + bs := buildObject(b.N) + var stats Stats + b.StartTimer() + if err := Save(ioutil.Discard, bs, &stats); err != nil { + b.Errorf("save failed: %v", err) + } + b.StopTimer() + if b.N > 1000 { + b.Logf("breakdown (n=%d): %s", b.N, &stats) + } +} + +func BenchmarkDecoding(b *testing.B) { + b.StopTimer() + bs := buildObject(b.N) + var newBS benchStruct + buf := &bytes.Buffer{} + if err := Save(buf, bs, nil); err != nil { + b.Errorf("save failed: %v", err) + } + var stats Stats + b.StartTimer() + if err := Load(buf, &newBS, &stats); err != nil { + b.Errorf("load failed: %v", err) + } + b.StopTimer() + if b.N > 1000 { + b.Logf("breakdown (n=%d): %s", b.N, &stats) + } +} + +func init() { + Register("stateTest.smartStruct", (*smartStruct)(nil), Fns{ + Save: (*smartStruct).save, + Load: (*smartStruct).load, + }) + Register("stateTest.afterLoadStruct", (*afterLoadStruct)(nil), Fns{ + Save: (*afterLoadStruct).save, + Load: (*afterLoadStruct).load, + }) + Register("stateTest.valueLoadStruct", (*valueLoadStruct)(nil), Fns{ + Save: (*valueLoadStruct).save, + Load: (*valueLoadStruct).load, + }) + Register("stateTest.genericContainer", (*genericContainer)(nil), Fns{ + Save: (*genericContainer).save, + Load: (*genericContainer).load, + }) + Register("stateTest.sliceContainer", (*sliceContainer)(nil), Fns{ + Save: (*sliceContainer).save, + 
Load: (*sliceContainer).load, + }) + Register("stateTest.mapContainer", (*mapContainer)(nil), Fns{ + Save: (*mapContainer).save, + Load: (*mapContainer).load, + }) + Register("stateTest.pointerStruct", (*pointerStruct)(nil), Fns{ + Save: (*pointerStruct).save, + Load: (*pointerStruct).load, + }) + Register("stateTest.testImpl", (*testImpl)(nil), Fns{ + Save: (*testImpl).save, + Load: (*testImpl).load, + }) + Register("stateTest.testI", (*testI)(nil), Fns{ + Save: (*testI).save, + Load: (*testI).load, + }) + Register("stateTest.cycleStruct", (*cycleStruct)(nil), Fns{ + Save: (*cycleStruct).save, + Load: (*cycleStruct).load, + }) + Register("stateTest.badCycleStruct", (*badCycleStruct)(nil), Fns{ + Save: (*badCycleStruct).save, + Load: (*badCycleStruct).load, + }) + Register("stateTest.emptyStructPointer", (*emptyStructPointer)(nil), Fns{ + Save: (*emptyStructPointer).save, + Load: (*emptyStructPointer).load, + }) + Register("stateTest.truncateInteger", (*truncateInteger)(nil), Fns{ + Save: (*truncateInteger).save, + Load: (*truncateInteger).load, + }) + Register("stateTest.truncateUnsignedInteger", (*truncateUnsignedInteger)(nil), Fns{ + Save: (*truncateUnsignedInteger).save, + Load: (*truncateUnsignedInteger).load, + }) + Register("stateTest.truncateFloat", (*truncateFloat)(nil), Fns{ + Save: (*truncateFloat).save, + Load: (*truncateFloat).load, + }) + Register("stateTest.benchStruct", (*benchStruct)(nil), Fns{ + Save: (*benchStruct).save, + Load: (*benchStruct).load, + }) +} diff --git a/pkg/state/statefile/BUILD b/pkg/state/statefile/BUILD new file mode 100644 index 000000000..df2c6a578 --- /dev/null +++ b/pkg/state/statefile/BUILD @@ -0,0 +1,23 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "statefile", + srcs = ["statefile.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/state/statefile", + visibility = ["//:sandbox"], + deps = [ + "//pkg/binary", + 
"//pkg/compressio", + "//pkg/hashio", + ], +) + +go_test( + name = "statefile_test", + size = "small", + srcs = ["statefile_test.go"], + embed = [":statefile"], + deps = ["//pkg/hashio"], +) diff --git a/pkg/state/statefile/statefile.go b/pkg/state/statefile/statefile.go new file mode 100644 index 000000000..b25b743b7 --- /dev/null +++ b/pkg/state/statefile/statefile.go @@ -0,0 +1,233 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package statefile defines the state file data stream. +// +// This package currently does not include any details regarding the state +// encoding itself, only details regarding state metadata and data layout. +// +// The file format is defined as follows. +// +// /------------------------------------------------------\ +// | header (8-bytes) | +// +------------------------------------------------------+ +// | metadata length (8-bytes) | +// +------------------------------------------------------+ +// | metadata | +// +------------------------------------------------------+ +// | data | +// \------------------------------------------------------/ +// +// First, it includes a 8-byte magic header which is the following +// sequence of bytes [0x67, 0x56, 0x69, 0x73, 0x6f, 0x72, 0x53, 0x46] +// +// This header is followed by an 8-byte length N (big endian), and an +// ASCII-encoded JSON map that is exactly N bytes long. +// +// This map includes only strings for keys and strings for values. 
Keys in the +// map that begin with "_" are for internal use only. They may be read, but may +// not be provided by the user. In the future, this metadata may contain some +// information relating to the state encoding itself. +// +// After the map, the remainder of the file is the state data. +package statefile + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/json" + "fmt" + "hash" + "io" + "strings" + "time" + + "gvisor.googlesource.com/gvisor/pkg/binary" + "gvisor.googlesource.com/gvisor/pkg/compressio" + "gvisor.googlesource.com/gvisor/pkg/hashio" +) + +// keySize is the AES-256 key length. +const keySize = 32 + +// compressionChunkSize is the chunk size for compression. +const compressionChunkSize = 1024 * 1024 + +// maxMetadataSize is the size limit of metadata section. +const maxMetadataSize = 16 * 1024 * 1024 + +// magicHeader is the byte sequence beginning each file. +var magicHeader = []byte("\x67\x56\x69\x73\x6f\x72\x53\x46") + +// ErrBadMagic is returned if the header does not match. +var ErrBadMagic = fmt.Errorf("bad magic header") + +// ErrMetadataMissing is returned if the state file is missing mandatory metadata. +var ErrMetadataMissing = fmt.Errorf("missing metadata") + +// ErrInvalidMetadataLength is returned if the metadata length is too large. +var ErrInvalidMetadataLength = fmt.Errorf("metadata length invalid, maximum size is %d", maxMetadataSize) + +// ErrMetadataInvalid is returned if passed metadata is invalid. +var ErrMetadataInvalid = fmt.Errorf("metadata invalid, can't start with _") + +// NewWriter returns a state data writer for a statefile. +// +// Note that the returned WriteCloser must be closed. +func NewWriter(w io.Writer, key []byte, metadata map[string]string, compressionLevel int) (io.WriteCloser, error) { + if metadata == nil { + metadata = make(map[string]string) + } + for k := range metadata { + if strings.HasPrefix(k, "_") { + return nil, ErrMetadataInvalid + } + } + + // Create our HMAC function. 
+ h := hmac.New(sha256.New, key) + mw := io.MultiWriter(w, h) + + // First, write the header. + if _, err := mw.Write(magicHeader); err != nil { + return nil, err + } + + // Generate a timestamp, for convenience only. + metadata["_timestamp"] = time.Now().UTC().String() + defer delete(metadata, "_timestamp") + + // Write the metadata. + b, err := json.Marshal(metadata) + if err != nil { + return nil, err + } + + if len(b) > maxMetadataSize { + return nil, ErrInvalidMetadataLength + } + + // Metadata length. + if err := binary.WriteUint64(mw, binary.BigEndian, uint64(len(b))); err != nil { + return nil, err + } + // Metadata bytes; io.MultiWriter will return a short write error if + // any of the writers returns < n. + if _, err := mw.Write(b); err != nil { + return nil, err + } + // Write the current hash. + cur := h.Sum(nil) + for done := 0; done < len(cur); { + n, err := mw.Write(cur[done:]) + done += n + if err != nil { + return nil, err + } + } + + w = hashio.NewWriter(w, h) + + // Wrap in compression. + return compressio.NewWriter(w, compressionChunkSize, compressionLevel) +} + +// MetadataUnsafe reads out the metadata from a state file without verifying any +// HMAC. This function shouldn't be called for untrusted input files. +func MetadataUnsafe(r io.Reader) (map[string]string, error) { + return metadata(r, nil) +} + +// metadata validates the magic header and reads out the metadata from a state +// data stream. +func metadata(r io.Reader, h hash.Hash) (map[string]string, error) { + if h != nil { + r = io.TeeReader(r, h) + } + + // Read and validate magic header. + b := make([]byte, len(magicHeader)) + if _, err := r.Read(b); err != nil { + return nil, err + } + if !bytes.Equal(b, magicHeader) { + return nil, ErrBadMagic + } + + // Read and validate metadata. 
+ b, err := func() (b []byte, err error) { + defer func() { + if r := recover(); r != nil { + b = nil + err = fmt.Errorf("%v", r) + } + }() + + metadataLen, err := binary.ReadUint64(r, binary.BigEndian) + if err != nil { + return nil, err + } + if metadataLen > maxMetadataSize { + return nil, ErrInvalidMetadataLength + } + b = make([]byte, int(metadataLen)) + if _, err := io.ReadFull(r, b); err != nil { + return nil, err + } + return b, nil + }() + if err != nil { + return nil, err + } + + if h != nil { + // Check the hash prior to decoding. + cur := h.Sum(nil) + buf := make([]byte, len(cur)) + if _, err := io.ReadFull(r, buf); err != nil { + return nil, err + } + if !hmac.Equal(cur, buf) { + return nil, hashio.ErrHashMismatch + } + } + + // Decode the metadata. + metadata := make(map[string]string) + if err := json.Unmarshal(b, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// NewReader returns a reader for a statefile. +func NewReader(r io.Reader, key []byte) (io.Reader, map[string]string, error) { + // Read the metadata with the hash. + h := hmac.New(sha256.New, key) + metadata, err := metadata(r, h) + if err != nil { + return nil, nil, err + } + + r = hashio.NewReader(r, h) + + // Wrap in compression. + rc, err := compressio.NewReader(r) + if err != nil { + return nil, nil, err + } + return rc, metadata, nil +} diff --git a/pkg/state/statefile/statefile_test.go b/pkg/state/statefile/statefile_test.go new file mode 100644 index 000000000..6e67b51de --- /dev/null +++ b/pkg/state/statefile/statefile_test.go @@ -0,0 +1,299 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package statefile + +import ( + "bytes" + "compress/flate" + crand "crypto/rand" + "encoding/base64" + "io" + "math/rand" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/hashio" +) + +func randomKey() ([]byte, error) { + r := make([]byte, base64.RawStdEncoding.DecodedLen(keySize)) + if _, err := io.ReadFull(crand.Reader, r); err != nil { + return nil, err + } + key := make([]byte, keySize) + base64.RawStdEncoding.Encode(key, r) + return key, nil +} + +type testCase struct { + name string + data []byte + metadata map[string]string +} + +func TestStatefile(t *testing.T) { + cases := []testCase{ + // Various data sizes. + {"nil", nil, nil}, + {"empty", []byte(""), nil}, + {"some", []byte("_"), nil}, + {"one", []byte("0"), nil}, + {"two", []byte("01"), nil}, + {"three", []byte("012"), nil}, + {"four", []byte("0123"), nil}, + {"five", []byte("01234"), nil}, + {"six", []byte("012356"), nil}, + {"seven", []byte("0123567"), nil}, + {"eight", []byte("01235678"), nil}, + + // Make sure we have one longer than the hash length. + {"longer than hash", []byte("012356asdjflkasjlk3jlk23j4lkjaso0d789f0aujw3lkjlkxsdf78asdful2kj3ljka78"), nil}, + + // Make sure we have one longer than the segment size. 
+ {"segments", make([]byte, 3*hashio.SegmentSize), nil}, + {"segments minus one", make([]byte, 3*hashio.SegmentSize-1), nil}, + {"segments plus one", make([]byte, 3*hashio.SegmentSize+1), nil}, + {"segments minus hash", make([]byte, 3*hashio.SegmentSize-32), nil}, + {"segments plus hash", make([]byte, 3*hashio.SegmentSize+32), nil}, + {"large", make([]byte, 30*hashio.SegmentSize), nil}, + + // Different metadata. + {"one metadata", []byte("data"), map[string]string{"foo": "bar"}}, + {"two metadata", []byte("data"), map[string]string{"foo": "bar", "one": "two"}}, + } + + for _, c := range cases { + // Generate a key. + integrityKey, err := randomKey() + if err != nil { + t.Errorf("can't generate key: got %v, excepted nil", err) + continue + } + + t.Run(c.name, func(t *testing.T) { + for _, key := range [][]byte{nil, integrityKey} { + t.Run("key="+string(key), func(t *testing.T) { + // Encoding happens via a buffer. + var bufEncoded bytes.Buffer + var bufDecoded bytes.Buffer + + // Do all the writing. + w, err := NewWriter(&bufEncoded, key, c.metadata, flate.BestSpeed) + if err != nil { + t.Fatalf("error creating writer: got %v, expected nil", err) + } + if _, err := io.Copy(w, bytes.NewBuffer(c.data)); err != nil { + t.Fatalf("error during write: got %v, expected nil", err) + } + + // Finish the sum. + if err := w.Close(); err != nil { + t.Fatalf("error during close: got %v, expected nil", err) + } + + t.Logf("original data: %d bytes, encoded: %d bytes.", + len(c.data), len(bufEncoded.Bytes())) + + // Do all the reading. + r, metadata, err := NewReader(bytes.NewReader(bufEncoded.Bytes()), key) + if err != nil { + t.Fatalf("error creating reader: got %v, expected nil", err) + } + if _, err := io.Copy(&bufDecoded, r); err != nil { + t.Fatalf("error during read: got %v, expected nil", err) + } + + // Check that the data matches. 
+ if !bytes.Equal(c.data, bufDecoded.Bytes()) { + t.Fatalf("data didn't match (%d vs %d bytes)", len(bufDecoded.Bytes()), len(c.data)) + } + + // Check that the metadata matches. + for k, v := range c.metadata { + nv, ok := metadata[k] + if !ok { + t.Fatalf("missing metadata: %s", k) + } + if v != nv { + t.Fatalf("mismatched metdata for %s: got %s, expected %s", k, nv, v) + } + } + + // Change the data and verify that it fails. + b := append([]byte(nil), bufEncoded.Bytes()...) + b[rand.Intn(len(b))]++ + r, _, err = NewReader(bytes.NewReader(b), key) + if err == nil { + _, err = io.Copy(&bufDecoded, r) + } + if err == nil { + t.Error("got no error: expected error on data corruption") + } + + // Change the key and verify that it fails. + if key == nil { + key = integrityKey + } else { + key[rand.Intn(len(key))]++ + } + r, _, err = NewReader(bytes.NewReader(bufEncoded.Bytes()), key) + if err == nil { + _, err = io.Copy(&bufDecoded, r) + } + if err != hashio.ErrHashMismatch { + t.Errorf("got error: %v, expected ErrHashMismatch on key mismatch", err) + } + }) + } + }) + } +} + +const benchmarkDataSize = 10 * 1024 * 1024 + +func benchmark(b *testing.B, size int, write bool, compressible bool) { + b.StopTimer() + b.SetBytes(benchmarkDataSize) + + // Generate source data. + var source []byte + if compressible { + // For compressible data, we use essentially all zeros. + source = make([]byte, benchmarkDataSize) + } else { + // For non-compressible data, we use random base64 data (to + // make it marginally compressible, a ratio of 75%). + var sourceBuf bytes.Buffer + bufW := base64.NewEncoder(base64.RawStdEncoding, &sourceBuf) + bufR := rand.New(rand.NewSource(0)) + if _, err := io.CopyN(bufW, bufR, benchmarkDataSize); err != nil { + b.Fatalf("unable to seed random data: %v", err) + } + source = sourceBuf.Bytes() + } + + // Generate a random key for integrity check. 
+ key, err := randomKey() + if err != nil { + b.Fatalf("error generating key: %v", err) + } + + // Define our benchmark functions. Prior to running the readState + // function here, you must execute the writeState function at least + // once (done below). + var stateBuf bytes.Buffer + writeState := func() { + stateBuf.Reset() + w, err := NewWriter(&stateBuf, key, nil, flate.BestSpeed) + if err != nil { + b.Fatalf("error creating writer: %v", err) + } + for done := 0; done < len(source); { + chunk := size // limit size. + if done+chunk > len(source) { + chunk = len(source) - done + } + n, err := w.Write(source[done : done+chunk]) + done += n + if n == 0 && err != nil { + b.Fatalf("error during write: %v", err) + } + } + if err := w.Close(); err != nil { + b.Fatalf("error closing writer: %v", err) + } + } + readState := func() { + tmpBuf := bytes.NewBuffer(stateBuf.Bytes()) + r, _, err := NewReader(tmpBuf, key) + if err != nil { + b.Fatalf("error creating reader: %v", err) + } + for done := 0; done < len(source); { + chunk := size // limit size. + if done+chunk > len(source) { + chunk = len(source) - done + } + n, err := r.Read(source[done : done+chunk]) + done += n + if n == 0 && err != nil { + b.Fatalf("error during read: %v", err) + } + } + } + // Generate the state once without timing to ensure that buffers have + // been appropriately allocated. 
+ writeState() + if write { + b.StartTimer() + for i := 0; i < b.N; i++ { + writeState() + } + b.StopTimer() + } else { + b.StartTimer() + for i := 0; i < b.N; i++ { + readState() + } + b.StopTimer() + } +} + +func BenchmarkWrite1BCompressible(b *testing.B) { + benchmark(b, 1, true, true) +} + +func BenchmarkWrite1BNoncompressible(b *testing.B) { + benchmark(b, 1, true, false) +} + +func BenchmarkWrite4KCompressible(b *testing.B) { + benchmark(b, 4096, true, true) +} + +func BenchmarkWrite4KNoncompressible(b *testing.B) { + benchmark(b, 4096, true, false) +} + +func BenchmarkWrite1MCompressible(b *testing.B) { + benchmark(b, 1024*1024, true, true) +} + +func BenchmarkWrite1MNoncompressible(b *testing.B) { + benchmark(b, 1024*1024, true, false) +} + +func BenchmarkRead1BCompressible(b *testing.B) { + benchmark(b, 1, false, true) +} + +func BenchmarkRead1BNoncompressible(b *testing.B) { + benchmark(b, 1, false, false) +} + +func BenchmarkRead4KCompressible(b *testing.B) { + benchmark(b, 4096, false, true) +} + +func BenchmarkRead4KNoncompressible(b *testing.B) { + benchmark(b, 4096, false, false) +} + +func BenchmarkRead1MCompressible(b *testing.B) { + benchmark(b, 1024*1024, false, true) +} + +func BenchmarkRead1MNoncompressible(b *testing.B) { + benchmark(b, 1024*1024, false, false) +} diff --git a/pkg/state/stats.go b/pkg/state/stats.go new file mode 100644 index 000000000..1ebd8ebb4 --- /dev/null +++ b/pkg/state/stats.go @@ -0,0 +1,133 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "time" +) + +type statEntry struct { + count uint + total time.Duration +} + +// Stats tracks encode / decode timing. +// +// This currently provides a meaningful String function and no other way to +// extract stats about individual types. +// +// All exported receivers accept nil. +type Stats struct { + // byType contains a breakdown of time spent by type. + byType map[reflect.Type]*statEntry + + // stack contains objects in progress. + stack []reflect.Type + + // last is the last start time. + last time.Time +} + +// sample adds the given number of samples to the given object. +func (s *Stats) sample(typ reflect.Type, count uint) { + if s.byType == nil { + s.byType = make(map[reflect.Type]*statEntry) + } + entry, ok := s.byType[typ] + if !ok { + entry = new(statEntry) + s.byType[typ] = entry + } + now := time.Now() + entry.count += count + entry.total += now.Sub(s.last) + s.last = now +} + +// Start starts a sample. +func (s *Stats) Start(obj reflect.Value) { + if s == nil { + return + } + if len(s.stack) > 0 { + last := s.stack[len(s.stack)-1] + s.sample(last, 0) + } else { + // First time sample. + s.last = time.Now() + } + s.stack = append(s.stack, obj.Type()) +} + +// Done finishes the current sample. +func (s *Stats) Done() { + if s == nil { + return + } + last := s.stack[len(s.stack)-1] + s.sample(last, 1) + s.stack = s.stack[:len(s.stack)-1] +} + +type sliceEntry struct { + typ reflect.Type + entry *statEntry +} + +// String returns a table representation of the stats. +func (s *Stats) String() string { + if s == nil || len(s.byType) == 0 { + return "(no data)" + } + + // Build a list of stat entries. 
+ ss := make([]sliceEntry, 0, len(s.byType)) + for typ, entry := range s.byType { + ss = append(ss, sliceEntry{ + typ: typ, + entry: entry, + }) + } + + // Sort by total time (descending). + sort.Slice(ss, func(i, j int) bool { + return ss[i].entry.total > ss[j].entry.total + }) + + // Print the stat results. + var ( + buf bytes.Buffer + count uint + total time.Duration + ) + buf.WriteString("\n") + buf.WriteString(fmt.Sprintf("%12s | %8s | %8s | %s\n", "total", "count", "per", "type")) + buf.WriteString("-------------+----------+----------+-------------\n") + for _, se := range ss { + count += se.entry.count + total += se.entry.total + per := se.entry.total / time.Duration(se.entry.count) + buf.WriteString(fmt.Sprintf("%12s | %8d | %8s | %s\n", + se.entry.total, se.entry.count, per, se.typ.String())) + } + buf.WriteString("-------------+----------+----------+-------------\n") + buf.WriteString(fmt.Sprintf("%12s | %8d | %8s | [all]", + total, count, total/time.Duration(count))) + return string(buf.Bytes()) +} diff --git a/pkg/sync/BUILD b/pkg/sync/BUILD new file mode 100644 index 000000000..1fc0c25b5 --- /dev/null +++ b/pkg/sync/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//:sandbox"], + licenses = ["notice"], # Apache 2.0, portions BSD +) + +load("//tools/go_generics:defs.bzl", "go_template") + +go_template( + name = "generic_seqatomic", + srcs = ["seqatomic_unsafe.go"], + types = [ + "Value", + ], + deps = [ + ":sync", + ], +) + +go_library( + name = "sync", + srcs = [ + "downgradable_rwmutex_unsafe.go", + "memmove_unsafe.go", + "norace_unsafe.go", + "race_unsafe.go", + "seqcount.go", + "sync.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/sync", +) + +go_test( + name = "sync_test", + size = "small", + srcs = [ + "downgradable_rwmutex_test.go", + "seqcount_test.go", + ], + embed = [":sync"], +) diff --git a/pkg/sync/downgradable_rwmutex_test.go 
b/pkg/sync/downgradable_rwmutex_test.go new file mode 100644 index 000000000..99c1c8be0 --- /dev/null +++ b/pkg/sync/downgradable_rwmutex_test.go @@ -0,0 +1,149 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// GOMAXPROCS=10 go test + +// Copy/pasted from the standard library's sync/rwmutex_test.go, except for the +// addition of downgradingWriter and the renaming of num_iterations to +// numIterations to shut up Golint. + +package sync + +import ( + "fmt" + "runtime" + "sync/atomic" + "testing" +) + +func parallelReader(m *DowngradableRWMutex, clocked, cunlock, cdone chan bool) { + m.RLock() + clocked <- true + <-cunlock + m.RUnlock() + cdone <- true +} + +func doTestParallelReaders(numReaders, gomaxprocs int) { + runtime.GOMAXPROCS(gomaxprocs) + var m DowngradableRWMutex + clocked := make(chan bool) + cunlock := make(chan bool) + cdone := make(chan bool) + for i := 0; i < numReaders; i++ { + go parallelReader(&m, clocked, cunlock, cdone) + } + // Wait for all parallel RLock()s to succeed. + for i := 0; i < numReaders; i++ { + <-clocked + } + for i := 0; i < numReaders; i++ { + cunlock <- true + } + // Wait for the goroutines to finish. 
+ for i := 0; i < numReaders; i++ { + <-cdone + } +} + +func TestParallelReaders(t *testing.T) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) + doTestParallelReaders(1, 4) + doTestParallelReaders(3, 4) + doTestParallelReaders(4, 2) +} + +func reader(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) { + for i := 0; i < numIterations; i++ { + rwm.RLock() + n := atomic.AddInt32(activity, 1) + if n < 1 || n >= 10000 { + panic(fmt.Sprintf("wlock(%d)\n", n)) + } + for i := 0; i < 100; i++ { + } + atomic.AddInt32(activity, -1) + rwm.RUnlock() + } + cdone <- true +} + +func writer(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) { + for i := 0; i < numIterations; i++ { + rwm.Lock() + n := atomic.AddInt32(activity, 10000) + if n != 10000 { + panic(fmt.Sprintf("wlock(%d)\n", n)) + } + for i := 0; i < 100; i++ { + } + atomic.AddInt32(activity, -10000) + rwm.Unlock() + } + cdone <- true +} + +func downgradingWriter(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) { + for i := 0; i < numIterations; i++ { + rwm.Lock() + n := atomic.AddInt32(activity, 10000) + if n != 10000 { + panic(fmt.Sprintf("wlock(%d)\n", n)) + } + for i := 0; i < 100; i++ { + } + atomic.AddInt32(activity, -10000) + rwm.DowngradeLock() + n = atomic.AddInt32(activity, 1) + if n < 1 || n >= 10000 { + panic(fmt.Sprintf("wlock(%d)\n", n)) + } + for i := 0; i < 100; i++ { + } + n = atomic.AddInt32(activity, -1) + rwm.RUnlock() + } + cdone <- true +} + +func HammerDowngradableRWMutex(gomaxprocs, numReaders, numIterations int) { + runtime.GOMAXPROCS(gomaxprocs) + // Number of active readers + 10000 * number of active writers. 
+ var activity int32 + var rwm DowngradableRWMutex + cdone := make(chan bool) + go writer(&rwm, numIterations, &activity, cdone) + go downgradingWriter(&rwm, numIterations, &activity, cdone) + var i int + for i = 0; i < numReaders/2; i++ { + go reader(&rwm, numIterations, &activity, cdone) + } + go writer(&rwm, numIterations, &activity, cdone) + go downgradingWriter(&rwm, numIterations, &activity, cdone) + for ; i < numReaders; i++ { + go reader(&rwm, numIterations, &activity, cdone) + } + // Wait for the 4 writers and all readers to finish. + for i := 0; i < 4+numReaders; i++ { + <-cdone + } +} + +func TestDowngradableRWMutex(t *testing.T) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) + n := 1000 + if testing.Short() { + n = 5 + } + HammerDowngradableRWMutex(1, 1, n) + HammerDowngradableRWMutex(1, 3, n) + HammerDowngradableRWMutex(1, 10, n) + HammerDowngradableRWMutex(4, 1, n) + HammerDowngradableRWMutex(4, 3, n) + HammerDowngradableRWMutex(4, 10, n) + HammerDowngradableRWMutex(10, 1, n) + HammerDowngradableRWMutex(10, 3, n) + HammerDowngradableRWMutex(10, 10, n) + HammerDowngradableRWMutex(10, 5, n) +} diff --git a/pkg/sync/downgradable_rwmutex_unsafe.go b/pkg/sync/downgradable_rwmutex_unsafe.go new file mode 100644 index 000000000..9a96056fc --- /dev/null +++ b/pkg/sync/downgradable_rwmutex_unsafe.go @@ -0,0 +1,143 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is mostly copied from the standard library's sync/rwmutex.go. +// +// Happens-before relationships indicated to the race detector: +// - Unlock -> Lock (via writerSem) +// - Unlock -> RLock (via readerSem) +// - RUnlock -> Lock (via writerSem) +// - DowngradeLock -> RLock (via readerSem) + +package sync + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// DowngradableRWMutex is identical to sync.RWMutex, but adds the DowngradeLock +// method. 
+type DowngradableRWMutex struct { + w sync.Mutex // held if there are pending writers + writerSem uint32 // semaphore for writers to wait for completing readers + readerSem uint32 // semaphore for readers to wait for completing writers + readerCount int32 // number of pending readers + readerWait int32 // number of departing readers +} + +const rwmutexMaxReaders = 1 << 30 + +// RLock locks rw for reading. +func (rw *DowngradableRWMutex) RLock() { + if RaceEnabled { + RaceDisable() + } + if atomic.AddInt32(&rw.readerCount, 1) < 0 { + // A writer is pending, wait for it. + runtimeSemacquire(&rw.readerSem) + } + if RaceEnabled { + RaceEnable() + RaceAcquire(unsafe.Pointer(&rw.readerSem)) + } +} + +// RUnlock undoes a single RLock call. +func (rw *DowngradableRWMutex) RUnlock() { + if RaceEnabled { + // TODO: Why does this need to be ReleaseMerge instead of + // Release? IIUC this establishes Unlock happens-before RUnlock, which + // seems unnecessary. + RaceReleaseMerge(unsafe.Pointer(&rw.writerSem)) + RaceDisable() + } + if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 { + if r+1 == 0 || r+1 == -rwmutexMaxReaders { + panic("RUnlock of unlocked DowngradableRWMutex") + } + // A writer is pending. + if atomic.AddInt32(&rw.readerWait, -1) == 0 { + // The last reader unblocks the writer. + runtimeSemrelease(&rw.writerSem, false) + } + } + if RaceEnabled { + RaceEnable() + } +} + +// Lock locks rw for writing. +func (rw *DowngradableRWMutex) Lock() { + if RaceEnabled { + RaceDisable() + } + // First, resolve competition with other writers. + rw.w.Lock() + // Announce to readers there is a pending writer. + r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders + // Wait for active readers. + if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 { + runtimeSemacquire(&rw.writerSem) + } + if RaceEnabled { + RaceEnable() + RaceAcquire(unsafe.Pointer(&rw.writerSem)) + } +} + +// Unlock unlocks rw for writing. 
+func (rw *DowngradableRWMutex) Unlock() { + if RaceEnabled { + RaceRelease(unsafe.Pointer(&rw.writerSem)) + RaceRelease(unsafe.Pointer(&rw.readerSem)) + RaceDisable() + } + // Announce to readers there is no active writer. + r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders) + if r >= rwmutexMaxReaders { + panic("Unlock of unlocked DowngradableRWMutex") + } + // Unblock blocked readers, if any. + for i := 0; i < int(r); i++ { + runtimeSemrelease(&rw.readerSem, false) + } + // Allow other writers to proceed. + rw.w.Unlock() + if RaceEnabled { + RaceEnable() + } +} + +// DowngradeLock atomically unlocks rw for writing and locks it for reading. +func (rw *DowngradableRWMutex) DowngradeLock() { + if RaceEnabled { + RaceRelease(unsafe.Pointer(&rw.readerSem)) + RaceDisable() + } + // Announce to readers there is no active writer and one additional reader. + r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1) + if r >= rwmutexMaxReaders+1 { + panic("DowngradeLock of unlocked DowngradableRWMutex") + } + // Unblock blocked readers, if any. Note that this loop starts as 1 since r + // includes this goroutine. + for i := 1; i < int(r); i++ { + runtimeSemrelease(&rw.readerSem, false) + } + // Allow other writers to proceed to rw.w.Lock(). Note that they will still + // block on rw.writerSem since at least this reader exists, such that + // DowngradeLock() is atomic with the previous write lock. + rw.w.Unlock() + if RaceEnabled { + RaceEnable() + } +} + +//go:linkname runtimeSemacquire sync.runtime_Semacquire +func runtimeSemacquire(s *uint32) + +//go:linkname runtimeSemrelease sync.runtime_Semrelease +func runtimeSemrelease(s *uint32, handoff bool) diff --git a/pkg/sync/memmove_unsafe.go b/pkg/sync/memmove_unsafe.go new file mode 100644 index 000000000..0c992d5a4 --- /dev/null +++ b/pkg/sync/memmove_unsafe.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sync + +import ( + "unsafe" +) + +// Memmove is exported for SeqAtomicLoad/SeqAtomicTryLoad<T>, which can't +// define it because go_generics can't update the go:linkname annotation. +// Furthermore, go:linkname silently doesn't work if the local name is exported +// (this is of course undocumented), which is why this indirection is +// necessary. +func Memmove(to, from unsafe.Pointer, n uintptr) { + memmove(to, from, n) +} + +//go:linkname memmove runtime.memmove +//go:noescape +func memmove(to, from unsafe.Pointer, n uintptr) diff --git a/pkg/sync/norace_unsafe.go b/pkg/sync/norace_unsafe.go new file mode 100644 index 000000000..968665078 --- /dev/null +++ b/pkg/sync/norace_unsafe.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !race + +package sync + +import ( + "unsafe" +) + +// RaceEnabled is true if the Go data race detector is enabled. +const RaceEnabled = false + +// RaceDisable has the same semantics as runtime.RaceDisable. +func RaceDisable() { +} + +// RaceEnable has the same semantics as runtime.RaceEnable. +func RaceEnable() { +} + +// RaceAcquire has the same semantics as runtime.RaceAcquire. +func RaceAcquire(addr unsafe.Pointer) { +} + +// RaceRelease has the same semantics as runtime.RaceRelease. +func RaceRelease(addr unsafe.Pointer) { +} + +// RaceReleaseMerge has the same semantics as runtime.RaceReleaseMerge. +func RaceReleaseMerge(addr unsafe.Pointer) { +} diff --git a/pkg/sync/race_unsafe.go b/pkg/sync/race_unsafe.go new file mode 100644 index 000000000..d143a21c7 --- /dev/null +++ b/pkg/sync/race_unsafe.go @@ -0,0 +1,50 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build race + +package sync + +import ( + "runtime" + "unsafe" +) + +// RaceEnabled is true if the Go data race detector is enabled. +const RaceEnabled = true + +// RaceDisable has the same semantics as runtime.RaceDisable. +func RaceDisable() { + runtime.RaceDisable() +} + +// RaceEnable has the same semantics as runtime.RaceEnable. +func RaceEnable() { + runtime.RaceEnable() +} + +// RaceAcquire has the same semantics as runtime.RaceAcquire. 
+func RaceAcquire(addr unsafe.Pointer) { + runtime.RaceAcquire(addr) +} + +// RaceRelease has the same semantics as runtime.RaceRelease. +func RaceRelease(addr unsafe.Pointer) { + runtime.RaceRelease(addr) +} + +// RaceReleaseMerge has the same semantics as runtime.RaceReleaseMerge. +func RaceReleaseMerge(addr unsafe.Pointer) { + runtime.RaceReleaseMerge(addr) +} diff --git a/pkg/sync/seqatomic_unsafe.go b/pkg/sync/seqatomic_unsafe.go new file mode 100644 index 000000000..a18e1229a --- /dev/null +++ b/pkg/sync/seqatomic_unsafe.go @@ -0,0 +1,81 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package template doesn't exist. This file must be instantiated using the +// go_template_instance rule in tools/go_generics/defs.bzl. +package template + +import ( + "fmt" + "reflect" + "strings" + "unsafe" + + ssync "gvisor.googlesource.com/gvisor/pkg/sync" +) + +// Value is a required type parameter. +// +// Value must not contain any pointers, including interface objects, function +// objects, slices, maps, channels, unsafe.Pointer, and arrays or structs +// containing any of the above. An init() function will panic if this property +// does not hold. +type Value struct{} + +// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race +// with any writer critical sections in sc. 
+func SeqAtomicLoad(sc *ssync.SeqCount, ptr *Value) Value { + // This function doesn't use SeqAtomicTryLoad because doing so is + // measurably, significantly (~20%) slower; Go is awful at inlining. + var val Value + for { + epoch := sc.BeginRead() + if ssync.RaceEnabled { + // runtime.RaceDisable() doesn't actually stop the race detector, + // so it can't help us here. Instead, call runtime.memmove + // directly, which is not instrumented by the race detector. + ssync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val)) + } else { + // This is ~40% faster for short reads than going through memmove. + val = *ptr + } + if sc.ReadOk(epoch) { + break + } + } + return val +} + +// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section +// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read +// would race with a writer critical section, SeqAtomicTryLoad returns +// (unspecified, false). +func SeqAtomicTryLoad(sc *ssync.SeqCount, epoch ssync.SeqCountEpoch, ptr *Value) (Value, bool) { + var val Value + if ssync.RaceEnabled { + ssync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val)) + } else { + val = *ptr + } + return val, sc.ReadOk(epoch) +} + +func init() { + var val Value + typ := reflect.TypeOf(val) + name := typ.Name() + if ptrs := ssync.PointersInType(typ, name); len(ptrs) != 0 { + panic(fmt.Sprintf("SeqAtomicLoad<%s> is invalid since values %s of type %s contain pointers:\n%s", typ, name, typ, strings.Join(ptrs, "\n"))) + } +} diff --git a/pkg/sync/seqatomictest/BUILD b/pkg/sync/seqatomictest/BUILD new file mode 100644 index 000000000..9d6ee2dfb --- /dev/null +++ b/pkg/sync/seqatomictest/BUILD @@ -0,0 +1,34 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") + +go_template_instance( + name = "seqatomic_int", + out = "seqatomic_int.go", + package = "seqatomic", 
+ suffix = "Int", + template = "//pkg/sync:generic_seqatomic", + types = { + "Value": "int", + }, +) + +go_library( + name = "seqatomic", + srcs = ["seqatomic_int.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/sync/seqatomic", + deps = [ + "//pkg/sync", + ], +) + +go_test( + name = "seqatomic_test", + size = "small", + srcs = ["seqatomic_test.go"], + embed = [":seqatomic"], + deps = [ + "//pkg/sync", + ], +) diff --git a/pkg/sync/seqatomictest/seqatomic_test.go b/pkg/sync/seqatomictest/seqatomic_test.go new file mode 100644 index 000000000..b785d2344 --- /dev/null +++ b/pkg/sync/seqatomictest/seqatomic_test.go @@ -0,0 +1,132 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package seqatomic + +import ( + "sync/atomic" + "testing" + "time" + + gvsync "gvisor.googlesource.com/gvisor/pkg/sync" +) + +func TestSeqAtomicLoadUncontended(t *testing.T) { + var seq gvsync.SeqCount + const want = 1 + data := want + if got := SeqAtomicLoadInt(&seq, &data); got != want { + t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want) + } +} + +func TestSeqAtomicLoadAfterWrite(t *testing.T) { + var seq gvsync.SeqCount + var data int + const want = 1 + seq.BeginWrite() + data = want + seq.EndWrite() + if got := SeqAtomicLoadInt(&seq, &data); got != want { + t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want) + } +} + +func TestSeqAtomicLoadDuringWrite(t *testing.T) { + var seq gvsync.SeqCount + var data int + const want = 1 + seq.BeginWrite() + go func() { + time.Sleep(time.Second) + data = want + seq.EndWrite() + }() + if got := SeqAtomicLoadInt(&seq, &data); got != want { + t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want) + } +} + +func TestSeqAtomicTryLoadUncontended(t *testing.T) { + var seq gvsync.SeqCount + const want = 1 + data := want + epoch := seq.BeginRead() + if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want { + t.Errorf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want) + } +} + +func TestSeqAtomicTryLoadDuringWrite(t *testing.T) { + var seq gvsync.SeqCount + var data int + epoch := seq.BeginRead() + seq.BeginWrite() + if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok { + t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got) + } + seq.EndWrite() +} + +func TestSeqAtomicTryLoadAfterWrite(t *testing.T) { + var seq gvsync.SeqCount + var data int + epoch := seq.BeginRead() + seq.BeginWrite() + seq.EndWrite() + if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok { + t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got) + } +} + +func BenchmarkSeqAtomicLoadIntUncontended(b *testing.B) { + var seq gvsync.SeqCount + const want = 42 + 
data := want + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if got := SeqAtomicLoadInt(&seq, &data); got != want { + b.Fatalf("SeqAtomicLoadInt: got %v, wanted %v", got, want) + } + } + }) +} + +func BenchmarkSeqAtomicTryLoadIntUncontended(b *testing.B) { + var seq gvsync.SeqCount + const want = 42 + data := want + b.RunParallel(func(pb *testing.PB) { + epoch := seq.BeginRead() + for pb.Next() { + if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want { + b.Fatalf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want) + } + } + }) +} + +// For comparison: +func BenchmarkAtomicValueLoadIntUncontended(b *testing.B) { + var a atomic.Value + const want = 42 + a.Store(int(want)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if got := a.Load().(int); got != want { + b.Fatalf("atomic.Value.Load: got %v, wanted %v", got, want) + } + } + }) +} diff --git a/pkg/sync/seqcount.go b/pkg/sync/seqcount.go new file mode 100644 index 000000000..8e3304d69 --- /dev/null +++ b/pkg/sync/seqcount.go @@ -0,0 +1,158 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sync + +import ( + "fmt" + "reflect" + "runtime" + "sync/atomic" +) + +// SeqCount is a synchronization primitive for optimistic reader/writer +// synchronization in cases where readers can work with stale data and +// therefore do not need to block writers. 
+// +// Compared to sync/atomic.Value: +// +// - Mutation of SeqCount-protected data does not require memory allocation, +// whereas atomic.Value generally does. This is a significant advantage when +// writes are common. +// +// - Atomic reads of SeqCount-protected data require copying. This is a +// disadvantage when atomic reads are common. +// +// - SeqCount may be more flexible: correct use of SeqCount.ReadOk allows other +// operations to be made atomic with reads of SeqCount-protected data. +// +// - SeqCount may be less flexible: as of this writing, SeqCount-protected data +// cannot include pointers. +// +// - SeqCount is more cumbersome to use; atomic reads of SeqCount-protected +// data require instantiating function templates using go_generics (see +// seqatomic.go). +type SeqCount struct { + // epoch is incremented by BeginWrite and EndWrite, such that epoch is odd + // if a writer critical section is active, and a read from data protected + // by this SeqCount is atomic iff epoch is the same even value before and + // after the read. + epoch uint32 +} + +// SeqCountEpoch tracks writer critical sections in a SeqCount. +type SeqCountEpoch struct { + val uint32 +} + +// We assume that: +// +// - All functions in sync/atomic that perform a memory read are at least a +// read fence: memory reads before calls to such functions cannot be reordered +// after the call, and memory reads after calls to such functions cannot be +// reordered before the call, even if those reads do not use sync/atomic. +// +// - All functions in sync/atomic that perform a memory write are at least a +// write fence: memory writes before calls to such functions cannot be +// reordered after the call, and memory writes after calls to such functions +// cannot be reordered before the call, even if those writes do not use +// sync/atomic. 
+// +// As of this writing, the Go memory model completely fails to describe +// sync/atomic, but these properties are implied by +// https://groups.google.com/forum/#!topic/golang-nuts/7EnEhM3U7B8. + +// BeginRead indicates the beginning of a reader critical section. Reader +// critical sections DO NOT BLOCK writer critical sections, so operations in a +// reader critical section MAY RACE with writer critical sections. Races are +// detected by ReadOk at the end of the reader critical section. Thus, the +// low-level structure of readers is generally: +// +// for { +// epoch := seq.BeginRead() +// // do something idempotent with seq-protected data +// if seq.ReadOk(epoch) { +// break +// } +// } +// +// However, since reader critical sections may race with writer critical +// sections, the Go race detector will (accurately) flag data races in readers +// using this pattern. Most users of SeqCount will need to use the +// SeqAtomicLoad function template in seqatomic.go. +func (s *SeqCount) BeginRead() SeqCountEpoch { + epoch := atomic.LoadUint32(&s.epoch) + for epoch&1 != 0 { + runtime.Gosched() + epoch = atomic.LoadUint32(&s.epoch) + } + return SeqCountEpoch{epoch} +} + +// ReadOk returns true if the reader critical section initiated by a previous +// call to BeginRead() that returned epoch did not race with any writer critical +// sections. +// +// ReadOk may be called any number of times during a reader critical section. +// Reader critical sections do not need to be explicitly terminated; the last +// call to ReadOk is implicitly the end of the reader critical section. +func (s *SeqCount) ReadOk(epoch SeqCountEpoch) bool { + return atomic.LoadUint32(&s.epoch) == epoch.val +} + +// BeginWrite indicates the beginning of a writer critical section. +// +// SeqCount does not support concurrent writer critical sections; clients with +// concurrent writers must synchronize them using e.g. sync.Mutex. 
func (s *SeqCount) BeginWrite() {
	// Incrementing a quiescent (even) epoch yields an odd value; an even
	// result means the epoch was already odd, i.e. another writer critical
	// section was active.
	if epoch := atomic.AddUint32(&s.epoch, 1); epoch&1 == 0 {
		panic("SeqCount.BeginWrite during writer critical section")
	}
}

// EndWrite ends the effect of a preceding BeginWrite.
func (s *SeqCount) EndWrite() {
	// The increment must restore an even epoch; an odd result means there was
	// no matching BeginWrite.
	if epoch := atomic.AddUint32(&s.epoch, 1); epoch&1 != 0 {
		panic("SeqCount.EndWrite outside writer critical section")
	}
}

// PointersInType returns a list of pointers reachable from values named
// valName of the given type.
//
// PointersInType is not exhaustive, but it is guaranteed that if typ contains
// at least one pointer, then PointersInType returns a non-empty list.
func PointersInType(typ reflect.Type, valName string) []string {
	switch kind := typ.Kind(); kind {
	// Scalar kinds can never contain pointers.
	case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
		return nil

	// Reference kinds are (or contain) pointers by definition; note that
	// strings are included because they reference backing byte data.
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.String, reflect.UnsafePointer:
		return []string{valName}

	case reflect.Array:
		return PointersInType(typ.Elem(), valName+"[]")

	case reflect.Struct:
		// Recurse into every field, accumulating qualified field paths.
		var ptrs []string
		for i, n := 0, typ.NumField(); i < n; i++ {
			field := typ.Field(i)
			ptrs = append(ptrs, PointersInType(field.Type, fmt.Sprintf("%s.%s", valName, field.Name))...)
		}
		return ptrs

	// Unknown kinds are conservatively reported as pointers, preserving the
	// non-exhaustiveness guarantee above.
	default:
		return []string{fmt.Sprintf("%s (of type %s with unknown kind %s)", valName, typ, kind)}
	}
}
diff --git a/pkg/sync/seqcount_test.go b/pkg/sync/seqcount_test.go
new file mode 100644
index 000000000..fa4abed1d
--- /dev/null
+++ b/pkg/sync/seqcount_test.go
@@ -0,0 +1,162 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sync + +import ( + "reflect" + "testing" + "time" +) + +func TestSeqCountWriteUncontended(t *testing.T) { + var seq SeqCount + seq.BeginWrite() + seq.EndWrite() +} + +func TestSeqCountReadUncontended(t *testing.T) { + var seq SeqCount + epoch := seq.BeginRead() + if !seq.ReadOk(epoch) { + t.Errorf("ReadOk: got false, wanted true") + } +} + +func TestSeqCountBeginReadAfterWrite(t *testing.T) { + var seq SeqCount + var data int32 + const want = 1 + seq.BeginWrite() + data = want + seq.EndWrite() + epoch := seq.BeginRead() + if data != want { + t.Errorf("Reader: got %v, wanted %v", data, want) + } + if !seq.ReadOk(epoch) { + t.Errorf("ReadOk: got false, wanted true") + } +} + +func TestSeqCountBeginReadDuringWrite(t *testing.T) { + var seq SeqCount + var data int + const want = 1 + seq.BeginWrite() + go func() { + time.Sleep(time.Second) + data = want + seq.EndWrite() + }() + epoch := seq.BeginRead() + if data != want { + t.Errorf("Reader: got %v, wanted %v", data, want) + } + if !seq.ReadOk(epoch) { + t.Errorf("ReadOk: got false, wanted true") + } +} + +func TestSeqCountReadOkAfterWrite(t *testing.T) { + var seq SeqCount + epoch := seq.BeginRead() + seq.BeginWrite() + seq.EndWrite() + if seq.ReadOk(epoch) { + t.Errorf("ReadOk: got true, wanted false") + } +} + +func TestSeqCountReadOkDuringWrite(t *testing.T) { + var seq SeqCount + epoch := seq.BeginRead() + seq.BeginWrite() + if seq.ReadOk(epoch) { + t.Errorf("ReadOk: got true, wanted false") + } + seq.EndWrite() +} + +func BenchmarkSeqCountWriteUncontended(b *testing.B) { + var seq 
SeqCount + for i := 0; i < b.N; i++ { + seq.BeginWrite() + seq.EndWrite() + } +} + +func BenchmarkSeqCountReadUncontended(b *testing.B) { + var seq SeqCount + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + epoch := seq.BeginRead() + if !seq.ReadOk(epoch) { + b.Fatalf("ReadOk: got false, wanted true") + } + } + }) +} + +func TestPointersInType(t *testing.T) { + for _, test := range []struct { + name string // used for both test and value name + val interface{} + ptrs []string + }{ + { + name: "EmptyStruct", + val: struct{}{}, + }, + { + name: "Int", + val: int(0), + }, + { + name: "MixedStruct", + val: struct { + b bool + I int + ExportedPtr *struct{} + unexportedPtr *struct{} + arr [2]int + ptrArr [2]*int + nestedStruct struct { + nestedNonptr int + nestedPtr *int + } + structArr [1]struct { + nonptr int + ptr *int + } + }{}, + ptrs: []string{ + "MixedStruct.ExportedPtr", + "MixedStruct.unexportedPtr", + "MixedStruct.ptrArr[]", + "MixedStruct.nestedStruct.nestedPtr", + "MixedStruct.structArr[].ptr", + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + typ := reflect.TypeOf(test.val) + ptrs := PointersInType(typ, test.name) + t.Logf("Found pointers: %v", ptrs) + if (len(ptrs) != 0 || len(test.ptrs) != 0) && !reflect.DeepEqual(ptrs, test.ptrs) { + t.Errorf("Got %v, wanted %v", ptrs, test.ptrs) + } + }) + } +} diff --git a/pkg/sync/sync.go b/pkg/sync/sync.go new file mode 100644 index 000000000..36d4c4dee --- /dev/null +++ b/pkg/sync/sync.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sync provides synchronization primitives. +package sync diff --git a/pkg/syserr/BUILD b/pkg/syserr/BUILD new file mode 100644 index 000000000..e5ce48412 --- /dev/null +++ b/pkg/syserr/BUILD @@ -0,0 +1,20 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "syserr", + srcs = [ + "host_linux.go", + "linuxabi.go", + "netstack.go", + "syserr.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/syserr", + visibility = ["//visibility:public"], + deps = [ + "//pkg/abi/linux", + "//pkg/syserror", + "//pkg/tcpip", + ], +) diff --git a/pkg/syserr/host_linux.go b/pkg/syserr/host_linux.go new file mode 100644 index 000000000..ffd78e8f8 --- /dev/null +++ b/pkg/syserr/host_linux.go @@ -0,0 +1,160 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package syserr + +import ( + "syscall" +) + +var linuxHostTranslations = map[syscall.Errno]*Error{ + syscall.EPERM: ErrNotPermitted, + syscall.ENOENT: ErrNoFileOrDir, + syscall.ESRCH: ErrNoProcess, + syscall.EINTR: ErrInterrupted, + syscall.EIO: ErrIO, + syscall.ENXIO: ErrDeviceOrAddress, + syscall.E2BIG: ErrTooManyArgs, + syscall.ENOEXEC: ErrEcec, + syscall.EBADF: ErrBadFD, + syscall.ECHILD: ErrNoChild, + syscall.EAGAIN: ErrTryAgain, + syscall.ENOMEM: ErrNoMemory, + syscall.EACCES: ErrPermissionDenied, + syscall.EFAULT: ErrBadAddress, + syscall.ENOTBLK: ErrNotBlockDevice, + syscall.EBUSY: ErrBusy, + syscall.EEXIST: ErrExists, + syscall.EXDEV: ErrCrossDeviceLink, + syscall.ENODEV: ErrNoDevice, + syscall.ENOTDIR: ErrNotDir, + syscall.EISDIR: ErrIsDir, + syscall.EINVAL: ErrInvalidArgument, + syscall.ENFILE: ErrFileTableOverflow, + syscall.EMFILE: ErrTooManyOpenFiles, + syscall.ENOTTY: ErrNotTTY, + syscall.ETXTBSY: ErrTestFileBusy, + syscall.EFBIG: ErrFileTooBig, + syscall.ENOSPC: ErrNoSpace, + syscall.ESPIPE: ErrIllegalSeek, + syscall.EROFS: ErrReadOnlyFS, + syscall.EMLINK: ErrTooManyLinks, + syscall.EPIPE: ErrBrokenPipe, + syscall.EDOM: ErrDomain, + syscall.ERANGE: ErrRange, + syscall.EDEADLOCK: ErrDeadlock, + syscall.ENAMETOOLONG: ErrNameTooLong, + syscall.ENOLCK: ErrNoLocksAvailable, + syscall.ENOSYS: ErrInvalidSyscall, + syscall.ENOTEMPTY: ErrDirNotEmpty, + syscall.ELOOP: ErrLinkLoop, + syscall.ENOMSG: ErrNoMessage, + syscall.EIDRM: ErrIdentifierRemoved, + syscall.ECHRNG: ErrChannelOutOfRange, + syscall.EL2NSYNC: ErrLevelTwoNotSynced, + syscall.EL3HLT: ErrLevelThreeHalted, + syscall.EL3RST: ErrLevelThreeReset, + syscall.ELNRNG: ErrLinkNumberOutOfRange, + syscall.EUNATCH: ErrProtocolDriverNotAttached, + syscall.ENOCSI: ErrNoCSIAvailable, + syscall.EL2HLT: ErrLevelTwoHalted, + syscall.EBADE: ErrInvalidExchange, + syscall.EBADR: ErrInvalidRequestDescriptor, + syscall.EXFULL: ErrExchangeFull, + syscall.ENOANO: ErrNoAnode, + syscall.EBADRQC: 
ErrInvalidRequestCode, + syscall.EBADSLT: ErrInvalidSlot, + syscall.EBFONT: ErrBadFontFile, + syscall.ENOSTR: ErrNotStream, + syscall.ENODATA: ErrNoDataAvailable, + syscall.ETIME: ErrTimerExpired, + syscall.ENOSR: ErrStreamsResourceDepleted, + syscall.ENONET: ErrMachineNotOnNetwork, + syscall.ENOPKG: ErrPackageNotInstalled, + syscall.EREMOTE: ErrIsRemote, + syscall.ENOLINK: ErrNoLink, + syscall.EADV: ErrAdvertise, + syscall.ESRMNT: ErrSRMount, + syscall.ECOMM: ErrSendCommunication, + syscall.EPROTO: ErrProtocol, + syscall.EMULTIHOP: ErrMultihopAttempted, + syscall.EDOTDOT: ErrRFS, + syscall.EBADMSG: ErrInvalidDataMessage, + syscall.EOVERFLOW: ErrOverflow, + syscall.ENOTUNIQ: ErrNetworkNameNotUnique, + syscall.EBADFD: ErrFDInBadState, + syscall.EREMCHG: ErrRemoteAddressChanged, + syscall.ELIBACC: ErrSharedLibraryInaccessible, + syscall.ELIBBAD: ErrCorruptedSharedLibrary, + syscall.ELIBSCN: ErrLibSectionCorrupted, + syscall.ELIBMAX: ErrTooManySharedLibraries, + syscall.ELIBEXEC: ErrSharedLibraryExeced, + syscall.EILSEQ: ErrIllegalByteSequence, + syscall.ERESTART: ErrShouldRestart, + syscall.ESTRPIPE: ErrStreamPipe, + syscall.EUSERS: ErrTooManyUsers, + syscall.ENOTSOCK: ErrNotASocket, + syscall.EDESTADDRREQ: ErrDestinationAddressRequired, + syscall.EMSGSIZE: ErrMessageTooLong, + syscall.EPROTOTYPE: ErrWrongProtocolForSocket, + syscall.ENOPROTOOPT: ErrProtocolNotAvailable, + syscall.EPROTONOSUPPORT: ErrProtocolNotSupported, + syscall.ESOCKTNOSUPPORT: ErrSocketNotSupported, + syscall.EOPNOTSUPP: ErrEndpointOperation, + syscall.EPFNOSUPPORT: ErrProtocolFamilyNotSupported, + syscall.EAFNOSUPPORT: ErrAddressFamilyNotSupported, + syscall.EADDRINUSE: ErrAddressInUse, + syscall.EADDRNOTAVAIL: ErrAddressNotAvailable, + syscall.ENETDOWN: ErrNetworkDown, + syscall.ENETUNREACH: ErrNetworkUnreachable, + syscall.ENETRESET: ErrNetworkReset, + syscall.ECONNABORTED: ErrConnectionAborted, + syscall.ECONNRESET: ErrConnectionReset, + syscall.ENOBUFS: ErrNoBufferSpace, + syscall.EISCONN: 
ErrAlreadyConnected, + syscall.ENOTCONN: ErrNotConnected, + syscall.ESHUTDOWN: ErrShutdown, + syscall.ETOOMANYREFS: ErrTooManyRefs, + syscall.ETIMEDOUT: ErrTimedOut, + syscall.ECONNREFUSED: ErrConnectionRefused, + syscall.EHOSTDOWN: ErrHostDown, + syscall.EHOSTUNREACH: ErrNoRoute, + syscall.EALREADY: ErrAlreadyInProgress, + syscall.EINPROGRESS: ErrInProgress, + syscall.ESTALE: ErrStaleFileHandle, + syscall.EUCLEAN: ErrStructureNeedsCleaning, + syscall.ENOTNAM: ErrIsNamedFile, + syscall.EREMOTEIO: ErrRemoteIO, + syscall.EDQUOT: ErrQuotaExceeded, + syscall.ENOMEDIUM: ErrNoMedium, + syscall.EMEDIUMTYPE: ErrWrongMediumType, + syscall.ECANCELED: ErrCanceled, + syscall.ENOKEY: ErrNoKey, + syscall.EKEYEXPIRED: ErrKeyExpired, + syscall.EKEYREVOKED: ErrKeyRevoked, + syscall.EKEYREJECTED: ErrKeyRejected, + syscall.EOWNERDEAD: ErrOwnerDied, + syscall.ENOTRECOVERABLE: ErrNotRecoverable, +} + +// FromHost translates a syscall.Errno to a corresponding Error value. +func FromHost(err syscall.Errno) *Error { + e, ok := linuxHostTranslations[err] + if !ok { + panic("Unknown host errno " + err.Error()) + } + return e +} diff --git a/pkg/syserr/linuxabi.go b/pkg/syserr/linuxabi.go new file mode 100644 index 000000000..71e3d1a81 --- /dev/null +++ b/pkg/syserr/linuxabi.go @@ -0,0 +1,30 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package syserr + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +var linuxABITranslations = map[*Error]*linux.Errno{} + +// ToLinux translates an Error to a corresponding *linux.Errno value. +func ToLinux(err *Error) *linux.Errno { + le, ok := linuxABITranslations[err] + if !ok { + panic("No Linux ABI translation available for " + err.String()) + } + return le +} diff --git a/pkg/syserr/netstack.go b/pkg/syserr/netstack.go new file mode 100644 index 000000000..36726e967 --- /dev/null +++ b/pkg/syserr/netstack.go @@ -0,0 +1,93 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package syserr + +import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +// Mapping for tcpip.Error types. 
+var ( + ErrUnknownProtocol = New(tcpip.ErrUnknownProtocol.String(), linux.EINVAL) + ErrUnknownNICID = New(tcpip.ErrUnknownNICID.String(), linux.EINVAL) + ErrUnknownProtocolOption = New(tcpip.ErrUnknownProtocolOption.String(), linux.ENOPROTOOPT) + ErrDuplicateNICID = New(tcpip.ErrDuplicateNICID.String(), linux.EEXIST) + ErrDuplicateAddress = New(tcpip.ErrDuplicateAddress.String(), linux.EEXIST) + ErrBadLinkEndpoint = New(tcpip.ErrBadLinkEndpoint.String(), linux.EINVAL) + ErrAlreadyBound = New(tcpip.ErrAlreadyBound.String(), linux.EINVAL) + ErrInvalidEndpointState = New(tcpip.ErrInvalidEndpointState.String(), linux.EINVAL) + ErrAlreadyConnecting = New(tcpip.ErrAlreadyConnecting.String(), linux.EALREADY) + ErrNoPortAvailable = New(tcpip.ErrNoPortAvailable.String(), linux.EAGAIN) + ErrPortInUse = New(tcpip.ErrPortInUse.String(), linux.EADDRINUSE) + ErrBadLocalAddress = New(tcpip.ErrBadLocalAddress.String(), linux.EADDRNOTAVAIL) + ErrClosedForSend = New(tcpip.ErrClosedForSend.String(), linux.EPIPE) + ErrClosedForReceive = New(tcpip.ErrClosedForReceive.String(), nil) + ErrTimeout = New(tcpip.ErrTimeout.String(), linux.ETIMEDOUT) + ErrAborted = New(tcpip.ErrAborted.String(), linux.EPIPE) + ErrConnectStarted = New(tcpip.ErrConnectStarted.String(), linux.EINPROGRESS) + ErrDestinationRequired = New(tcpip.ErrDestinationRequired.String(), linux.EDESTADDRREQ) + ErrNotSupported = New(tcpip.ErrNotSupported.String(), linux.EOPNOTSUPP) + ErrQueueSizeNotSupported = New(tcpip.ErrQueueSizeNotSupported.String(), linux.ENOTTY) + ErrNoSuchFile = New(tcpip.ErrNoSuchFile.String(), linux.ENOENT) + ErrInvalidOptionValue = New(tcpip.ErrInvalidOptionValue.String(), linux.EINVAL) +) + +var netstackErrorTranslations = map[*tcpip.Error]*Error{ + tcpip.ErrUnknownProtocol: ErrUnknownProtocol, + tcpip.ErrUnknownNICID: ErrUnknownNICID, + tcpip.ErrUnknownProtocolOption: ErrUnknownProtocolOption, + tcpip.ErrDuplicateNICID: ErrDuplicateNICID, + tcpip.ErrDuplicateAddress: ErrDuplicateAddress, + 
tcpip.ErrNoRoute: ErrNoRoute, + tcpip.ErrBadLinkEndpoint: ErrBadLinkEndpoint, + tcpip.ErrAlreadyBound: ErrAlreadyBound, + tcpip.ErrInvalidEndpointState: ErrInvalidEndpointState, + tcpip.ErrAlreadyConnecting: ErrAlreadyConnecting, + tcpip.ErrAlreadyConnected: ErrAlreadyConnected, + tcpip.ErrNoPortAvailable: ErrNoPortAvailable, + tcpip.ErrPortInUse: ErrPortInUse, + tcpip.ErrBadLocalAddress: ErrBadLocalAddress, + tcpip.ErrClosedForSend: ErrClosedForSend, + tcpip.ErrClosedForReceive: ErrClosedForReceive, + tcpip.ErrWouldBlock: ErrWouldBlock, + tcpip.ErrConnectionRefused: ErrConnectionRefused, + tcpip.ErrTimeout: ErrTimeout, + tcpip.ErrAborted: ErrAborted, + tcpip.ErrConnectStarted: ErrConnectStarted, + tcpip.ErrDestinationRequired: ErrDestinationRequired, + tcpip.ErrNotSupported: ErrNotSupported, + tcpip.ErrQueueSizeNotSupported: ErrQueueSizeNotSupported, + tcpip.ErrNotConnected: ErrNotConnected, + tcpip.ErrConnectionReset: ErrConnectionReset, + tcpip.ErrConnectionAborted: ErrConnectionAborted, + tcpip.ErrNoSuchFile: ErrNoSuchFile, + tcpip.ErrInvalidOptionValue: ErrInvalidOptionValue, + tcpip.ErrNoLinkAddress: ErrHostDown, + tcpip.ErrBadAddress: ErrBadAddress, +} + +// TranslateNetstackError converts an error from the tcpip package to a sentry +// internal error. +func TranslateNetstackError(err *tcpip.Error) *Error { + if err == nil { + return nil + } + se, ok := netstackErrorTranslations[err] + if !ok { + panic("Unknown error: " + err.String()) + } + return se +} diff --git a/pkg/syserr/syserr.go b/pkg/syserr/syserr.go new file mode 100644 index 000000000..eaa0ec920 --- /dev/null +++ b/pkg/syserr/syserr.go @@ -0,0 +1,236 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package syserr contains sandbox-internal errors. These errors are distinct +// from both the errors returned by host system calls and the errors returned +// to sandboxed applications. +package syserr + +import ( + "fmt" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// Error represents an internal error. +type Error struct { + string +} + +// New creates a new Error and adds a translation for it. +func New(message string, linuxTranslation *linux.Errno) *Error { + err := &Error{message} + linuxABITranslations[err] = linuxTranslation + + // TODO: Remove this. + if linuxTranslation == nil { + linuxBackwardsTranslations[err] = nil + } else { + e := error(syscall.Errno(linuxTranslation.Number())) + // syserror.ErrWouldBlock gets translated to syserror.EWOULDBLOCK and + // enables proper blocking semantics. This should temporary address the + // class of blocking bugs that keep popping up with the current state of + // the error space. + if e == syserror.EWOULDBLOCK { + e = syserror.ErrWouldBlock + } + linuxBackwardsTranslations[err] = e + } + + return err +} + +// NewWithoutTranslation creates a new Error. If translation is attempted on +// the error, translation will fail. +func NewWithoutTranslation(message string) *Error { + return &Error{message} +} + +// String implements fmt.Stringer.String. +func (e *Error) String() string { + if e == nil { + return "<nil>" + } + return e.string +} + +// TODO: Remove this. 
+var linuxBackwardsTranslations = map[*Error]error{} + +// ToError translates an Error to a corresponding error value. +// +// TODO: Remove this. +func (e *Error) ToError() error { + if e == nil { + return nil + } + err, ok := linuxBackwardsTranslations[e] + if !ok { + panic(fmt.Sprintf("unknown error: %q", e.string)) + } + return err +} + +// TODO: Remove or replace most of these errors. +// +// Some of the errors should be replaced with package specific errors and +// others should be removed entirely. +var ( + ErrNotPermitted = New("operation not permitted", linux.EPERM) + ErrNoFileOrDir = New("no such file or directory", linux.ENOENT) + ErrNoProcess = New("no such process", linux.ESRCH) + ErrInterrupted = New("interrupted system call", linux.EINTR) + ErrIO = New("I/O error", linux.EIO) + ErrDeviceOrAddress = New("no such device or address", linux.ENXIO) + ErrTooManyArgs = New("argument list too long", linux.E2BIG) + ErrEcec = New("exec format error", linux.ENOEXEC) + ErrBadFD = New("bad file number", linux.EBADF) + ErrNoChild = New("no child processes", linux.ECHILD) + ErrTryAgain = New("try again", linux.EAGAIN) + ErrNoMemory = New("out of memory", linux.ENOMEM) + ErrPermissionDenied = New("permission denied", linux.EACCES) + ErrBadAddress = New("bad address", linux.EFAULT) + ErrNotBlockDevice = New("block device required", linux.ENOTBLK) + ErrBusy = New("device or resource busy", linux.EBUSY) + ErrExists = New("file exists", linux.EEXIST) + ErrCrossDeviceLink = New("cross-device link", linux.EXDEV) + ErrNoDevice = New("no such device", linux.ENODEV) + ErrNotDir = New("not a directory", linux.ENOTDIR) + ErrIsDir = New("is a directory", linux.EISDIR) + ErrInvalidArgument = New("invalid argument", linux.EINVAL) + ErrFileTableOverflow = New("file table overflow", linux.ENFILE) + ErrTooManyOpenFiles = New("too many open files", linux.EMFILE) + ErrNotTTY = New("not a typewriter", linux.ENOTTY) + ErrTestFileBusy = New("text file busy", linux.ETXTBSY) + ErrFileTooBig 
= New("file too large", linux.EFBIG) + ErrNoSpace = New("no space left on device", linux.ENOSPC) + ErrIllegalSeek = New("illegal seek", linux.ESPIPE) + ErrReadOnlyFS = New("read-only file system", linux.EROFS) + ErrTooManyLinks = New("too many links", linux.EMLINK) + ErrBrokenPipe = New("broken pipe", linux.EPIPE) + ErrDomain = New("math argument out of domain of func", linux.EDOM) + ErrRange = New("math result not representable", linux.ERANGE) + ErrDeadlock = New("resource deadlock would occur", linux.EDEADLOCK) + ErrNameTooLong = New("file name too long", linux.ENAMETOOLONG) + ErrNoLocksAvailable = New("no record locks available", linux.ENOLCK) + ErrInvalidSyscall = New("invalid system call number", linux.ENOSYS) + ErrDirNotEmpty = New("directory not empty", linux.ENOTEMPTY) + ErrLinkLoop = New("too many symbolic links encountered", linux.ELOOP) + ErrWouldBlock = New("operation would block", linux.EWOULDBLOCK) + ErrNoMessage = New("no message of desired type", linux.ENOMSG) + ErrIdentifierRemoved = New("identifier removed", linux.EIDRM) + ErrChannelOutOfRange = New("channel number out of range", linux.ECHRNG) + ErrLevelTwoNotSynced = New("level 2 not synchronized", linux.EL2NSYNC) + ErrLevelThreeHalted = New("level 3 halted", linux.EL3HLT) + ErrLevelThreeReset = New("level 3 reset", linux.EL3RST) + ErrLinkNumberOutOfRange = New("link number out of range", linux.ELNRNG) + ErrProtocolDriverNotAttached = New("protocol driver not attached", linux.EUNATCH) + ErrNoCSIAvailable = New("no CSI structure available", linux.ENOCSI) + ErrLevelTwoHalted = New("level 2 halted", linux.EL2HLT) + ErrInvalidExchange = New("invalid exchange", linux.EBADE) + ErrInvalidRequestDescriptor = New("invalid request descriptor", linux.EBADR) + ErrExchangeFull = New("exchange full", linux.EXFULL) + ErrNoAnode = New("no anode", linux.ENOANO) + ErrInvalidRequestCode = New("invalid request code", linux.EBADRQC) + ErrInvalidSlot = New("invalid slot", linux.EBADSLT) + ErrBadFontFile = New("bad 
font file format", linux.EBFONT) + ErrNotStream = New("device not a stream", linux.ENOSTR) + ErrNoDataAvailable = New("no data available", linux.ENODATA) + ErrTimerExpired = New("timer expired", linux.ETIME) + ErrStreamsResourceDepleted = New("out of streams resources", linux.ENOSR) + ErrMachineNotOnNetwork = New("machine is not on the network", linux.ENONET) + ErrPackageNotInstalled = New("package not installed", linux.ENOPKG) + ErrIsRemote = New("object is remote", linux.EREMOTE) + ErrNoLink = New("link has been severed", linux.ENOLINK) + ErrAdvertise = New("advertise error", linux.EADV) + ErrSRMount = New("srmount error", linux.ESRMNT) + ErrSendCommunication = New("communication error on send", linux.ECOMM) + ErrProtocol = New("protocol error", linux.EPROTO) + ErrMultihopAttempted = New("multihop attempted", linux.EMULTIHOP) + ErrRFS = New("RFS specific error", linux.EDOTDOT) + ErrInvalidDataMessage = New("not a data message", linux.EBADMSG) + ErrOverflow = New("value too large for defined data type", linux.EOVERFLOW) + ErrNetworkNameNotUnique = New("name not unique on network", linux.ENOTUNIQ) + ErrFDInBadState = New("file descriptor in bad state", linux.EBADFD) + ErrRemoteAddressChanged = New("remote address changed", linux.EREMCHG) + ErrSharedLibraryInaccessible = New("can not access a needed shared library", linux.ELIBACC) + ErrCorruptedSharedLibrary = New("accessing a corrupted shared library", linux.ELIBBAD) + ErrLibSectionCorrupted = New(".lib section in a.out corrupted", linux.ELIBSCN) + ErrTooManySharedLibraries = New("attempting to link in too many shared libraries", linux.ELIBMAX) + ErrSharedLibraryExeced = New("cannot exec a shared library directly", linux.ELIBEXEC) + ErrIllegalByteSequence = New("illegal byte sequence", linux.EILSEQ) + ErrShouldRestart = New("interrupted system call should be restarted", linux.ERESTART) + ErrStreamPipe = New("streams pipe error", linux.ESTRPIPE) + ErrTooManyUsers = New("too many users", linux.EUSERS) + ErrNotASocket 
= New("socket operation on non-socket", linux.ENOTSOCK) + ErrDestinationAddressRequired = New("destination address required", linux.EDESTADDRREQ) + ErrMessageTooLong = New("message too long", linux.EMSGSIZE) + ErrWrongProtocolForSocket = New("protocol wrong type for socket", linux.EPROTOTYPE) + ErrProtocolNotAvailable = New("protocol not available", linux.ENOPROTOOPT) + ErrProtocolNotSupported = New("protocol not supported", linux.EPROTONOSUPPORT) + ErrSocketNotSupported = New("socket type not supported", linux.ESOCKTNOSUPPORT) + ErrEndpointOperation = New("operation not supported on transport endpoint", linux.EOPNOTSUPP) + ErrProtocolFamilyNotSupported = New("protocol family not supported", linux.EPFNOSUPPORT) + ErrAddressFamilyNotSupported = New("address family not supported by protocol", linux.EAFNOSUPPORT) + ErrAddressInUse = New("address already in use", linux.EADDRINUSE) + ErrAddressNotAvailable = New("cannot assign requested address", linux.EADDRNOTAVAIL) + ErrNetworkDown = New("network is down", linux.ENETDOWN) + ErrNetworkUnreachable = New("network is unreachable", linux.ENETUNREACH) + ErrNetworkReset = New("network dropped connection because of reset", linux.ENETRESET) + ErrConnectionAborted = New("software caused connection abort", linux.ECONNABORTED) + ErrConnectionReset = New("connection reset by peer", linux.ECONNRESET) + ErrNoBufferSpace = New("no buffer space available", linux.ENOBUFS) + ErrAlreadyConnected = New("transport endpoint is already connected", linux.EISCONN) + ErrNotConnected = New("transport endpoint is not connected", linux.ENOTCONN) + ErrShutdown = New("cannot send after transport endpoint shutdown", linux.ESHUTDOWN) + ErrTooManyRefs = New("too many references: cannot splice", linux.ETOOMANYREFS) + ErrTimedOut = New("connection timed out", linux.ETIMEDOUT) + ErrConnectionRefused = New("connection refused", linux.ECONNREFUSED) + ErrHostDown = New("host is down", linux.EHOSTDOWN) + ErrNoRoute = New("no route to host", 
linux.EHOSTUNREACH) + ErrAlreadyInProgress = New("operation already in progress", linux.EALREADY) + ErrInProgress = New("operation now in progress", linux.EINPROGRESS) + ErrStaleFileHandle = New("stale file handle", linux.ESTALE) + ErrStructureNeedsCleaning = New("structure needs cleaning", linux.EUCLEAN) + ErrIsNamedFile = New("is a named type file", linux.ENOTNAM) + ErrRemoteIO = New("remote I/O error", linux.EREMOTEIO) + ErrQuotaExceeded = New("quota exceeded", linux.EDQUOT) + ErrNoMedium = New("no medium found", linux.ENOMEDIUM) + ErrWrongMediumType = New("wrong medium type", linux.EMEDIUMTYPE) + ErrCanceled = New("operation Canceled", linux.ECANCELED) + ErrNoKey = New("required key not available", linux.ENOKEY) + ErrKeyExpired = New("key has expired", linux.EKEYEXPIRED) + ErrKeyRevoked = New("key has been revoked", linux.EKEYREVOKED) + ErrKeyRejected = New("key was rejected by service", linux.EKEYREJECTED) + ErrOwnerDied = New("owner died", linux.EOWNERDEAD) + ErrNotRecoverable = New("state not recoverable", linux.ENOTRECOVERABLE) +) + +// FromError converts a generic error to an *Error. +// +// TODO: Remove this function. 
+func FromError(err error) *Error { + if err == nil { + return nil + } + if errno, ok := err.(syscall.Errno); ok { + return FromHost(errno) + } + if errno, ok := syserror.TranslateError(err); ok { + return FromHost(errno) + } + panic("unknown error: " + err.Error()) +} diff --git a/pkg/syserror/BUILD b/pkg/syserror/BUILD new file mode 100644 index 000000000..68ddec786 --- /dev/null +++ b/pkg/syserror/BUILD @@ -0,0 +1,18 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "syserror", + srcs = ["syserror.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/syserror", + visibility = ["//visibility:public"], +) + +go_test( + name = "syserror_test", + srcs = ["syserror_test.go"], + deps = [ + ":syserror", + ], +) diff --git a/pkg/syserror/syserror.go b/pkg/syserror/syserror.go new file mode 100644 index 000000000..6f8a7a319 --- /dev/null +++ b/pkg/syserror/syserror.go @@ -0,0 +1,151 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package syserror contains syscall error codes exported as error interface +// instead of Errno. This allows for fast comparison and returns when the +// comparand or return value is of type error because there is no need to +// convert from Errno to an interface, i.e., runtime.convT2I isn't called. 
// The following variables have the same meaning as their syscall equivalent.
var (
	E2BIG        = error(syscall.E2BIG)
	EACCES       = error(syscall.EACCES)
	EAGAIN       = error(syscall.EAGAIN)
	EBADF        = error(syscall.EBADF)
	EBUSY        = error(syscall.EBUSY)
	ECHILD       = error(syscall.ECHILD)
	ECONNREFUSED = error(syscall.ECONNREFUSED)
	ECONNRESET   = error(syscall.ECONNRESET)
	EEXIST       = error(syscall.EEXIST)
	EFAULT       = error(syscall.EFAULT)
	EFBIG        = error(syscall.EFBIG)
	EIDRM        = error(syscall.EIDRM)
	EINTR        = error(syscall.EINTR)
	EINVAL       = error(syscall.EINVAL)
	EIO          = error(syscall.EIO)
	EISDIR       = error(syscall.EISDIR)
	ELIBBAD      = error(syscall.ELIBBAD)
	ELOOP        = error(syscall.ELOOP)
	EMFILE       = error(syscall.EMFILE)
	ENAMETOOLONG = error(syscall.ENAMETOOLONG)
	ENOATTR      = ENODATA
	ENODATA      = error(syscall.ENODATA)
	ENODEV       = error(syscall.ENODEV)
	ENOENT       = error(syscall.ENOENT)
	ENOEXEC      = error(syscall.ENOEXEC)
	ENOLCK       = error(syscall.ENOLCK)
	ENOLINK      = error(syscall.ENOLINK)
	ENOMEM       = error(syscall.ENOMEM)
	ENOSPC       = error(syscall.ENOSPC)
	ENOSYS       = error(syscall.ENOSYS)
	ENOTDIR      = error(syscall.ENOTDIR)
	ENOTEMPTY    = error(syscall.ENOTEMPTY)
	ENOTSUP      = error(syscall.ENOTSUP)
	ENOTTY       = error(syscall.ENOTTY)
	ENXIO        = error(syscall.ENXIO)
	EOPNOTSUPP   = error(syscall.EOPNOTSUPP)
	EOVERFLOW    = error(syscall.EOVERFLOW)
	EPERM        = error(syscall.EPERM)
	EPIPE        = error(syscall.EPIPE)
	ERANGE       = error(syscall.ERANGE)
	EROFS        = error(syscall.EROFS)
	ESPIPE       = error(syscall.ESPIPE)
	ESRCH        = error(syscall.ESRCH)
	ETIMEDOUT    = error(syscall.ETIMEDOUT)
	EUSERS       = error(syscall.EUSERS)
	EWOULDBLOCK  = error(syscall.EWOULDBLOCK)
	EXDEV        = error(syscall.EXDEV)
)

var (
	// ErrWouldBlock is an internal error used to indicate that an operation
	// cannot be satisfied immediately, and should be retried at a later
	// time, possibly when the caller has received a notification that the
	// operation may be able to complete. It is used by implementations of
	// the kio.File interface.
	ErrWouldBlock = errors.New("request would block")

	// ErrInterrupted is returned if a request is interrupted before it can
	// complete.
	ErrInterrupted = errors.New("request was interrupted")

	// ErrExceedsFileSizeLimit is returned if a request would exceed the
	// file's size limit.
	ErrExceedsFileSizeLimit = errors.New("exceeds file size limit")
)

// translations maps generic errors to the errno they translate to. It is
// populated via AddErrorTranslation.
var translations = map[error]syscall.Errno{}

// unwrappers holds functions that can extract an errno from typed errors
// that cannot be matched by identity in translations.
var unwrappers []func(error) (syscall.Errno, bool)

// AddErrorTranslation allows modules to populate the error map by adding their
// own translations during initialization. Returns if the error translation is
// accepted or not. A pre-existing translation will not be overwritten by the
// new translation.
func AddErrorTranslation(from error, to syscall.Errno) bool {
	if _, dup := translations[from]; dup {
		// First registration wins; reject the conflicting entry.
		return false
	}
	translations[from] = to
	return true
}

// AddErrorUnwrapper registers an unwrap method that can extract a concrete error
// from a typed, but not initialized, error.
func AddErrorUnwrapper(unwrap func(e error) (syscall.Errno, bool)) {
	unwrappers = append(unwrappers, unwrap)
}

// TranslateError translates errors to errnos, it will return false if
// the error was not registered.
func TranslateError(from error) (syscall.Errno, bool) {
	if no, ok := translations[from]; ok {
		return no, true
	}
	// No exact match was registered. A package may still own this error via
	// a typed-error unwrapper, so give each registered unwrapper a chance.
	for _, unwrap := range unwrappers {
		if no, ok := unwrap(from); ok {
			return no, true
		}
	}
	return 0, false
}

// ConvertIntr converts the provided error code (err) to another one (intr) if
// the first error corresponds to an interrupted operation.
func ConvertIntr(err, intr error) error {
	if err != ErrInterrupted {
		return err
	}
	return intr
}

func init() {
	AddErrorTranslation(ErrWouldBlock, syscall.EWOULDBLOCK)
	AddErrorTranslation(ErrInterrupted, syscall.EINTR)
	AddErrorTranslation(ErrExceedsFileSizeLimit, syscall.EFBIG)
}
+ +package syserror_test + +import ( + "errors" + "syscall" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +var globalError error + +func returnErrnoAsError() error { + return syscall.EINVAL +} + +func returnError() error { + return syserror.EINVAL +} + +func BenchmarkReturnErrnoAsError(b *testing.B) { + for i := b.N; i > 0; i-- { + returnErrnoAsError() + } +} + +func BenchmarkReturnError(b *testing.B) { + for i := b.N; i > 0; i-- { + returnError() + } +} + +func BenchmarkCompareErrno(b *testing.B) { + j := 0 + for i := b.N; i > 0; i-- { + if globalError == syscall.EINVAL { + j++ + } + } +} + +func BenchmarkCompareError(b *testing.B) { + j := 0 + for i := b.N; i > 0; i-- { + if globalError == syserror.EINVAL { + j++ + } + } +} + +func BenchmarkSwitchErrno(b *testing.B) { + j := 0 + for i := b.N; i > 0; i-- { + switch globalError { + case syscall.EINVAL: + j += 1 + case syscall.EINTR: + j += 2 + case syscall.EAGAIN: + j += 3 + } + } +} + +func BenchmarkSwitchError(b *testing.B) { + j := 0 + for i := b.N; i > 0; i-- { + switch globalError { + case syserror.EINVAL: + j += 1 + case syserror.EINTR: + j += 2 + case syserror.EAGAIN: + j += 3 + } + } +} + +type translationTestTable struct { + fn string + errIn error + syscallErrorIn syscall.Errno + expectedBool bool + expectedTranslation syscall.Errno +} + +func TestErrorTranslation(t *testing.T) { + myError := errors.New("My test error") + myError2 := errors.New("Another test error") + testTable := []translationTestTable{ + {"TranslateError", myError, 0, false, 0}, + {"TranslateError", myError2, 0, false, 0}, + {"AddErrorTranslation", myError, syscall.EAGAIN, true, 0}, + {"AddErrorTranslation", myError, syscall.EAGAIN, false, 0}, + {"AddErrorTranslation", myError, syscall.EPERM, false, 0}, + {"TranslateError", myError, 0, true, syscall.EAGAIN}, + {"TranslateError", myError2, 0, false, 0}, + {"AddErrorTranslation", myError2, syscall.EPERM, true, 0}, + {"AddErrorTranslation", myError2, syscall.EPERM, 
false, 0}, + {"AddErrorTranslation", myError2, syscall.EAGAIN, false, 0}, + {"TranslateError", myError, 0, true, syscall.EAGAIN}, + {"TranslateError", myError2, 0, true, syscall.EPERM}, + } + for _, tt := range testTable { + switch tt.fn { + case "TranslateError": + err, ok := syserror.TranslateError(tt.errIn) + if ok != tt.expectedBool { + t.Fatalf("%v(%v) => %v expected %v", tt.fn, tt.errIn, ok, tt.expectedBool) + } else if err != tt.expectedTranslation { + t.Fatalf("%v(%v) (error) => %v expected %v", tt.fn, tt.errIn, err, tt.expectedTranslation) + } + case "AddErrorTranslation": + ok := syserror.AddErrorTranslation(tt.errIn, tt.syscallErrorIn) + if ok != tt.expectedBool { + t.Fatalf("%v(%v) => %v expected %v", tt.fn, tt.errIn, ok, tt.expectedBool) + } + default: + t.Fatalf("Unknown function %v", tt.fn) + } + } +} diff --git a/pkg/tcpip/BUILD b/pkg/tcpip/BUILD new file mode 100644 index 000000000..5c38a4961 --- /dev/null +++ b/pkg/tcpip/BUILD @@ -0,0 +1,35 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "tcpip_state", + srcs = [ + "tcpip.go", + ], + out = "tcpip_state.go", + package = "tcpip", +) + +go_library( + name = "tcpip", + srcs = [ + "tcpip.go", + "tcpip_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip", + visibility = ["//visibility:public"], + deps = [ + "//pkg/state", + "//pkg/tcpip/buffer", + "//pkg/waiter", + ], +) + +go_test( + name = "tcpip_test", + size = "small", + srcs = ["tcpip_test.go"], + embed = [":tcpip"], +) diff --git a/pkg/tcpip/adapters/gonet/BUILD b/pkg/tcpip/adapters/gonet/BUILD new file mode 100644 index 000000000..69cfc84ab --- /dev/null +++ b/pkg/tcpip/adapters/gonet/BUILD @@ -0,0 +1,36 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "gonet", + srcs = ["gonet.go"], + importpath = 
// errCanceled is returned by blocked operations (such as Accept) when the
// Listener is shut down via Shutdown, which closes the cancel channel.
var errCanceled = errors.New("operation canceled")

// timeoutError is how the net package reports timeouts: an error whose
// Timeout method returns true.
type timeoutError struct{}

// Error implements error.Error.
func (*timeoutError) Error() string { return "i/o timeout" }

// Timeout implements net.Error.Timeout.
func (*timeoutError) Timeout() bool { return true }

// Temporary implements net.Error.Temporary.
func (*timeoutError) Temporary() bool { return true }

// A Listener is a wrapper around a tcpip endpoint that implements
// net.Listener.
+type Listener struct { + stack *stack.Stack + ep tcpip.Endpoint + wq *waiter.Queue + cancel chan struct{} +} + +// NewListener creates a new Listener. +func NewListener(s *stack.Stack, addr tcpip.FullAddress, network tcpip.NetworkProtocolNumber) (*Listener, error) { + // Create TCP endpoint, bind it, then start listening. + var wq waiter.Queue + ep, err := s.NewEndpoint(tcp.ProtocolNumber, network, &wq) + if err != nil { + return nil, errors.New(err.String()) + } + + if err := ep.Bind(addr, nil); err != nil { + ep.Close() + return nil, &net.OpError{ + Op: "bind", + Net: "tcp", + Addr: fullToTCPAddr(addr), + Err: errors.New(err.String()), + } + } + + if err := ep.Listen(10); err != nil { + ep.Close() + return nil, &net.OpError{ + Op: "listen", + Net: "tcp", + Addr: fullToTCPAddr(addr), + Err: errors.New(err.String()), + } + } + + return &Listener{ + stack: s, + ep: ep, + wq: &wq, + cancel: make(chan struct{}), + }, nil +} + +// Close implements net.Listener.Close. +func (l *Listener) Close() error { + l.ep.Close() + return nil +} + +// Shutdown stops the HTTP server. +func (l *Listener) Shutdown() { + l.ep.Shutdown(tcpip.ShutdownWrite | tcpip.ShutdownRead) + close(l.cancel) // broadcast cancellation +} + +// Addr implements net.Listener.Addr. +func (l *Listener) Addr() net.Addr { + a, err := l.ep.GetLocalAddress() + if err != nil { + return nil + } + return fullToTCPAddr(a) +} + +type deadlineTimer struct { + // mu protects the fields below. 
+ mu sync.Mutex + + readTimer *time.Timer + readCancelCh chan struct{} + writeTimer *time.Timer + writeCancelCh chan struct{} +} + +func (d *deadlineTimer) init() { + d.readCancelCh = make(chan struct{}) + d.writeCancelCh = make(chan struct{}) +} + +func (d *deadlineTimer) readCancel() <-chan struct{} { + d.mu.Lock() + c := d.readCancelCh + d.mu.Unlock() + return c +} +func (d *deadlineTimer) writeCancel() <-chan struct{} { + d.mu.Lock() + c := d.writeCancelCh + d.mu.Unlock() + return c +} + +// setDeadline contains the shared logic for setting a deadline. +// +// cancelCh and timer must be pointers to deadlineTimer.readCancelCh and +// deadlineTimer.readTimer or deadlineTimer.writeCancelCh and +// deadlineTimer.writeTimer. +// +// setDeadline must only be called while holding d.mu. +func (d *deadlineTimer) setDeadline(cancelCh *chan struct{}, timer **time.Timer, t time.Time) { + if *timer != nil && !(*timer).Stop() { + *cancelCh = make(chan struct{}) + } + + // Create a new channel if we already closed it due to setting an already + // expired time. We won't race with the timer because we already handled + // that above. + select { + case <-*cancelCh: + *cancelCh = make(chan struct{}) + default: + } + + // "A zero value for t means I/O operations will not time out." + // - net.Conn.SetDeadline + if t.IsZero() { + return + } + + timeout := t.Sub(time.Now()) + if timeout <= 0 { + close(*cancelCh) + return + } + + // Timer.Stop returns whether or not the AfterFunc has started, but + // does not indicate whether or not it has completed. Make a copy of + // the cancel channel to prevent this code from racing with the next + // call of setDeadline replacing *cancelCh. + ch := *cancelCh + *timer = time.AfterFunc(timeout, func() { + close(ch) + }) +} + +// SetReadDeadline implements net.Conn.SetReadDeadline and +// net.PacketConn.SetReadDeadline. 
// SetReadDeadline implements net.Conn.SetReadDeadline and
// net.PacketConn.SetReadDeadline.
func (d *deadlineTimer) SetReadDeadline(t time.Time) error {
	d.mu.Lock()
	d.setDeadline(&d.readCancelCh, &d.readTimer, t)
	d.mu.Unlock()
	return nil
}

// SetWriteDeadline implements net.Conn.SetWriteDeadline and
// net.PacketConn.SetWriteDeadline.
func (d *deadlineTimer) SetWriteDeadline(t time.Time) error {
	d.mu.Lock()
	d.setDeadline(&d.writeCancelCh, &d.writeTimer, t)
	d.mu.Unlock()
	return nil
}

// SetDeadline implements net.Conn.SetDeadline and net.PacketConn.SetDeadline.
func (d *deadlineTimer) SetDeadline(t time.Time) error {
	d.mu.Lock()
	d.setDeadline(&d.readCancelCh, &d.readTimer, t)
	d.setDeadline(&d.writeCancelCh, &d.writeTimer, t)
	d.mu.Unlock()
	return nil
}

// A Conn is a wrapper around a tcpip.Endpoint that implements the net.Conn
// interface.
type Conn struct {
	deadlineTimer

	wq *waiter.Queue
	ep tcpip.Endpoint

	// readMu serializes reads and implicitly protects read.
	//
	// Lock ordering:
	// If both readMu and deadlineTimer.mu are to be used in a single
	// request, readMu must be acquired before deadlineTimer.mu.
	readMu sync.Mutex

	// read contains bytes that have been read from the endpoint,
	// but haven't yet been returned.
	read buffer.View
}

// NewConn creates a new Conn.
func NewConn(wq *waiter.Queue, ep tcpip.Endpoint) *Conn {
	c := &Conn{
		wq: wq,
		ep: ep,
	}
	c.deadlineTimer.init()
	return c
}

// Accept implements net.Listener.Accept.
func (l *Listener) Accept() (net.Conn, error) {
	n, wq, err := l.ep.Accept()

	if err == tcpip.ErrWouldBlock {
		// Create wait queue entry that notifies a channel, then retry
		// Accept until an endpoint is ready or the listener is canceled.
		waitEntry, notifyCh := waiter.NewChannelEntry(nil)
		l.wq.EventRegister(&waitEntry, waiter.EventIn)
		defer l.wq.EventUnregister(&waitEntry)

		for {
			n, wq, err = l.ep.Accept()

			if err != tcpip.ErrWouldBlock {
				break
			}

			select {
			case <-l.cancel:
				return nil, errCanceled
			case <-notifyCh:
			}
		}
	}

	if err != nil {
		return nil, &net.OpError{
			Op:   "accept",
			Net:  "tcp",
			Addr: l.Addr(),
			Err:  errors.New(err.String()),
		}
	}

	return NewConn(wq, n), nil
}

// opErrorer constructs *net.OpError values with the owner's local/remote
// address information filled in; implemented by Conn and PacketConn.
type opErrorer interface {
	newOpError(op string, err error) *net.OpError
}

// commonRead implements the common logic between net.Conn.Read and
// net.PacketConn.ReadFrom: it performs one read, blocking on the wait queue
// until data is available, the deadline fires, or a hard error occurs.
func commonRead(ep tcpip.Endpoint, wq *waiter.Queue, deadline <-chan struct{}, addr *tcpip.FullAddress, errorer opErrorer) ([]byte, error) {
	read, err := ep.Read(addr)

	if err == tcpip.ErrWouldBlock {
		// Create wait queue entry that notifies a channel.
		waitEntry, notifyCh := waiter.NewChannelEntry(nil)
		wq.EventRegister(&waitEntry, waiter.EventIn)
		defer wq.EventUnregister(&waitEntry)
		for {
			read, err = ep.Read(addr)
			if err != tcpip.ErrWouldBlock {
				break
			}
			select {
			case <-deadline:
				return nil, errorer.newOpError("read", &timeoutError{})
			case <-notifyCh:
			}
		}
	}

	if err == tcpip.ErrClosedForReceive {
		// Map the endpoint-level "closed for receive" to the standard EOF.
		return nil, io.EOF
	}

	if err != nil {
		return nil, errorer.newOpError("read", errors.New(err.String()))
	}

	return read, nil
}

// Read implements net.Conn.Read.
func (c *Conn) Read(b []byte) (int, error) {
	c.readMu.Lock()
	defer c.readMu.Unlock()

	deadline := c.readCancel()

	// Check if deadline has already expired.
	select {
	case <-deadline:
		return 0, c.newOpError("read", &timeoutError{})
	default:
	}

	// Refill the local buffer only when the previous read has been fully
	// consumed.
	if len(c.read) == 0 {
		var err error
		c.read, err = commonRead(c.ep, c.wq, deadline, nil, c)
		if err != nil {
			return 0, err
		}
	}

	n := copy(b, c.read)
	c.read.TrimFront(n)
	if len(c.read) == 0 {
		c.read = nil
	}
	return n, nil
}

// Write implements net.Conn.Write.
func (c *Conn) Write(b []byte) (int, error) {
	deadline := c.writeCancel()

	// Check if deadlineTimer has already expired.
	select {
	case <-deadline:
		return 0, c.newOpError("write", &timeoutError{})
	default:
	}

	v := buffer.NewView(len(b))
	copy(v, b)

	// We must handle two soft failure conditions simultaneously:
	// 1. Write may write nothing and return tcpip.ErrWouldBlock.
	//    If this happens, we need to register for notifications if we have
	//    not already and wait to try again.
	// 2. Write may write fewer than the full number of bytes and return
	//    without error. In this case we need to try writing the remaining
	//    bytes again. We do not need to register for notifications.
	//
	// What is more, these two soft failure conditions can be interspersed.
	// There is no guarantee that all of the condition #1s will occur before
	// all of the condition #2s or vice versa.
	var (
		err      *tcpip.Error
		nbytes   int
		reg      bool
		notifyCh chan struct{}
	)
	for nbytes < len(b) && (err == tcpip.ErrWouldBlock || err == nil) {
		if err == tcpip.ErrWouldBlock {
			if !reg {
				// Only register once.
				reg = true

				// Create wait queue entry that notifies a channel.
				var waitEntry waiter.Entry
				waitEntry, notifyCh = waiter.NewChannelEntry(nil)
				c.wq.EventRegister(&waitEntry, waiter.EventOut)
				defer c.wq.EventUnregister(&waitEntry)
			} else {
				// Don't wait immediately after registration in case more data
				// became available between when we last checked and when we setup
				// the notification.
				select {
				case <-deadline:
					return nbytes, c.newOpError("write", &timeoutError{})
				case <-notifyCh:
				}
			}
		}

		var n uintptr
		n, err = c.ep.Write(tcpip.SlicePayload(v), tcpip.WriteOptions{})
		nbytes += int(n)
		v.TrimFront(int(n))
	}

	if err == nil {
		return nbytes, nil
	}

	return nbytes, c.newOpError("write", errors.New(err.String()))
}

// Close implements net.Conn.Close.
func (c *Conn) Close() error {
	c.ep.Close()
	return nil
}

// LocalAddr implements net.Conn.LocalAddr.
func (c *Conn) LocalAddr() net.Addr {
	a, err := c.ep.GetLocalAddress()
	if err != nil {
		return nil
	}
	return fullToTCPAddr(a)
}

// RemoteAddr implements net.Conn.RemoteAddr.
func (c *Conn) RemoteAddr() net.Addr {
	a, err := c.ep.GetRemoteAddress()
	if err != nil {
		return nil
	}
	return fullToTCPAddr(a)
}

// newOpError wraps err in a *net.OpError carrying this connection's
// addresses, for parity with errors returned by the standard net package.
func (c *Conn) newOpError(op string, err error) *net.OpError {
	return &net.OpError{
		Op:     op,
		Net:    "tcp",
		Source: c.LocalAddr(),
		Addr:   c.RemoteAddr(),
		Err:    err,
	}
}

func fullToTCPAddr(addr tcpip.FullAddress) *net.TCPAddr {
	return &net.TCPAddr{IP: net.IP(addr.Addr), Port: int(addr.Port)}
}

func fullToUDPAddr(addr tcpip.FullAddress) *net.UDPAddr {
	return &net.UDPAddr{IP: net.IP(addr.Addr), Port: int(addr.Port)}
}

// DialTCP creates a new TCP Conn connected to the specified address.
func DialTCP(s *stack.Stack, addr tcpip.FullAddress, network tcpip.NetworkProtocolNumber) (*Conn, error) {
	// Create TCP endpoint, then connect.
	var wq waiter.Queue
	ep, err := s.NewEndpoint(tcp.ProtocolNumber, network, &wq)
	if err != nil {
		return nil, errors.New(err.String())
	}

	// Create wait queue entry that notifies a channel.
	//
	// We do this unconditionally as Connect will always return an error.
	waitEntry, notifyCh := waiter.NewChannelEntry(nil)
	wq.EventRegister(&waitEntry, waiter.EventOut)
	defer wq.EventUnregister(&waitEntry)

	err = ep.Connect(addr)
	if err == tcpip.ErrConnectStarted {
		// The connect is in progress; wait for completion and fetch the
		// final status from the endpoint.
		<-notifyCh
		err = ep.GetSockOpt(tcpip.ErrorOption{})
	}
	if err != nil {
		ep.Close()
		return nil, &net.OpError{
			Op:   "connect",
			Net:  "tcp",
			Addr: fullToTCPAddr(addr),
			Err:  errors.New(err.String()),
		}
	}

	return NewConn(&wq, ep), nil
}

// A PacketConn is a wrapper around a tcpip endpoint that implements
// net.PacketConn.
type PacketConn struct {
	deadlineTimer

	stack *stack.Stack
	ep    tcpip.Endpoint
	wq    *waiter.Queue
}

// NewPacketConn creates a new PacketConn.
func NewPacketConn(s *stack.Stack, addr tcpip.FullAddress, network tcpip.NetworkProtocolNumber) (*PacketConn, error) {
	// Create UDP endpoint and bind it.
	var wq waiter.Queue
	ep, err := s.NewEndpoint(udp.ProtocolNumber, network, &wq)
	if err != nil {
		return nil, errors.New(err.String())
	}

	if err := ep.Bind(addr, nil); err != nil {
		ep.Close()
		return nil, &net.OpError{
			Op:   "bind",
			Net:  "udp",
			Addr: fullToUDPAddr(addr),
			Err:  errors.New(err.String()),
		}
	}

	c := &PacketConn{
		stack: s,
		ep:    ep,
		wq:    &wq,
	}
	c.deadlineTimer.init()
	return c, nil
}

// newOpError implements opErrorer for reads with no remote address.
func (c *PacketConn) newOpError(op string, err error) *net.OpError {
	return c.newRemoteOpError(op, nil, err)
}

// newRemoteOpError wraps err in a *net.OpError carrying the local and (when
// known) remote addresses.
func (c *PacketConn) newRemoteOpError(op string, remote net.Addr, err error) *net.OpError {
	return &net.OpError{
		Op:     op,
		Net:    "udp",
		Source: c.LocalAddr(),
		Addr:   remote,
		Err:    err,
	}
}

// ReadFrom implements net.PacketConn.ReadFrom.
func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) {
	deadline := c.readCancel()

	// Check if deadline has already expired.
	select {
	case <-deadline:
		return 0, nil, c.newOpError("read", &timeoutError{})
	default:
	}

	var addr tcpip.FullAddress
	read, err := commonRead(c.ep, c.wq, deadline, &addr, c)
	if err != nil {
		return 0, nil, err
	}

	return copy(b, read), fullToUDPAddr(addr), nil
}

// WriteTo implements net.PacketConn.WriteTo.
func (c *PacketConn) WriteTo(b []byte, addr net.Addr) (int, error) {
	deadline := c.writeCancel()

	// Check if deadline has already expired.
	select {
	case <-deadline:
		return 0, c.newRemoteOpError("write", addr, &timeoutError{})
	default:
	}

	// NOTE(review): this type assertion panics if addr is not a
	// *net.UDPAddr; callers must pass UDP addresses.
	ua := addr.(*net.UDPAddr)
	fullAddr := tcpip.FullAddress{Addr: tcpip.Address(ua.IP), Port: uint16(ua.Port)}

	v := buffer.NewView(len(b))
	copy(v, b)

	wopts := tcpip.WriteOptions{To: &fullAddr}
	n, err := c.ep.Write(tcpip.SlicePayload(v), wopts)

	if err == tcpip.ErrWouldBlock {
		// Create wait queue entry that notifies a channel.
		waitEntry, notifyCh := waiter.NewChannelEntry(nil)
		c.wq.EventRegister(&waitEntry, waiter.EventOut)
		defer c.wq.EventUnregister(&waitEntry)
		for {
			n, err = c.ep.Write(tcpip.SlicePayload(v), wopts)
			if err != tcpip.ErrWouldBlock {
				break
			}
			select {
			case <-deadline:
				return int(n), c.newRemoteOpError("write", addr, &timeoutError{})
			case <-notifyCh:
			}
		}
	}

	if err == nil {
		return int(n), nil
	}

	return int(n), c.newRemoteOpError("write", addr, errors.New(err.String()))
}

// Close implements net.PacketConn.Close.
func (c *PacketConn) Close() error {
	c.ep.Close()
	return nil
}

// LocalAddr implements net.PacketConn.LocalAddr.
+func (c *PacketConn) LocalAddr() net.Addr { + a, err := c.ep.GetLocalAddress() + if err != nil { + return nil + } + return fullToUDPAddr(a) +} diff --git a/pkg/tcpip/adapters/gonet/gonet_test.go b/pkg/tcpip/adapters/gonet/gonet_test.go new file mode 100644 index 000000000..2f86469eb --- /dev/null +++ b/pkg/tcpip/adapters/gonet/gonet_test.go @@ -0,0 +1,424 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gonet + +import ( + "fmt" + "net" + "reflect" + "strings" + "testing" + "time" + + "golang.org/x/net/nettest" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/loopback" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv6" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/udp" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + NICID = 1 +) + +func TestTimeouts(t *testing.T) { + nc := NewConn(nil, nil) + dlfs := []struct { + name string + f func(time.Time) error + }{ + {"SetDeadline", nc.SetDeadline}, + {"SetReadDeadline", nc.SetReadDeadline}, + {"SetWriteDeadline", nc.SetWriteDeadline}, + } + + for _, dlf := range dlfs { + if err := dlf.f(time.Time{}); err != nil { + t.Errorf("got %s(time.Time{}) = %v, want = %v", dlf.name, err, nil) + } + } +} + +func newLoopbackStack() (*stack.Stack, *tcpip.Error) { + // Create the stack and add a NIC. + s := stack.New([]string{ipv4.ProtocolName, ipv6.ProtocolName}, []string{tcp.ProtocolName, udp.ProtocolName}) + + if err := s.CreateNIC(NICID, loopback.New()); err != nil { + return nil, err + } + + // Add default route. 
+ s.SetRouteTable([]tcpip.Route{ + // IPv4 + { + Destination: tcpip.Address(strings.Repeat("\x00", 4)), + Mask: tcpip.Address(strings.Repeat("\x00", 4)), + Gateway: "", + NIC: NICID, + }, + + // IPv6 + { + Destination: tcpip.Address(strings.Repeat("\x00", 16)), + Mask: tcpip.Address(strings.Repeat("\x00", 16)), + Gateway: "", + NIC: NICID, + }, + }) + + return s, nil +} + +type testConnection struct { + wq *waiter.Queue + e *waiter.Entry + ch chan struct{} + ep tcpip.Endpoint +} + +func connect(s *stack.Stack, addr tcpip.FullAddress) (*testConnection, *tcpip.Error) { + wq := &waiter.Queue{} + ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq) + + entry, ch := waiter.NewChannelEntry(nil) + wq.EventRegister(&entry, waiter.EventOut) + + err = ep.Connect(addr) + if err == tcpip.ErrConnectStarted { + <-ch + err = ep.GetSockOpt(tcpip.ErrorOption{}) + } + if err != nil { + return nil, err + } + + wq.EventUnregister(&entry) + wq.EventRegister(&entry, waiter.EventIn) + + return &testConnection{wq, &entry, ch, ep}, nil +} + +func (c *testConnection) close() { + c.wq.EventUnregister(c.e) + c.ep.Close() +} + +// TestCloseReader tests that Conn.Close() causes Conn.Read() to unblock. +func TestCloseReader(t *testing.T) { + s, err := newLoopbackStack() + if err != nil { + t.Fatalf("newLoopbackStack() = %v", err) + } + + addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211} + + s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr) + + l, e := NewListener(s, addr, ipv4.ProtocolNumber) + if e != nil { + t.Fatalf("NewListener() = %v", e) + } + done := make(chan struct{}) + go func() { + defer close(done) + c, err := l.Accept() + if err != nil { + t.Fatalf("l.Accept() = %v", err) + } + + // Give c.Read() a chance to block before closing the connection. 
+ time.AfterFunc(time.Millisecond*50, func() { + c.Close() + }) + + buf := make([]byte, 256) + n, err := c.Read(buf) + got, ok := err.(*net.OpError) + want := tcpip.ErrConnectionAborted + if n != 0 || !ok || got.Err.Error() != want.String() { + t.Errorf("c.Read() = (%d, %v), want (0, OpError(%v))", n, err, want) + } + }() + sender, err := connect(s, addr) + if err != nil { + t.Fatalf("connect() = %v", err) + } + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Errorf("c.Read() didn't unblock") + } + sender.close() +} + +// TestCloseReaderWithForwarder tests that Conn.Close() wakes Conn.Read() when +// using tcp.Forwarder. +func TestCloseReaderWithForwarder(t *testing.T) { + s, err := newLoopbackStack() + if err != nil { + t.Fatalf("newLoopbackStack() = %v", err) + } + + addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211} + s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr) + + done := make(chan struct{}) + + fwd := tcp.NewForwarder(s, 30000, 10, func(r *tcp.ForwarderRequest) { + defer close(done) + + var wq waiter.Queue + ep, err := r.CreateEndpoint(&wq) + if err != nil { + t.Fatalf("r.CreateEndpoint() = %v", err) + } + defer ep.Close() + r.Complete(false) + + c := NewConn(&wq, ep) + + // Give c.Read() a chance to block before closing the connection. 
+ time.AfterFunc(time.Millisecond*50, func() { + c.Close() + }) + + buf := make([]byte, 256) + n, e := c.Read(buf) + got, ok := e.(*net.OpError) + want := tcpip.ErrConnectionAborted + if n != 0 || !ok || got.Err.Error() != want.String() { + t.Errorf("c.Read() = (%d, %v), want (0, OpError(%v))", n, e, want) + } + }) + s.SetTransportProtocolHandler(tcp.ProtocolNumber, fwd.HandlePacket) + + sender, err := connect(s, addr) + if err != nil { + t.Fatalf("connect() = %v", err) + } + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Errorf("c.Read() didn't unblock") + } + sender.close() +} + +// TestDeadlineChange tests that changing the deadline affects currently blocked reads. +func TestDeadlineChange(t *testing.T) { + s, err := newLoopbackStack() + if err != nil { + t.Fatalf("newLoopbackStack() = %v", err) + } + + addr := tcpip.FullAddress{NICID, tcpip.Address(net.IPv4(169, 254, 10, 1).To4()), 11211} + + s.AddAddress(NICID, ipv4.ProtocolNumber, addr.Addr) + + l, e := NewListener(s, addr, ipv4.ProtocolNumber) + if e != nil { + t.Fatalf("NewListener() = %v", e) + } + done := make(chan struct{}) + go func() { + defer close(done) + c, err := l.Accept() + if err != nil { + t.Fatalf("l.Accept() = %v", err) + } + + c.SetDeadline(time.Now().Add(time.Minute)) + // Give c.Read() a chance to block before closing the connection. 
+ time.AfterFunc(time.Millisecond*50, func() { + c.SetDeadline(time.Now().Add(time.Millisecond * 10)) + }) + + buf := make([]byte, 256) + n, err := c.Read(buf) + got, ok := err.(*net.OpError) + want := "i/o timeout" + if n != 0 || !ok || got.Err == nil || got.Err.Error() != want { + t.Errorf("c.Read() = (%d, %v), want (0, OpError(%s))", n, err, want) + } + }() + sender, err := connect(s, addr) + if err != nil { + t.Fatalf("connect() = %v", err) + } + + select { + case <-done: + case <-time.After(time.Millisecond * 500): + t.Errorf("c.Read() didn't unblock") + } + sender.close() +} + +func TestPacketConnTransfer(t *testing.T) { + s, e := newLoopbackStack() + if e != nil { + t.Fatalf("newLoopbackStack() = %v", e) + } + + ip1 := tcpip.Address(net.IPv4(169, 254, 10, 1).To4()) + addr1 := tcpip.FullAddress{NICID, ip1, 11211} + s.AddAddress(NICID, ipv4.ProtocolNumber, ip1) + ip2 := tcpip.Address(net.IPv4(169, 254, 10, 2).To4()) + addr2 := tcpip.FullAddress{NICID, ip2, 11311} + s.AddAddress(NICID, ipv4.ProtocolNumber, ip2) + + c1, err := NewPacketConn(s, addr1, ipv4.ProtocolNumber) + if err != nil { + t.Fatal("NewPacketConn(port 4):", err) + } + c2, err := NewPacketConn(s, addr2, ipv4.ProtocolNumber) + if err != nil { + t.Fatal("NewPacketConn(port 5):", err) + } + + c1.SetDeadline(time.Now().Add(time.Second)) + c2.SetDeadline(time.Now().Add(time.Second)) + + sent := "abc123" + sendAddr := fullToUDPAddr(addr2) + if n, err := c1.WriteTo([]byte(sent), sendAddr); err != nil || n != len(sent) { + t.Errorf("got c1.WriteTo(%q, %v) = %d, %v, want = %d, %v", sent, sendAddr, n, err, len(sent), nil) + } + recv := make([]byte, len(sent)) + n, recvAddr, err := c2.ReadFrom(recv) + if err != nil || n != len(recv) { + t.Errorf("got c2.ReadFrom() = %d, %v, want = %d, %v", n, err, len(recv), nil) + } + + if recv := string(recv); recv != sent { + t.Errorf("got recv = %q, want = %q", recv, sent) + } + + if want := fullToUDPAddr(addr1); !reflect.DeepEqual(recvAddr, want) { + t.Errorf("got 
recvAddr = %v, want = %v", recvAddr, want) + } + + if err := c1.Close(); err != nil { + t.Error("c1.Close():", err) + } + if err := c2.Close(); err != nil { + t.Error("c2.Close():", err) + } +} + +func makePipe() (c1, c2 net.Conn, stop func(), err error) { + s, e := newLoopbackStack() + if e != nil { + return nil, nil, nil, fmt.Errorf("newLoopbackStack() = %v", e) + } + + ip := tcpip.Address(net.IPv4(169, 254, 10, 1).To4()) + addr := tcpip.FullAddress{NICID, ip, 11211} + s.AddAddress(NICID, ipv4.ProtocolNumber, ip) + + l, err := NewListener(s, addr, ipv4.ProtocolNumber) + if err != nil { + return nil, nil, nil, fmt.Errorf("NewListener: %v", err) + } + + c1, err = DialTCP(s, addr, ipv4.ProtocolNumber) + if err != nil { + l.Close() + return nil, nil, nil, fmt.Errorf("DialTCP: %v", err) + } + + c2, err = l.Accept() + if err != nil { + l.Close() + c1.Close() + return nil, nil, nil, fmt.Errorf("l.Accept: %v", err) + } + + stop = func() { + c1.Close() + c2.Close() + } + + if err := l.Close(); err != nil { + stop() + return nil, nil, nil, fmt.Errorf("l.Close(): %v", err) + } + + return c1, c2, stop, nil +} + +func TestTCPConnTransfer(t *testing.T) { + c1, c2, _, err := makePipe() + if err != nil { + t.Fatal(err) + } + defer func() { + if err := c1.Close(); err != nil { + t.Error("c1.Close():", err) + } + if err := c2.Close(); err != nil { + t.Error("c2.Close():", err) + } + }() + + c1.SetDeadline(time.Now().Add(time.Second)) + c2.SetDeadline(time.Now().Add(time.Second)) + + const sent = "abc123" + + tests := []struct { + name string + c1 net.Conn + c2 net.Conn + }{ + {"connected to accepted", c1, c2}, + {"accepted to connected", c2, c1}, + } + + for _, test := range tests { + if n, err := test.c1.Write([]byte(sent)); err != nil || n != len(sent) { + t.Errorf("%s: got test.c1.Write(%q) = %d, %v, want = %d, %v", test.name, sent, n, err, len(sent), nil) + continue + } + + recv := make([]byte, len(sent)) + n, err := test.c2.Read(recv) + if err != nil || n != len(recv) { + 
t.Errorf("%s: got test.c2.Read() = %d, %v, want = %d, %v", test.name, n, err, len(recv), nil) + continue + } + + if recv := string(recv); recv != sent { + t.Errorf("%s: got recv = %q, want = %q", test.name, recv, sent) + } + } +} + +func TestTCPDialError(t *testing.T) { + s, e := newLoopbackStack() + if e != nil { + t.Fatalf("newLoopbackStack() = %v", e) + } + + ip := tcpip.Address(net.IPv4(169, 254, 10, 1).To4()) + addr := tcpip.FullAddress{NICID, ip, 11211} + + _, err := DialTCP(s, addr, ipv4.ProtocolNumber) + got, ok := err.(*net.OpError) + want := tcpip.ErrNoRoute + if !ok || got.Err.Error() != want.String() { + t.Errorf("Got DialTCP() = %v, want = %v", err, tcpip.ErrNoRoute) + } +} + +func TestNetTest(t *testing.T) { + nettest.TestConn(t, makePipe) +} diff --git a/pkg/tcpip/buffer/BUILD b/pkg/tcpip/buffer/BUILD new file mode 100644 index 000000000..055e4b953 --- /dev/null +++ b/pkg/tcpip/buffer/BUILD @@ -0,0 +1,32 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "buffer_state", + srcs = [ + "view.go", + ], + out = "buffer_state.go", + package = "buffer", +) + +go_library( + name = "buffer", + srcs = [ + "buffer_state.go", + "prependable.go", + "view.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer", + visibility = ["//visibility:public"], + deps = ["//pkg/state"], +) + +go_test( + name = "buffer_test", + size = "small", + srcs = ["view_test.go"], + embed = [":buffer"], +) diff --git a/pkg/tcpip/buffer/prependable.go b/pkg/tcpip/buffer/prependable.go new file mode 100644 index 000000000..fd84585f9 --- /dev/null +++ b/pkg/tcpip/buffer/prependable.go @@ -0,0 +1,53 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package buffer + +// Prependable is a buffer that grows backwards, that is, more data can be +// prepended to it. It is useful when building networking packets, where each +// protocol adds its own headers to the front of the higher-level protocol +// header and payload; for example, TCP would prepend its header to the payload, +// then IP would prepend its own, then ethernet. +type Prependable struct { + // Buf is the buffer backing the prependable buffer. + buf View + + // usedIdx is the index where the used part of the buffer begins. + usedIdx int +} + +// NewPrependable allocates a new prependable buffer with the given size. +func NewPrependable(size int) Prependable { + return Prependable{buf: NewView(size), usedIdx: size} +} + +// Prepend reserves the requested space in front of the buffer, returning a +// slice that represents the reserved space. +func (p *Prependable) Prepend(size int) []byte { + if size > p.usedIdx { + return nil + } + + p.usedIdx -= size + return p.buf[p.usedIdx:][:size:size] +} + +// View returns a View of the backing buffer that contains all prepended +// data so far. +func (p *Prependable) View() View { + v := p.buf + v.TrimFront(p.usedIdx) + return v +} + +// UsedBytes returns a slice of the backing buffer that contains all prepended +// data so far. +func (p *Prependable) UsedBytes() []byte { + return p.buf[p.usedIdx:] +} + +// UsedLength returns the number of bytes used so far. +func (p *Prependable) UsedLength() int { + return len(p.buf) - p.usedIdx +} diff --git a/pkg/tcpip/buffer/view.go b/pkg/tcpip/buffer/view.go new file mode 100644 index 000000000..241ccc7a8 --- /dev/null +++ b/pkg/tcpip/buffer/view.go @@ -0,0 +1,181 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buffer provides the implementation of a buffer view. 
+package buffer + +// View is a slice of a buffer, with convenience methods. +type View []byte + +// NewView allocates a new buffer and returns an initialized view that covers +// the whole buffer. +func NewView(size int) View { + return make(View, size) +} + +// NewViewFromBytes allocates a new buffer and copies in the given bytes. +func NewViewFromBytes(b []byte) View { + return append(View(nil), b...) +} + +// TrimFront removes the first "count" bytes from the visible section of the +// buffer. +func (v *View) TrimFront(count int) { + *v = (*v)[count:] +} + +// CapLength irreversibly reduces the length of the visible section of the +// buffer to the value specified. +func (v *View) CapLength(length int) { + // We also set the slice cap because if we don't, one would be able to + // expand the view back to include the region just excluded. We want to + // prevent that to avoid potential data leak if we have uninitialized + // data in excluded region. + *v = (*v)[:length:length] +} + +// ToVectorisedView transforms a View in a VectorisedView from an +// already-allocated slice of View. +func (v *View) ToVectorisedView(views [1]View) VectorisedView { + views[0] = *v + return NewVectorisedView(len(*v), views[:]) +} + +// VectorisedView is a vectorised version of View using non contigous memory. +// It supports all the convenience methods supported by View. +type VectorisedView struct { + views []View + size int +} + +// NewVectorisedView creates a new vectorised view from an already-allocated slice +// of View and sets its size. +func NewVectorisedView(size int, views []View) VectorisedView { + return VectorisedView{views: views, size: size} +} + +// TrimFront removes the first "count" bytes of the vectorised view. 
+func (vv *VectorisedView) TrimFront(count int) { + for count > 0 && len(vv.views) > 0 { + if count < len(vv.views[0]) { + vv.size -= count + vv.views[0].TrimFront(count) + return + } + count -= len(vv.views[0]) + vv.RemoveFirst() + } +} + +// CapLength irreversibly reduces the length of the vectorised view. +func (vv *VectorisedView) CapLength(length int) { + if length < 0 { + length = 0 + } + if vv.size < length { + return + } + vv.size = length + for i := range vv.views { + v := &vv.views[i] + if len(*v) >= length { + if length == 0 { + vv.views = vv.views[:i] + } else { + v.CapLength(length) + vv.views = vv.views[:i+1] + } + return + } + length -= len(*v) + } +} + +// Clone returns a clone of this VectorisedView. +// If the buffer argument is large enough to contain all the Views of this VectorisedView, +// the method will avoid allocations and use the buffer to store the Views of the clone. +func (vv *VectorisedView) Clone(buffer []View) VectorisedView { + var views []View + if len(buffer) >= len(vv.views) { + views = buffer[:len(vv.views)] + } else { + views = make([]View, len(vv.views)) + } + for i, v := range vv.views { + views[i] = v + } + return VectorisedView{views: views, size: vv.size} +} + +// First returns the first view of the vectorised view. +// It panics if the vectorised view is empty. +func (vv *VectorisedView) First() View { + if len(vv.views) == 0 { + return nil + } + return vv.views[0] +} + +// RemoveFirst removes the first view of the vectorised view. +func (vv *VectorisedView) RemoveFirst() { + if len(vv.views) == 0 { + return + } + vv.size -= len(vv.views[0]) + vv.views = vv.views[1:] +} + +// SetSize unsafely sets the size of the VectorisedView. +func (vv *VectorisedView) SetSize(size int) { + vv.size = size +} + +// SetViews unsafely sets the views of the VectorisedView. +func (vv *VectorisedView) SetViews(views []View) { + vv.views = views +} + +// Size returns the size in bytes of the entire content stored in the vectorised view. 
+func (vv *VectorisedView) Size() int { + return vv.size +} + +// ToView returns the a single view containing the content of the vectorised view. +func (vv *VectorisedView) ToView() View { + v := make([]byte, vv.size) + u := v + for i := range vv.views { + n := copy(u, vv.views[i]) + u = u[n:] + } + return v +} + +// Views returns the slice containing the all views. +func (vv *VectorisedView) Views() []View { + return vv.views +} + +// ByteSlice returns a slice containing the all views as a []byte. +func (vv *VectorisedView) ByteSlice() [][]byte { + s := make([][]byte, len(vv.views)) + for i := range vv.views { + s[i] = []byte(vv.views[i]) + } + return s +} + +// copy returns a deep-copy of the vectorised view. +// It is an expensive method that should be used only in tests. +func (vv *VectorisedView) copy() *VectorisedView { + uu := &VectorisedView{ + views: make([]View, len(vv.views)), + size: vv.size, + } + for i, v := range vv.views { + uu.views[i] = make(View, len(v)) + copy(uu.views[i], v) + } + return uu +} diff --git a/pkg/tcpip/buffer/view_test.go b/pkg/tcpip/buffer/view_test.go new file mode 100644 index 000000000..ff8535ba5 --- /dev/null +++ b/pkg/tcpip/buffer/view_test.go @@ -0,0 +1,212 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buffer_test contains tests for the VectorisedView type. +package buffer + +import ( + "reflect" + "testing" +) + +// vv is an helper to build VectorisedView from different strings. 
func vv(size int, pieces ...string) *VectorisedView {
	views := make([]View, len(pieces))
	for i, p := range pieces {
		views[i] = []byte(p)
	}

	vv := NewVectorisedView(size, views)
	return &vv
}

// capLengthTestCases enumerates CapLength inputs, including negative, zero
// and beyond-size corner cases.
var capLengthTestCases = []struct {
	comment string
	in      *VectorisedView
	length  int
	want    *VectorisedView
}{
	{
		comment: "Simple case",
		in:      vv(2, "12"),
		length:  1,
		want:    vv(1, "1"),
	},
	{
		comment: "Case spanning across two Views",
		in:      vv(4, "123", "4"),
		length:  2,
		want:    vv(2, "12"),
	},
	{
		comment: "Corner case with negative length",
		in:      vv(1, "1"),
		length:  -1,
		want:    vv(0),
	},
	{
		comment: "Corner case with length = 0",
		in:      vv(3, "12", "3"),
		length:  0,
		want:    vv(0),
	},
	{
		comment: "Corner case with length = size",
		in:      vv(1, "1"),
		length:  1,
		want:    vv(1, "1"),
	},
	{
		comment: "Corner case with length > size",
		in:      vv(1, "1"),
		length:  2,
		want:    vv(1, "1"),
	},
}

func TestCapLength(t *testing.T) {
	for _, c := range capLengthTestCases {
		// Keep a deep copy for the failure message, since CapLength
		// mutates c.in in place.
		orig := c.in.copy()
		c.in.CapLength(c.length)
		if !reflect.DeepEqual(c.in, c.want) {
			t.Errorf("Test \"%s\" failed when calling CapLength(%d) on %v. Got %v. Want %v",
				c.comment, c.length, orig, c.in, c.want)
		}
	}
}

// trimFrontTestCases enumerates TrimFront inputs, including negative, zero
// and beyond-size corner cases.
var trimFrontTestCases = []struct {
	comment string
	in      *VectorisedView
	count   int
	want    *VectorisedView
}{
	{
		comment: "Simple case",
		in:      vv(2, "12"),
		count:   1,
		want:    vv(1, "2"),
	},
	{
		comment: "Case where we trim an entire View",
		in:      vv(2, "1", "2"),
		count:   1,
		want:    vv(1, "2"),
	},
	{
		comment: "Case spanning across two Views",
		in:      vv(3, "1", "23"),
		count:   2,
		want:    vv(1, "3"),
	},
	{
		comment: "Corner case with negative count",
		in:      vv(1, "1"),
		count:   -1,
		want:    vv(1, "1"),
	},
	{
		comment: " Corner case with count = 0",
		in:      vv(1, "1"),
		count:   0,
		want:    vv(1, "1"),
	},
	{
		comment: "Corner case with count = size",
		in:      vv(1, "1"),
		count:   1,
		want:    vv(0),
	},
	{
		comment: "Corner case with count > size",
		in:      vv(1, "1"),
		count:   2,
		want:    vv(0),
	},
}

func TestTrimFront(t *testing.T) {
	for _, c := range trimFrontTestCases {
		// Keep a deep copy for the failure message, since TrimFront
		// mutates c.in in place.
		orig := c.in.copy()
		c.in.TrimFront(c.count)
		if !reflect.DeepEqual(c.in, c.want) {
			t.Errorf("Test \"%s\" failed when calling TrimFront(%d) on %v. Got %v. Want %v",
				c.comment, c.count, orig, c.in, c.want)
		}
	}
}

// toViewCases checks flattening of zero, one and multiple views.
var toViewCases = []struct {
	comment string
	in      *VectorisedView
	want    View
}{
	{
		comment: "Simple case",
		in:      vv(2, "12"),
		want:    []byte("12"),
	},
	{
		comment: "Case with multiple views",
		in:      vv(2, "1", "2"),
		want:    []byte("12"),
	},
	{
		comment: "Empty case",
		in:      vv(0),
		want:    []byte(""),
	},
}

func TestToView(t *testing.T) {
	for _, c := range toViewCases {
		got := c.in.ToView()
		if !reflect.DeepEqual(got, c.want) {
			t.Errorf("Test \"%s\" failed when calling ToView() on %v. Got %v. Want %v",
				c.comment, c.in, got, c.want)
		}
	}
}

// toCloneCases exercises Clone with buffers smaller than, equal to and larger
// than the number of views, plus a nil buffer.
var toCloneCases = []struct {
	comment  string
	inView   *VectorisedView
	inBuffer []View
}{
	{
		comment:  "Simple case",
		inView:   vv(1, "1"),
		inBuffer: make([]View, 1),
	},
	{
		comment:  "Case with multiple views",
		inView:   vv(2, "1", "2"),
		inBuffer: make([]View, 2),
	},
	{
		comment:  "Case with buffer too small",
		inView:   vv(2, "1", "2"),
		inBuffer: make([]View, 1),
	},
	{
		comment:  "Case with buffer larger than needed",
		inView:   vv(1, "1"),
		inBuffer: make([]View, 2),
	},
	{
		comment:  "Case with nil buffer",
		inView:   vv(1, "1"),
		inBuffer: nil,
	},
}

func TestToClone(t *testing.T) {
	for _, c := range toCloneCases {
		got := c.inView.Clone(c.inBuffer)
		if !reflect.DeepEqual(&got, c.inView) {
			t.Errorf("Test \"%s\" failed when calling Clone(%v) on %v. Got %v. Want %v",
				c.comment, c.inBuffer, c.inView, got, c.inView)
		}
	}
}
diff --git a/pkg/tcpip/checker/BUILD b/pkg/tcpip/checker/BUILD
new file mode 100644
index 000000000..ac5203031
--- /dev/null
+++ b/pkg/tcpip/checker/BUILD
@@ -0,0 +1,16 @@
package(licenses = ["notice"]) # BSD

load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "checker",
    testonly = 1,
    srcs = ["checker.go"],
    importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/checker",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/tcpip",
        "//pkg/tcpip/header",
        "//pkg/tcpip/seqnum",
    ],
)
diff --git a/pkg/tcpip/checker/checker.go b/pkg/tcpip/checker/checker.go
new file mode 100644
index 000000000..209f9d60b
--- /dev/null
+++ b/pkg/tcpip/checker/checker.go
@@ -0,0 +1,517 @@
// Copyright 2016 The Netstack Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package checker provides helper functions to check networking packets for
// validity.
+package checker + +import ( + "encoding/binary" + "reflect" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" +) + +// NetworkChecker is a function to check a property of a network packet. +type NetworkChecker func(*testing.T, []header.Network) + +// TransportChecker is a function to check a property of a transport packet. +type TransportChecker func(*testing.T, header.Transport) + +// IPv4 checks the validity and properties of the given IPv4 packet. It is +// expected to be used in conjunction with other network checkers for specific +// properties. For example, to check the source and destination address, one +// would call: +// +// checker.IPv4(t, b, checker.SrcAddr(x), checker.DstAddr(y)) +func IPv4(t *testing.T, b []byte, checkers ...NetworkChecker) { + ipv4 := header.IPv4(b) + + if !ipv4.IsValid(len(b)) { + t.Fatalf("Not a valid IPv4 packet") + } + + xsum := ipv4.CalculateChecksum() + if xsum != 0 && xsum != 0xffff { + t.Fatalf("Bad checksum: 0x%x, checksum in packet: 0x%x", xsum, ipv4.Checksum()) + } + + for _, f := range checkers { + f(t, []header.Network{ipv4}) + } +} + +// IPv6 checks the validity and properties of the given IPv6 packet. The usage +// is similar to IPv4. +func IPv6(t *testing.T, b []byte, checkers ...NetworkChecker) { + ipv6 := header.IPv6(b) + if !ipv6.IsValid(len(b)) { + t.Fatalf("Not a valid IPv6 packet") + } + + for _, f := range checkers { + f(t, []header.Network{ipv6}) + } +} + +// SrcAddr creates a checker that checks the source address. +func SrcAddr(addr tcpip.Address) NetworkChecker { + return func(t *testing.T, h []header.Network) { + if a := h[0].SourceAddress(); a != addr { + t.Fatalf("Bad source address, got %v, want %v", a, addr) + } + } +} + +// DstAddr creates a checker that checks the destination address. 
+func DstAddr(addr tcpip.Address) NetworkChecker { + return func(t *testing.T, h []header.Network) { + if a := h[0].DestinationAddress(); a != addr { + t.Fatalf("Bad destination address, got %v, want %v", a, addr) + } + } +} + +// PayloadLen creates a checker that checks the payload length. +func PayloadLen(plen int) NetworkChecker { + return func(t *testing.T, h []header.Network) { + if l := len(h[0].Payload()); l != plen { + t.Fatalf("Bad payload length, got %v, want %v", l, plen) + } + } +} + +// FragmentOffset creates a checker that checks the FragmentOffset field. +func FragmentOffset(offset uint16) NetworkChecker { + return func(t *testing.T, h []header.Network) { + // We only do this of IPv4 for now. + switch ip := h[0].(type) { + case header.IPv4: + if v := ip.FragmentOffset(); v != offset { + t.Fatalf("Bad fragment offset, got %v, want %v", v, offset) + } + } + } +} + +// FragmentFlags creates a checker that checks the fragment flags field. +func FragmentFlags(flags uint8) NetworkChecker { + return func(t *testing.T, h []header.Network) { + // We only do this of IPv4 for now. + switch ip := h[0].(type) { + case header.IPv4: + if v := ip.Flags(); v != flags { + t.Fatalf("Bad fragment offset, got %v, want %v", v, flags) + } + } + } +} + +// TOS creates a checker that checks the TOS field. +func TOS(tos uint8, label uint32) NetworkChecker { + return func(t *testing.T, h []header.Network) { + if v, l := h[0].TOS(); v != tos || l != label { + t.Fatalf("Bad TOS, got (%v, %v), want (%v,%v)", v, l, tos, label) + } + } +} + +// Raw creates a checker that checks the bytes of payload. +// The checker always checks the payload of the last network header. +// For instance, in case of IPv6 fragments, the payload that will be checked +// is the one containing the actual data that the packet is carrying, without +// the bytes added by the IPv6 fragmentation. 
+func Raw(want []byte) NetworkChecker { + return func(t *testing.T, h []header.Network) { + if got := h[len(h)-1].Payload(); !reflect.DeepEqual(got, want) { + t.Fatalf("Wrong payload, got %v, want %v", got, want) + } + } +} + +// IPv6Fragment creates a checker that validates an IPv6 fragment. +func IPv6Fragment(checkers ...NetworkChecker) NetworkChecker { + return func(t *testing.T, h []header.Network) { + if p := h[0].TransportProtocol(); p != header.IPv6FragmentHeader { + t.Fatalf("Bad protocol, got %v, want %v", p, header.UDPProtocolNumber) + } + + ipv6Frag := header.IPv6Fragment(h[0].Payload()) + if !ipv6Frag.IsValid() { + t.Fatalf("Not a valid IPv6 fragment") + } + + for _, f := range checkers { + f(t, []header.Network{h[0], ipv6Frag}) + } + } +} + +// TCP creates a checker that checks that the transport protocol is TCP and +// potentially additional transport header fields. +func TCP(checkers ...TransportChecker) NetworkChecker { + return func(t *testing.T, h []header.Network) { + first := h[0] + last := h[len(h)-1] + + if p := last.TransportProtocol(); p != header.TCPProtocolNumber { + t.Fatalf("Bad protocol, got %v, want %v", p, header.TCPProtocolNumber) + } + + // Verify the checksum. + tcp := header.TCP(last.Payload()) + l := uint16(len(tcp)) + + xsum := header.Checksum([]byte(first.SourceAddress()), 0) + xsum = header.Checksum([]byte(first.DestinationAddress()), xsum) + xsum = header.Checksum([]byte{0, byte(last.TransportProtocol())}, xsum) + xsum = header.Checksum([]byte{byte(l >> 8), byte(l)}, xsum) + xsum = header.Checksum(tcp, xsum) + + if xsum != 0 && xsum != 0xffff { + t.Fatalf("Bad checksum: 0x%x, checksum in segment: 0x%x", xsum, tcp.Checksum()) + } + + // Run the transport checkers. + for _, f := range checkers { + f(t, tcp) + } + } +} + +// UDP creates a checker that checks that the transport protocol is UDP and +// potentially additional transport header fields. 
+func UDP(checkers ...TransportChecker) NetworkChecker { + return func(t *testing.T, h []header.Network) { + last := h[len(h)-1] + + if p := last.TransportProtocol(); p != header.UDPProtocolNumber { + t.Fatalf("Bad protocol, got %v, want %v", p, header.UDPProtocolNumber) + } + + udp := header.UDP(last.Payload()) + for _, f := range checkers { + f(t, udp) + } + } +} + +// SrcPort creates a checker that checks the source port. +func SrcPort(port uint16) TransportChecker { + return func(t *testing.T, h header.Transport) { + if p := h.SourcePort(); p != port { + t.Fatalf("Bad source port, got %v, want %v", p, port) + } + } +} + +// DstPort creates a checker that checks the destination port. +func DstPort(port uint16) TransportChecker { + return func(t *testing.T, h header.Transport) { + if p := h.DestinationPort(); p != port { + t.Fatalf("Bad destination port, got %v, want %v", p, port) + } + } +} + +// SeqNum creates a checker that checks the sequence number. +func SeqNum(seq uint32) TransportChecker { + return func(t *testing.T, h header.Transport) { + tcp, ok := h.(header.TCP) + if !ok { + return + } + + if s := tcp.SequenceNumber(); s != seq { + t.Fatalf("Bad sequence number, got %v, want %v", s, seq) + } + } +} + +// AckNum creates a checker that checks the ack number. +func AckNum(seq uint32) TransportChecker { + return func(t *testing.T, h header.Transport) { + t.Helper() + tcp, ok := h.(header.TCP) + if !ok { + return + } + + if s := tcp.AckNumber(); s != seq { + t.Fatalf("Bad ack number, got %v, want %v", s, seq) + } + } +} + +// Window creates a checker that checks the tcp window. +func Window(window uint16) TransportChecker { + return func(t *testing.T, h header.Transport) { + tcp, ok := h.(header.TCP) + if !ok { + return + } + + if w := tcp.WindowSize(); w != window { + t.Fatalf("Bad window, got 0x%x, want 0x%x", w, window) + } + } +} + +// TCPFlags creates a checker that checks the tcp flags. 
+func TCPFlags(flags uint8) TransportChecker { + return func(t *testing.T, h header.Transport) { + tcp, ok := h.(header.TCP) + if !ok { + return + } + + if f := tcp.Flags(); f != flags { + t.Fatalf("Bad flags, got 0x%x, want 0x%x", f, flags) + } + } +} + +// TCPFlagsMatch creates a checker that checks that the tcp flags, masked by the +// given mask, match the supplied flags. +func TCPFlagsMatch(flags, mask uint8) TransportChecker { + return func(t *testing.T, h header.Transport) { + tcp, ok := h.(header.TCP) + if !ok { + return + } + + if f := tcp.Flags(); (f & mask) != (flags & mask) { + t.Fatalf("Bad masked flags, got 0x%x, want 0x%x, mask 0x%x", f, flags, mask) + } + } +} + +// TCPSynOptions creates a checker that checks the presence of TCP options in +// SYN segments. +// +// If wndscale is negative, the window scale option must not be present. +func TCPSynOptions(wantOpts header.TCPSynOptions) TransportChecker { + return func(t *testing.T, h header.Transport) { + tcp, ok := h.(header.TCP) + if !ok { + return + } + opts := tcp.Options() + limit := len(opts) + foundMSS := false + foundWS := false + foundTS := false + foundSACKPermitted := false + tsVal := uint32(0) + tsEcr := uint32(0) + for i := 0; i < limit; { + switch opts[i] { + case header.TCPOptionEOL: + i = limit + case header.TCPOptionNOP: + i++ + case header.TCPOptionMSS: + v := uint16(opts[i+2])<<8 | uint16(opts[i+3]) + if wantOpts.MSS != v { + t.Fatalf("Bad MSS: got %v, want %v", v, wantOpts.MSS) + } + foundMSS = true + i += 4 + case header.TCPOptionWS: + if wantOpts.WS < 0 { + t.Fatalf("WS present when it shouldn't be") + } + v := int(opts[i+2]) + if v != wantOpts.WS { + t.Fatalf("Bad WS: got %v, want %v", v, wantOpts.WS) + } + foundWS = true + i += 3 + case header.TCPOptionTS: + if i+9 >= limit { + t.Fatalf("TS Option truncated , option is only: %d bytes, want 10", limit-i) + } + if opts[i+1] != 10 { + t.Fatalf("Bad length %d for TS option, limit: %d", opts[i+1], limit) + } + tsVal = 
binary.BigEndian.Uint32(opts[i+2:]) + tsEcr = uint32(0) + if tcp.Flags()&header.TCPFlagAck != 0 { + // If the syn is an SYN-ACK then read + // the tsEcr value as well. + tsEcr = binary.BigEndian.Uint32(opts[i+6:]) + } + foundTS = true + i += 10 + case header.TCPOptionSACKPermitted: + if i+1 >= limit { + t.Fatalf("SACKPermitted option truncated, option is only : %d bytes, want 2", limit-i) + } + if opts[i+1] != 2 { + t.Fatalf("Bad length %d for SACKPermitted option, limit: %d", opts[i+1], limit) + } + foundSACKPermitted = true + i += 2 + + default: + i += int(opts[i+1]) + } + } + + if !foundMSS { + t.Fatalf("MSS option not found. Options: %x", opts) + } + + if !foundWS && wantOpts.WS >= 0 { + t.Fatalf("WS option not found. Options: %x", opts) + } + if wantOpts.TS && !foundTS { + t.Fatalf("TS option not found. Options: %x", opts) + } + if foundTS && tsVal == 0 { + t.Fatalf("TS option specified but the timestamp value is zero") + } + if foundTS && tsEcr == 0 && wantOpts.TSEcr != 0 { + t.Fatalf("TS option specified but TSEcr is incorrect: got %d, want: %d", tsEcr, wantOpts.TSEcr) + } + if wantOpts.SACKPermitted && !foundSACKPermitted { + t.Fatalf("SACKPermitted option not found. Options: %x", opts) + } + } +} + +// TCPTimestampChecker creates a checker that validates that a TCP segment has a +// TCP Timestamp option if wantTS is true, it also compares the wantTSVal and +// wantTSEcr values with those in the TCP segment (if present). +// +// If wantTSVal or wantTSEcr is zero then the corresponding comparison is +// skipped. 
+func TCPTimestampChecker(wantTS bool, wantTSVal uint32, wantTSEcr uint32) TransportChecker { + return func(t *testing.T, h header.Transport) { + tcp, ok := h.(header.TCP) + if !ok { + return + } + opts := []byte(tcp.Options()) + limit := len(opts) + foundTS := false + tsVal := uint32(0) + tsEcr := uint32(0) + for i := 0; i < limit; { + switch opts[i] { + case header.TCPOptionEOL: + i = limit + case header.TCPOptionNOP: + i++ + case header.TCPOptionTS: + if i+9 >= limit { + t.Fatalf("TS option found, but option is truncated, option length: %d, want 10 bytes", limit-i) + } + if opts[i+1] != 10 { + t.Fatalf("TS option found, but bad length specified: %d, want: 10", opts[i+1]) + } + tsVal = binary.BigEndian.Uint32(opts[i+2:]) + tsEcr = binary.BigEndian.Uint32(opts[i+6:]) + foundTS = true + i += 10 + default: + // We don't recognize this option, just skip over it. + if i+2 > limit { + return + } + l := int(opts[i+1]) + if i < 2 || i+l > limit { + return + } + i += l + } + } + + if wantTS != foundTS { + t.Fatalf("TS Option mismatch: got TS= %v, want TS= %v", foundTS, wantTS) + } + if wantTS && wantTSVal != 0 && wantTSVal != tsVal { + t.Fatalf("Timestamp value is incorrect: got: %d, want: %d", tsVal, wantTSVal) + } + if wantTS && wantTSEcr != 0 && tsEcr != wantTSEcr { + t.Fatalf("Timestamp Echo Reply is incorrect: got: %d, want: %d", tsEcr, wantTSEcr) + } + } +} + +// TCPNoSACKBlockChecker creates a checker that verifies that the segment does not +// contain any SACK blocks in the TCP options. +func TCPNoSACKBlockChecker() TransportChecker { + return TCPSACKBlockChecker(nil) +} + +// TCPSACKBlockChecker creates a checker that verifies that the segment does +// contain the specified SACK blocks in the TCP options. 
+func TCPSACKBlockChecker(sackBlocks []header.SACKBlock) TransportChecker { + return func(t *testing.T, h header.Transport) { + t.Helper() + tcp, ok := h.(header.TCP) + if !ok { + return + } + var gotSACKBlocks []header.SACKBlock + + opts := []byte(tcp.Options()) + limit := len(opts) + for i := 0; i < limit; { + switch opts[i] { + case header.TCPOptionEOL: + i = limit + case header.TCPOptionNOP: + i++ + case header.TCPOptionSACK: + if i+2 > limit { + // Malformed SACK block. + t.Fatalf("malformed SACK option in options: %v", opts) + } + sackOptionLen := int(opts[i+1]) + if i+sackOptionLen > limit || (sackOptionLen-2)%8 != 0 { + // Malformed SACK block. + t.Fatalf("malformed SACK option length in options: %v", opts) + } + numBlocks := sackOptionLen / 8 + for j := 0; j < numBlocks; j++ { + start := binary.BigEndian.Uint32(opts[i+2+j*8:]) + end := binary.BigEndian.Uint32(opts[i+2+j*8+4:]) + gotSACKBlocks = append(gotSACKBlocks, header.SACKBlock{ + Start: seqnum.Value(start), + End: seqnum.Value(end), + }) + } + i += sackOptionLen + default: + // We don't recognize this option, just skip over it. + if i+2 > limit { + break + } + l := int(opts[i+1]) + if l < 2 || i+l > limit { + break + } + i += l + } + } + + if !reflect.DeepEqual(gotSACKBlocks, sackBlocks) { + t.Fatalf("SACKBlocks are not equal, got: %v, want: %v", gotSACKBlocks, sackBlocks) + } + } +} + +// Payload creates a checker that checks the payload. 
+func Payload(want []byte) TransportChecker { + return func(t *testing.T, h header.Transport) { + if got := h.Payload(); !reflect.DeepEqual(got, want) { + t.Fatalf("Wrong payload, got %v, want %v", got, want) + } + } +} diff --git a/pkg/tcpip/header/BUILD b/pkg/tcpip/header/BUILD new file mode 100644 index 000000000..167ea250d --- /dev/null +++ b/pkg/tcpip/header/BUILD @@ -0,0 +1,51 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "tcp_header_state", + srcs = [ + "tcp.go", + ], + out = "tcp_header_state.go", + package = "header", +) + +go_library( + name = "header", + srcs = [ + "arp.go", + "checksum.go", + "eth.go", + "gue.go", + "icmpv4.go", + "icmpv6.go", + "interfaces.go", + "ipv4.go", + "ipv6.go", + "ipv6_fragment.go", + "tcp.go", + "tcp_header_state.go", + "udp.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/header", + visibility = ["//visibility:public"], + deps = [ + "//pkg/state", + "//pkg/tcpip", + "//pkg/tcpip/seqnum", + ], +) + +go_test( + name = "header_test", + size = "small", + srcs = [ + "ipversion_test.go", + "tcp_test.go", + ], + deps = [ + ":header", + ], +) diff --git a/pkg/tcpip/header/arp.go b/pkg/tcpip/header/arp.go new file mode 100644 index 000000000..af7f988f3 --- /dev/null +++ b/pkg/tcpip/header/arp.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import "gvisor.googlesource.com/gvisor/pkg/tcpip" + +const ( + // ARPProtocolNumber is the ARP network protocol number. + ARPProtocolNumber tcpip.NetworkProtocolNumber = 0x0806 + + // ARPSize is the size of an IPv4-over-Ethernet ARP packet. + ARPSize = 2 + 2 + 1 + 1 + 2 + 2*6 + 2*4 +) + +// ARPOp is an ARP opcode. +type ARPOp uint16 + +// Typical ARP opcodes defined in RFC 826. 
+const ( + ARPRequest ARPOp = 1 + ARPReply ARPOp = 2 +) + +// ARP is an ARP packet stored in a byte array as described in RFC 826. +type ARP []byte + +func (a ARP) hardwareAddressSpace() uint16 { return uint16(a[0])<<8 | uint16(a[1]) } +func (a ARP) protocolAddressSpace() uint16 { return uint16(a[2])<<8 | uint16(a[3]) } +func (a ARP) hardwareAddressSize() int { return int(a[4]) } +func (a ARP) protocolAddressSize() int { return int(a[5]) } + +// Op is the ARP opcode. +func (a ARP) Op() ARPOp { return ARPOp(a[6])<<8 | ARPOp(a[7]) } + +// SetOp sets the ARP opcode. +func (a ARP) SetOp(op ARPOp) { + a[6] = uint8(op >> 8) + a[7] = uint8(op) +} + +// SetIPv4OverEthernet configures the ARP packet for IPv4-over-Ethernet. +func (a ARP) SetIPv4OverEthernet() { + a[0], a[1] = 0, 1 // htypeEthernet + a[2], a[3] = 0x08, 0x00 // IPv4ProtocolNumber + a[4] = 6 // macSize + a[5] = uint8(IPv4AddressSize) +} + +// HardwareAddressSender is the link address of the sender. +// It is a view on to the ARP packet so it can be used to set the value. +func (a ARP) HardwareAddressSender() []byte { + const s = 8 + return a[s : s+6] +} + +// ProtocolAddressSender is the protocol address of the sender. +// It is a view on to the ARP packet so it can be used to set the value. +func (a ARP) ProtocolAddressSender() []byte { + const s = 8 + 6 + return a[s : s+4] +} + +// HardwareAddressTarget is the link address of the target. +// It is a view on to the ARP packet so it can be used to set the value. +func (a ARP) HardwareAddressTarget() []byte { + const s = 8 + 6 + 4 + return a[s : s+6] +} + +// ProtocolAddressTarget is the protocol address of the target. +// It is a view on to the ARP packet so it can be used to set the value. +func (a ARP) ProtocolAddressTarget() []byte { + const s = 8 + 6 + 4 + 6 + return a[s : s+4] +} + +// IsValid reports whether this is an ARP packet for IPv4 over Ethernet. 
+func (a ARP) IsValid() bool { + if len(a) < ARPSize { + return false + } + const htypeEthernet = 1 + const macSize = 6 + return a.hardwareAddressSpace() == htypeEthernet && + a.protocolAddressSpace() == uint16(IPv4ProtocolNumber) && + a.hardwareAddressSize() == macSize && + a.protocolAddressSize() == IPv4AddressSize +} diff --git a/pkg/tcpip/header/checksum.go b/pkg/tcpip/header/checksum.go new file mode 100644 index 000000000..6399b1b95 --- /dev/null +++ b/pkg/tcpip/header/checksum.go @@ -0,0 +1,46 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package header provides the implementation of the encoding and decoding of +// network protocol headers. +package header + +import ( + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +// Checksum calculates the checksum (as defined in RFC 1071) of the bytes in the +// given byte array. +func Checksum(buf []byte, initial uint16) uint16 { + v := uint32(initial) + + l := len(buf) + if l&1 != 0 { + l-- + v += uint32(buf[l]) << 8 + } + + for i := 0; i < l; i += 2 { + v += (uint32(buf[i]) << 8) + uint32(buf[i+1]) + } + + return ChecksumCombine(uint16(v), uint16(v>>16)) +} + +// ChecksumCombine combines the two uint16 to form their checksum. This is done +// by adding them and the carry. +func ChecksumCombine(a, b uint16) uint16 { + v := uint32(a) + uint32(b) + return uint16(v + v>>16) +} + +// PseudoHeaderChecksum calculates the pseudo-header checksum for the +// given destination protocol and network address, ignoring the length +// field. Pseudo-headers are needed by transport layers when calculating +// their own checksum. 
+func PseudoHeaderChecksum(protocol tcpip.TransportProtocolNumber, srcAddr tcpip.Address, dstAddr tcpip.Address) uint16 { + xsum := Checksum([]byte(srcAddr), 0) + xsum = Checksum([]byte(dstAddr), xsum) + return Checksum([]byte{0, uint8(protocol)}, xsum) +} diff --git a/pkg/tcpip/header/eth.go b/pkg/tcpip/header/eth.go new file mode 100644 index 000000000..23b7efdfc --- /dev/null +++ b/pkg/tcpip/header/eth.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +const ( + dstMAC = 0 + srcMAC = 6 + ethType = 12 +) + +// EthernetFields contains the fields of an ethernet frame header. It is used to +// describe the fields of a frame that needs to be encoded. +type EthernetFields struct { + // SrcAddr is the "MAC source" field of an ethernet frame header. + SrcAddr tcpip.LinkAddress + + // DstAddr is the "MAC destination" field of an ethernet frame header. + DstAddr tcpip.LinkAddress + + // Type is the "ethertype" field of an ethernet frame header. + Type tcpip.NetworkProtocolNumber +} + +// Ethernet represents an ethernet frame header stored in a byte array. +type Ethernet []byte + +const ( + // EthernetMinimumSize is the minimum size of a valid ethernet frame. + EthernetMinimumSize = 14 + + // EthernetAddressSize is the size, in bytes, of an ethernet address. + EthernetAddressSize = 6 +) + +// SourceAddress returns the "MAC source" field of the ethernet frame header. +func (b Ethernet) SourceAddress() tcpip.LinkAddress { + return tcpip.LinkAddress(b[srcMAC:][:EthernetAddressSize]) +} + +// DestinationAddress returns the "MAC destination" field of the ethernet frame +// header. 
+func (b Ethernet) DestinationAddress() tcpip.LinkAddress { + return tcpip.LinkAddress(b[dstMAC:][:EthernetAddressSize]) +} + +// Type returns the "ethertype" field of the ethernet frame header. +func (b Ethernet) Type() tcpip.NetworkProtocolNumber { + return tcpip.NetworkProtocolNumber(binary.BigEndian.Uint16(b[ethType:])) +} + +// Encode encodes all the fields of the ethernet frame header. +func (b Ethernet) Encode(e *EthernetFields) { + binary.BigEndian.PutUint16(b[ethType:], uint16(e.Type)) + copy(b[srcMAC:][:EthernetAddressSize], e.SrcAddr) + copy(b[dstMAC:][:EthernetAddressSize], e.DstAddr) +} diff --git a/pkg/tcpip/header/gue.go b/pkg/tcpip/header/gue.go new file mode 100644 index 000000000..a069fb669 --- /dev/null +++ b/pkg/tcpip/header/gue.go @@ -0,0 +1,63 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +const ( + typeHLen = 0 + encapProto = 1 +) + +// GUEFields contains the fields of a GUE packet. It is used to describe the +// fields of a packet that needs to be encoded. +type GUEFields struct { + // Type is the "type" field of the GUE header. + Type uint8 + + // Control is the "control" field of the GUE header. + Control bool + + // HeaderLength is the "header length" field of the GUE header. It must + // be at least 4 octets, and a multiple of 4 as well. + HeaderLength uint8 + + // Protocol is the "protocol" field of the GUE header. This is one of + // the IPPROTO_* values. + Protocol uint8 +} + +// GUE represents a Generic UDP Encapsulation header stored in a byte array, the +// fields are described in https://tools.ietf.org/html/draft-ietf-nvo3-gue-01. +type GUE []byte + +const ( + // GUEMinimumSize is the minimum size of a valid GUE packet. + GUEMinimumSize = 4 +) + +// TypeAndControl returns the GUE packet type (top 3 bits of the first byte, +// which includes the control bit). 
+func (b GUE) TypeAndControl() uint8 { + return b[typeHLen] >> 5 +} + +// HeaderLength returns the total length of the GUE header. +func (b GUE) HeaderLength() uint8 { + return 4 + 4*(b[typeHLen]&0x1f) +} + +// Protocol returns the protocol field of the GUE header. +func (b GUE) Protocol() uint8 { + return b[encapProto] +} + +// Encode encodes all the fields of the GUE header. +func (b GUE) Encode(i *GUEFields) { + ctl := uint8(0) + if i.Control { + ctl = 1 << 5 + } + b[typeHLen] = ctl | i.Type<<6 | (i.HeaderLength-4)/4 + b[encapProto] = i.Protocol +} diff --git a/pkg/tcpip/header/icmpv4.go b/pkg/tcpip/header/icmpv4.go new file mode 100644 index 000000000..9f1ad38fc --- /dev/null +++ b/pkg/tcpip/header/icmpv4.go @@ -0,0 +1,98 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +// ICMPv4 represents an ICMPv4 header stored in a byte array. +type ICMPv4 []byte + +const ( + // ICMPv4MinimumSize is the minimum size of a valid ICMP packet. + ICMPv4MinimumSize = 4 + + // ICMPv4EchoMinimumSize is the minimum size of a valid ICMP echo packet. + ICMPv4EchoMinimumSize = 6 + + // ICMPv4DstUnreachableMinimumSize is the minimum size of a valid ICMP + // destination unreachable packet. + ICMPv4DstUnreachableMinimumSize = ICMPv4MinimumSize + 4 + + // ICMPv4ProtocolNumber is the ICMP transport protocol number. + ICMPv4ProtocolNumber tcpip.TransportProtocolNumber = 1 +) + +// ICMPv4Type is the ICMP type field described in RFC 792. +type ICMPv4Type byte + +// Typical values of ICMPv4Type defined in RFC 792. 
+const ( + ICMPv4EchoReply ICMPv4Type = 0 + ICMPv4DstUnreachable ICMPv4Type = 3 + ICMPv4SrcQuench ICMPv4Type = 4 + ICMPv4Redirect ICMPv4Type = 5 + ICMPv4Echo ICMPv4Type = 8 + ICMPv4TimeExceeded ICMPv4Type = 11 + ICMPv4ParamProblem ICMPv4Type = 12 + ICMPv4Timestamp ICMPv4Type = 13 + ICMPv4TimestampReply ICMPv4Type = 14 + ICMPv4InfoRequest ICMPv4Type = 15 + ICMPv4InfoReply ICMPv4Type = 16 +) + +// Values for ICMP code as defined in RFC 792. +const ( + ICMPv4PortUnreachable = 3 + ICMPv4FragmentationNeeded = 4 +) + +// Type is the ICMP type field. +func (b ICMPv4) Type() ICMPv4Type { return ICMPv4Type(b[0]) } + +// SetType sets the ICMP type field. +func (b ICMPv4) SetType(t ICMPv4Type) { b[0] = byte(t) } + +// Code is the ICMP code field. Its meaning depends on the value of Type. +func (b ICMPv4) Code() byte { return b[1] } + +// SetCode sets the ICMP code field. +func (b ICMPv4) SetCode(c byte) { b[1] = c } + +// Checksum is the ICMP checksum field. +func (b ICMPv4) Checksum() uint16 { + return binary.BigEndian.Uint16(b[2:]) +} + +// SetChecksum sets the ICMP checksum field. +func (b ICMPv4) SetChecksum(checksum uint16) { + binary.BigEndian.PutUint16(b[2:], checksum) +} + +// SourcePort implements Transport.SourcePort. +func (ICMPv4) SourcePort() uint16 { + return 0 +} + +// DestinationPort implements Transport.DestinationPort. +func (ICMPv4) DestinationPort() uint16 { + return 0 +} + +// SetSourcePort implements Transport.SetSourcePort. +func (ICMPv4) SetSourcePort(uint16) { +} + +// SetDestinationPort implements Transport.SetDestinationPort. +func (ICMPv4) SetDestinationPort(uint16) { +} + +// Payload implements Transport.Payload. +func (b ICMPv4) Payload() []byte { + return b[ICMPv4MinimumSize:] +} diff --git a/pkg/tcpip/header/icmpv6.go b/pkg/tcpip/header/icmpv6.go new file mode 100644 index 000000000..a061cd02b --- /dev/null +++ b/pkg/tcpip/header/icmpv6.go @@ -0,0 +1,111 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +// ICMPv6 represents an ICMPv6 header stored in a byte array. +type ICMPv6 []byte + +const ( + // ICMPv6MinimumSize is the minimum size of a valid ICMP packet. + ICMPv6MinimumSize = 4 + + // ICMPv6ProtocolNumber is the ICMP transport protocol number. + ICMPv6ProtocolNumber tcpip.TransportProtocolNumber = 58 + + // ICMPv6NeighborSolicitMinimumSize is the minimum size of a + // neighbor solicitation packet. + ICMPv6NeighborSolicitMinimumSize = ICMPv6MinimumSize + 4 + 16 + + // ICMPv6NeighborAdvertSize is size of a neighbor advertisement. + ICMPv6NeighborAdvertSize = 32 + + // ICMPv6EchoMinimumSize is the minimum size of a valid ICMP echo packet. + ICMPv6EchoMinimumSize = 8 + + // ICMPv6DstUnreachableMinimumSize is the minimum size of a valid ICMP + // destination unreachable packet. + ICMPv6DstUnreachableMinimumSize = ICMPv6MinimumSize + 4 + + // ICMPv6PacketTooBigMinimumSize is the minimum size of a valid ICMP + // packet-too-big packet. + ICMPv6PacketTooBigMinimumSize = ICMPv6MinimumSize + 4 +) + +// ICMPv6Type is the ICMP type field described in RFC 4443 and friends. +type ICMPv6Type byte + +// Typical values of ICMPv6Type defined in RFC 4443. +const ( + ICMPv6DstUnreachable ICMPv6Type = 1 + ICMPv6PacketTooBig ICMPv6Type = 2 + ICMPv6TimeExceeded ICMPv6Type = 3 + ICMPv6ParamProblem ICMPv6Type = 4 + ICMPv6EchoRequest ICMPv6Type = 128 + ICMPv6EchoReply ICMPv6Type = 129 + + // Neighbor Discovery Protocol (NDP) messages, see RFC 4861. + + ICMPv6RouterSolicit ICMPv6Type = 133 + ICMPv6RouterAdvert ICMPv6Type = 134 + ICMPv6NeighborSolicit ICMPv6Type = 135 + ICMPv6NeighborAdvert ICMPv6Type = 136 + ICMPv6RedirectMsg ICMPv6Type = 137 +) + +// Values for ICMP code as defined in RFC 4443. +const ( + ICMPv6PortUnreachable = 4 +) + +// Type is the ICMP type field. 
+func (b ICMPv6) Type() ICMPv6Type { return ICMPv6Type(b[0]) } + +// SetType sets the ICMP type field. +func (b ICMPv6) SetType(t ICMPv6Type) { b[0] = byte(t) } + +// Code is the ICMP code field. Its meaning depends on the value of Type. +func (b ICMPv6) Code() byte { return b[1] } + +// SetCode sets the ICMP code field. +func (b ICMPv6) SetCode(c byte) { b[1] = c } + +// Checksum is the ICMP checksum field. +func (b ICMPv6) Checksum() uint16 { + return binary.BigEndian.Uint16(b[2:]) +} + +// SetChecksum calculates and sets the ICMP checksum field. +func (b ICMPv6) SetChecksum(checksum uint16) { + binary.BigEndian.PutUint16(b[2:], checksum) +} + +// SourcePort implements Transport.SourcePort. +func (ICMPv6) SourcePort() uint16 { + return 0 +} + +// DestinationPort implements Transport.DestinationPort. +func (ICMPv6) DestinationPort() uint16 { + return 0 +} + +// SetSourcePort implements Transport.SetSourcePort. +func (ICMPv6) SetSourcePort(uint16) { +} + +// SetDestinationPort implements Transport.SetDestinationPort. +func (ICMPv6) SetDestinationPort(uint16) { +} + +// Payload implements Transport.Payload. +func (b ICMPv6) Payload() []byte { + return b[ICMPv6MinimumSize:] +} diff --git a/pkg/tcpip/header/interfaces.go b/pkg/tcpip/header/interfaces.go new file mode 100644 index 000000000..a92286761 --- /dev/null +++ b/pkg/tcpip/header/interfaces.go @@ -0,0 +1,82 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import ( + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +const ( + // MaxIPPacketSize is the maximum supported IP packet size, excluding + // jumbograms. The maximum IPv4 packet size is 64k-1 (total size must fit + // in 16 bits). For IPv6, the payload max size (excluding jumbograms) is + // 64k-1 (also needs to fit in 16 bits). 
So we use 64k - 1 + 2 * m, where + // m is the minimum IPv6 header size; we leave room for some potential + // IP options. + MaxIPPacketSize = 0xffff + 2*IPv6MinimumSize +) + +// Transport offers generic methods to query and/or update the fields of the +// header of a transport protocol buffer. +type Transport interface { + // SourcePort returns the value of the "source port" field. + SourcePort() uint16 + + // Destination returns the value of the "destination port" field. + DestinationPort() uint16 + + // Checksum returns the value of the "checksum" field. + Checksum() uint16 + + // SetSourcePort sets the value of the "source port" field. + SetSourcePort(uint16) + + // SetDestinationPort sets the value of the "destination port" field. + SetDestinationPort(uint16) + + // SetChecksum sets the value of the "checksum" field. + SetChecksum(uint16) + + // Payload returns the data carried in the transport buffer. + Payload() []byte +} + +// Network offers generic methods to query and/or update the fields of the +// header of a network protocol buffer. +type Network interface { + // SourceAddress returns the value of the "source address" field. + SourceAddress() tcpip.Address + + // DestinationAddress returns the value of the "destination address" + // field. + DestinationAddress() tcpip.Address + + // Checksum returns the value of the "checksum" field. + Checksum() uint16 + + // SetSourceAddress sets the value of the "source address" field. + SetSourceAddress(tcpip.Address) + + // SetDestinationAddress sets the value of the "destination address" + // field. + SetDestinationAddress(tcpip.Address) + + // SetChecksum sets the value of the "checksum" field. + SetChecksum(uint16) + + // TransportProtocol returns the number of the transport protocol + // stored in the payload. + TransportProtocol() tcpip.TransportProtocolNumber + + // Payload returns a byte slice containing the payload of the network + // packet. 
+ Payload() []byte + + // TOS returns the values of the "type of service" and "flow label" fields. + TOS() (uint8, uint32) + + // SetTOS sets the values of the "type of service" and "flow label" fields. + SetTOS(t uint8, l uint32) +} diff --git a/pkg/tcpip/header/ipv4.go b/pkg/tcpip/header/ipv4.go new file mode 100644 index 000000000..cb0d42093 --- /dev/null +++ b/pkg/tcpip/header/ipv4.go @@ -0,0 +1,251 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +const ( + versIHL = 0 + tos = 1 + totalLen = 2 + id = 4 + flagsFO = 6 + ttl = 8 + protocol = 9 + checksum = 10 + srcAddr = 12 + dstAddr = 16 +) + +// IPv4Fields contains the fields of an IPv4 packet. It is used to describe the +// fields of a packet that needs to be encoded. +type IPv4Fields struct { + // IHL is the "internet header length" field of an IPv4 packet. + IHL uint8 + + // TOS is the "type of service" field of an IPv4 packet. + TOS uint8 + + // TotalLength is the "total length" field of an IPv4 packet. + TotalLength uint16 + + // ID is the "identification" field of an IPv4 packet. + ID uint16 + + // Flags is the "flags" field of an IPv4 packet. + Flags uint8 + + // FragmentOffset is the "fragment offset" field of an IPv4 packet. + FragmentOffset uint16 + + // TTL is the "time to live" field of an IPv4 packet. + TTL uint8 + + // Protocol is the "protocol" field of an IPv4 packet. + Protocol uint8 + + // Checksum is the "checksum" field of an IPv4 packet. + Checksum uint16 + + // SrcAddr is the "source ip address" of an IPv4 packet. + SrcAddr tcpip.Address + + // DstAddr is the "destination ip address" of an IPv4 packet. + DstAddr tcpip.Address +} + +// IPv4 represents an ipv4 header stored in a byte array. 
+// Most of the methods of IPv4 access to the underlying slice without +// checking the boundaries and could panic because of 'index out of range'. +// Always call IsValid() to validate an instance of IPv4 before using other methods. +type IPv4 []byte + +const ( + // IPv4MinimumSize is the minimum size of a valid IPv4 packet. + IPv4MinimumSize = 20 + + // IPv4MaximumHeaderSize is the maximum size of an IPv4 header. Given + // that there are only 4 bits to represents the header length in 32-bit + // units, the header cannot exceed 15*4 = 60 bytes. + IPv4MaximumHeaderSize = 60 + + // IPv4AddressSize is the size, in bytes, of an IPv4 address. + IPv4AddressSize = 4 + + // IPv4ProtocolNumber is IPv4's network protocol number. + IPv4ProtocolNumber tcpip.NetworkProtocolNumber = 0x0800 + + // IPv4Version is the version of the ipv4 procotol. + IPv4Version = 4 +) + +// Flags that may be set in an IPv4 packet. +const ( + IPv4FlagMoreFragments = 1 << iota + IPv4FlagDontFragment +) + +// IPVersion returns the version of IP used in the given packet. It returns -1 +// if the packet is not large enough to contain the version field. +func IPVersion(b []byte) int { + // Length must be at least offset+length of version field. + if len(b) < versIHL+1 { + return -1 + } + return int(b[versIHL] >> 4) +} + +// HeaderLength returns the value of the "header length" field of the ipv4 +// header. +func (b IPv4) HeaderLength() uint8 { + return (b[versIHL] & 0xf) * 4 +} + +// ID returns the value of the identifier field of the ipv4 header. +func (b IPv4) ID() uint16 { + return binary.BigEndian.Uint16(b[id:]) +} + +// Protocol returns the value of the protocol field of the ipv4 header. +func (b IPv4) Protocol() uint8 { + return b[protocol] +} + +// Flags returns the "flags" field of the ipv4 header. +func (b IPv4) Flags() uint8 { + return uint8(binary.BigEndian.Uint16(b[flagsFO:]) >> 13) +} + +// TTL returns the "TTL" field of the ipv4 header. 
+func (b IPv4) TTL() uint8 { + return b[ttl] +} + +// FragmentOffset returns the "fragment offset" field of the ipv4 header. +func (b IPv4) FragmentOffset() uint16 { + return binary.BigEndian.Uint16(b[flagsFO:]) << 3 +} + +// TotalLength returns the "total length" field of the ipv4 header. +func (b IPv4) TotalLength() uint16 { + return binary.BigEndian.Uint16(b[totalLen:]) +} + +// Checksum returns the checksum field of the ipv4 header. +func (b IPv4) Checksum() uint16 { + return binary.BigEndian.Uint16(b[checksum:]) +} + +// SourceAddress returns the "source address" field of the ipv4 header. +func (b IPv4) SourceAddress() tcpip.Address { + return tcpip.Address(b[srcAddr : srcAddr+IPv4AddressSize]) +} + +// DestinationAddress returns the "destination address" field of the ipv4 +// header. +func (b IPv4) DestinationAddress() tcpip.Address { + return tcpip.Address(b[dstAddr : dstAddr+IPv4AddressSize]) +} + +// TransportProtocol implements Network.TransportProtocol. +func (b IPv4) TransportProtocol() tcpip.TransportProtocolNumber { + return tcpip.TransportProtocolNumber(b.Protocol()) +} + +// Payload implements Network.Payload. +func (b IPv4) Payload() []byte { + return b[b.HeaderLength():][:b.PayloadLength()] +} + +// PayloadLength returns the length of the payload portion of the ipv4 packet. +func (b IPv4) PayloadLength() uint16 { + return b.TotalLength() - uint16(b.HeaderLength()) +} + +// TOS returns the "type of service" field of the ipv4 header. +func (b IPv4) TOS() (uint8, uint32) { + return b[tos], 0 +} + +// SetTOS sets the "type of service" field of the ipv4 header. +func (b IPv4) SetTOS(v uint8, _ uint32) { + b[tos] = v +} + +// SetTotalLength sets the "total length" field of the ipv4 header. +func (b IPv4) SetTotalLength(totalLength uint16) { + binary.BigEndian.PutUint16(b[totalLen:], totalLength) +} + +// SetChecksum sets the checksum field of the ipv4 header. 
+func (b IPv4) SetChecksum(v uint16) { + binary.BigEndian.PutUint16(b[checksum:], v) +} + +// SetFlagsFragmentOffset sets the "flags" and "fragment offset" fields of the +// ipv4 header. +func (b IPv4) SetFlagsFragmentOffset(flags uint8, offset uint16) { + v := (uint16(flags) << 13) | (offset >> 3) + binary.BigEndian.PutUint16(b[flagsFO:], v) +} + +// SetSourceAddress sets the "source address" field of the ipv4 header. +func (b IPv4) SetSourceAddress(addr tcpip.Address) { + copy(b[srcAddr:srcAddr+IPv4AddressSize], addr) +} + +// SetDestinationAddress sets the "destination address" field of the ipv4 +// header. +func (b IPv4) SetDestinationAddress(addr tcpip.Address) { + copy(b[dstAddr:dstAddr+IPv4AddressSize], addr) +} + +// CalculateChecksum calculates the checksum of the ipv4 header. +func (b IPv4) CalculateChecksum() uint16 { + return Checksum(b[:b.HeaderLength()], 0) +} + +// Encode encodes all the fields of the ipv4 header. +func (b IPv4) Encode(i *IPv4Fields) { + b[versIHL] = (4 << 4) | ((i.IHL / 4) & 0xf) + b[tos] = i.TOS + b.SetTotalLength(i.TotalLength) + binary.BigEndian.PutUint16(b[id:], i.ID) + b.SetFlagsFragmentOffset(i.Flags, i.FragmentOffset) + b[ttl] = i.TTL + b[protocol] = i.Protocol + b.SetChecksum(i.Checksum) + copy(b[srcAddr:srcAddr+IPv4AddressSize], i.SrcAddr) + copy(b[dstAddr:dstAddr+IPv4AddressSize], i.DstAddr) +} + +// EncodePartial updates the total length and checksum fields of ipv4 header, +// taking in the partial checksum, which is the checksum of the header without +// the total length and checksum fields. It is useful in cases when similar +// packets are produced. +func (b IPv4) EncodePartial(partialChecksum, totalLength uint16) { + b.SetTotalLength(totalLength) + checksum := Checksum(b[totalLen:totalLen+2], partialChecksum) + b.SetChecksum(^checksum) +} + +// IsValid performs basic validation on the packet. 
+func (b IPv4) IsValid(pktSize int) bool { + if len(b) < IPv4MinimumSize { + return false + } + + hlen := int(b.HeaderLength()) + tlen := int(b.TotalLength()) + if hlen > tlen || tlen > pktSize { + return false + } + + return true +} diff --git a/pkg/tcpip/header/ipv6.go b/pkg/tcpip/header/ipv6.go new file mode 100644 index 000000000..d8dc138b3 --- /dev/null +++ b/pkg/tcpip/header/ipv6.go @@ -0,0 +1,191 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +const ( + versTCFL = 0 + payloadLen = 4 + nextHdr = 6 + hopLimit = 7 + v6SrcAddr = 8 + v6DstAddr = 24 +) + +// IPv6Fields contains the fields of an IPv6 packet. It is used to describe the +// fields of a packet that needs to be encoded. +type IPv6Fields struct { + // TrafficClass is the "traffic class" field of an IPv6 packet. + TrafficClass uint8 + + // FlowLabel is the "flow label" field of an IPv6 packet. + FlowLabel uint32 + + // PayloadLength is the "payload length" field of an IPv6 packet. + PayloadLength uint16 + + // NextHeader is the "next header" field of an IPv6 packet. + NextHeader uint8 + + // HopLimit is the "hop limit" field of an IPv6 packet. + HopLimit uint8 + + // SrcAddr is the "source ip address" of an IPv6 packet. + SrcAddr tcpip.Address + + // DstAddr is the "destination ip address" of an IPv6 packet. + DstAddr tcpip.Address +} + +// IPv6 represents an ipv6 header stored in a byte array. +// Most of the methods of IPv6 access to the underlying slice without +// checking the boundaries and could panic because of 'index out of range'. +// Always call IsValid() to validate an instance of IPv6 before using other methods. +type IPv6 []byte + +const ( + // IPv6MinimumSize is the minimum size of a valid IPv6 packet. 
+ IPv6MinimumSize = 40 + + // IPv6AddressSize is the size, in bytes, of an IPv6 address. + IPv6AddressSize = 16 + + // IPv6ProtocolNumber is IPv6's network protocol number. + IPv6ProtocolNumber tcpip.NetworkProtocolNumber = 0x86dd + + // IPv6Version is the version of the ipv6 procotol. + IPv6Version = 6 + + // IPv6MinimumMTU is the minimum MTU required by IPv6, per RFC 2460, + // section 5. + IPv6MinimumMTU = 1280 +) + +// PayloadLength returns the value of the "payload length" field of the ipv6 +// header. +func (b IPv6) PayloadLength() uint16 { + return binary.BigEndian.Uint16(b[payloadLen:]) +} + +// HopLimit returns the value of the "hop limit" field of the ipv6 header. +func (b IPv6) HopLimit() uint8 { + return b[hopLimit] +} + +// NextHeader returns the value of the "next header" field of the ipv6 header. +func (b IPv6) NextHeader() uint8 { + return b[nextHdr] +} + +// TransportProtocol implements Network.TransportProtocol. +func (b IPv6) TransportProtocol() tcpip.TransportProtocolNumber { + return tcpip.TransportProtocolNumber(b.NextHeader()) +} + +// Payload implements Network.Payload. +func (b IPv6) Payload() []byte { + return b[IPv6MinimumSize:][:b.PayloadLength()] +} + +// SourceAddress returns the "source address" field of the ipv6 header. +func (b IPv6) SourceAddress() tcpip.Address { + return tcpip.Address(b[v6SrcAddr : v6SrcAddr+IPv6AddressSize]) +} + +// DestinationAddress returns the "destination address" field of the ipv6 +// header. +func (b IPv6) DestinationAddress() tcpip.Address { + return tcpip.Address(b[v6DstAddr : v6DstAddr+IPv6AddressSize]) +} + +// Checksum implements Network.Checksum. Given that IPv6 doesn't have a +// checksum, it just returns 0. +func (IPv6) Checksum() uint16 { + return 0 +} + +// TOS returns the "traffic class" and "flow label" fields of the ipv6 header. 
+func (b IPv6) TOS() (uint8, uint32) { + v := binary.BigEndian.Uint32(b[versTCFL:]) + return uint8(v >> 20), v & 0xfffff +} + +// SetTOS sets the "traffic class" and "flow label" fields of the ipv6 header. +func (b IPv6) SetTOS(t uint8, l uint32) { + vtf := (6 << 28) | (uint32(t) << 20) | (l & 0xfffff) + binary.BigEndian.PutUint32(b[versTCFL:], vtf) +} + +// SetPayloadLength sets the "payload length" field of the ipv6 header. +func (b IPv6) SetPayloadLength(payloadLength uint16) { + binary.BigEndian.PutUint16(b[payloadLen:], payloadLength) +} + +// SetSourceAddress sets the "source address" field of the ipv6 header. +func (b IPv6) SetSourceAddress(addr tcpip.Address) { + copy(b[v6SrcAddr:v6SrcAddr+IPv6AddressSize], addr) +} + +// SetDestinationAddress sets the "destination address" field of the ipv6 +// header. +func (b IPv6) SetDestinationAddress(addr tcpip.Address) { + copy(b[v6DstAddr:v6DstAddr+IPv6AddressSize], addr) +} + +// SetNextHeader sets the value of the "next header" field of the ipv6 header. +func (b IPv6) SetNextHeader(v uint8) { + b[nextHdr] = v +} + +// SetChecksum implements Network.SetChecksum. Given that IPv6 doesn't have a +// checksum, it is empty. +func (IPv6) SetChecksum(uint16) { +} + +// Encode encodes all the fields of the ipv6 header. +func (b IPv6) Encode(i *IPv6Fields) { + b.SetTOS(i.TrafficClass, i.FlowLabel) + b.SetPayloadLength(i.PayloadLength) + b[nextHdr] = i.NextHeader + b[hopLimit] = i.HopLimit + copy(b[v6SrcAddr:v6SrcAddr+IPv6AddressSize], i.SrcAddr) + copy(b[v6DstAddr:v6DstAddr+IPv6AddressSize], i.DstAddr) +} + +// IsValid performs basic validation on the packet. +func (b IPv6) IsValid(pktSize int) bool { + if len(b) < IPv6MinimumSize { + return false + } + + dlen := int(b.PayloadLength()) + if dlen > pktSize-IPv6MinimumSize { + return false + } + + return true +} + +// IsV4MappedAddress determines if the provided address is an IPv4 mapped +// address by checking if its prefix is 0:0:0:0:0:ffff::/96. 
+func IsV4MappedAddress(addr tcpip.Address) bool { + if len(addr) != IPv6AddressSize { + return false + } + + const prefix = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff" + for i := 0; i < len(prefix); i++ { + if prefix[i] != addr[i] { + return false + } + } + + return true +} diff --git a/pkg/tcpip/header/ipv6_fragment.go b/pkg/tcpip/header/ipv6_fragment.go new file mode 100644 index 000000000..04aa5c7b8 --- /dev/null +++ b/pkg/tcpip/header/ipv6_fragment.go @@ -0,0 +1,136 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +const ( + nextHdrFrag = 0 + fragOff = 2 + more = 3 + idV6 = 4 +) + +// IPv6FragmentFields contains the fields of an IPv6 fragment. It is used to describe the +// fields of a packet that needs to be encoded. +type IPv6FragmentFields struct { + // NextHeader is the "next header" field of an IPv6 fragment. + NextHeader uint8 + + // FragmentOffset is the "fragment offset" field of an IPv6 fragment. + FragmentOffset uint16 + + // M is the "more" field of an IPv6 fragment. + M bool + + // Identification is the "identification" field of an IPv6 fragment. + Identification uint32 +} + +// IPv6Fragment represents an ipv6 fragment header stored in a byte array. +// Most of the methods of IPv6Fragment access to the underlying slice without +// checking the boundaries and could panic because of 'index out of range'. +// Always call IsValid() to validate an instance of IPv6Fragment before using other methods. +type IPv6Fragment []byte + +const ( + // IPv6FragmentHeader header is the number used to specify that the next + // header is a fragment header, per RFC 2460. + IPv6FragmentHeader = 44 + + // IPv6FragmentHeaderSize is the size of the fragment header. 
+ IPv6FragmentHeaderSize = 8 +) + +// Encode encodes all the fields of the ipv6 fragment. +func (b IPv6Fragment) Encode(i *IPv6FragmentFields) { + b[nextHdrFrag] = i.NextHeader + binary.BigEndian.PutUint16(b[fragOff:], i.FragmentOffset<<3) + if i.M { + b[more] |= 1 + } + binary.BigEndian.PutUint32(b[idV6:], i.Identification) +} + +// IsValid performs basic validation on the fragment header. +func (b IPv6Fragment) IsValid() bool { + return len(b) >= IPv6FragmentHeaderSize +} + +// NextHeader returns the value of the "next header" field of the ipv6 fragment. +func (b IPv6Fragment) NextHeader() uint8 { + return b[nextHdrFrag] +} + +// FragmentOffset returns the "fragment offset" field of the ipv6 fragment. +func (b IPv6Fragment) FragmentOffset() uint16 { + return binary.BigEndian.Uint16(b[fragOff:]) >> 3 +} + +// More returns the "more" field of the ipv6 fragment. +func (b IPv6Fragment) More() bool { + return b[more]&1 > 0 +} + +// Payload implements Network.Payload. +func (b IPv6Fragment) Payload() []byte { + return b[IPv6FragmentHeaderSize:] +} + +// ID returns the value of the identifier field of the ipv6 fragment. +func (b IPv6Fragment) ID() uint32 { + return binary.BigEndian.Uint32(b[idV6:]) +} + +// TransportProtocol implements Network.TransportProtocol. +func (b IPv6Fragment) TransportProtocol() tcpip.TransportProtocolNumber { + return tcpip.TransportProtocolNumber(b.NextHeader()) +} + +// The functions below have been added only to satisfy the Network interface. + +// Checksum is not supported by IPv6Fragment. +func (b IPv6Fragment) Checksum() uint16 { + panic("not supported") +} + +// SourceAddress is not supported by IPv6Fragment. +func (b IPv6Fragment) SourceAddress() tcpip.Address { + panic("not supported") +} + +// DestinationAddress is not supported by IPv6Fragment. +func (b IPv6Fragment) DestinationAddress() tcpip.Address { + panic("not supported") +} + +// SetSourceAddress is not supported by IPv6Fragment. 
+func (b IPv6Fragment) SetSourceAddress(tcpip.Address) { + panic("not supported") +} + +// SetDestinationAddress is not supported by IPv6Fragment. +func (b IPv6Fragment) SetDestinationAddress(tcpip.Address) { + panic("not supported") +} + +// SetChecksum is not supported by IPv6Fragment. +func (b IPv6Fragment) SetChecksum(uint16) { + panic("not supported") +} + +// TOS is not supported by IPv6Fragment. +func (b IPv6Fragment) TOS() (uint8, uint32) { + panic("not supported") +} + +// SetTOS is not supported by IPv6Fragment. +func (b IPv6Fragment) SetTOS(t uint8, l uint32) { + panic("not supported") +} diff --git a/pkg/tcpip/header/ipversion_test.go b/pkg/tcpip/header/ipversion_test.go new file mode 100644 index 000000000..5f3956160 --- /dev/null +++ b/pkg/tcpip/header/ipversion_test.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header_test + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" +) + +func TestIPv4(t *testing.T) { + b := header.IPv4(make([]byte, header.IPv4MinimumSize)) + b.Encode(&header.IPv4Fields{}) + + const want = header.IPv4Version + if v := header.IPVersion(b); v != want { + t.Fatalf("Bad version, want %v, got %v", want, v) + } +} + +func TestIPv6(t *testing.T) { + b := header.IPv6(make([]byte, header.IPv6MinimumSize)) + b.Encode(&header.IPv6Fields{}) + + const want = header.IPv6Version + if v := header.IPVersion(b); v != want { + t.Fatalf("Bad version, want %v, got %v", want, v) + } +} + +func TestOtherVersion(t *testing.T) { + const want = header.IPv4Version + header.IPv6Version + b := make([]byte, 1) + b[0] = want << 4 + + if v := header.IPVersion(b); v != want { + t.Fatalf("Bad version, want %v, got %v", want, v) + } +} + +func TestTooShort(t *testing.T) { + b := make([]byte, 1) + b[0] = (header.IPv4Version + header.IPv6Version) << 4 + + // Get the version of a 
zero-length slice. + const want = -1 + if v := header.IPVersion(b[:0]); v != want { + t.Fatalf("Bad version, want %v, got %v", want, v) + } + + // Get the version of a nil slice. + if v := header.IPVersion(nil); v != want { + t.Fatalf("Bad version, want %v, got %v", want, v) + } +} diff --git a/pkg/tcpip/header/tcp.go b/pkg/tcpip/header/tcp.go new file mode 100644 index 000000000..995df4076 --- /dev/null +++ b/pkg/tcpip/header/tcp.go @@ -0,0 +1,518 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" +) + +const ( + srcPort = 0 + dstPort = 2 + seqNum = 4 + ackNum = 8 + dataOffset = 12 + tcpFlags = 13 + winSize = 14 + tcpChecksum = 16 + urgentPtr = 18 +) + +const ( + // MaxWndScale is maximum allowed window scaling, as described in + // RFC 1323, section 2.3, page 11. + MaxWndScale = 14 + + // TCPMaxSACKBlocks is the maximum number of SACK blocks that can + // be encoded in a TCP option field. + TCPMaxSACKBlocks = 4 +) + +// Flags that may be set in a TCP segment. +const ( + TCPFlagFin = 1 << iota + TCPFlagSyn + TCPFlagRst + TCPFlagPsh + TCPFlagAck + TCPFlagUrg +) + +// Options that may be present in a TCP segment. +const ( + TCPOptionEOL = 0 + TCPOptionNOP = 1 + TCPOptionMSS = 2 + TCPOptionWS = 3 + TCPOptionTS = 8 + TCPOptionSACKPermitted = 4 + TCPOptionSACK = 5 +) + +// TCPFields contains the fields of a TCP packet. It is used to describe the +// fields of a packet that needs to be encoded. +type TCPFields struct { + // SrcPort is the "source port" field of a TCP packet. + SrcPort uint16 + + // DstPort is the "destination port" field of a TCP packet. + DstPort uint16 + + // SeqNum is the "sequence number" field of a TCP packet. 
+ SeqNum uint32 + + // AckNum is the "acknowledgement number" field of a TCP packet. + AckNum uint32 + + // DataOffset is the "data offset" field of a TCP packet. + DataOffset uint8 + + // Flags is the "flags" field of a TCP packet. + Flags uint8 + + // WindowSize is the "window size" field of a TCP packet. + WindowSize uint16 + + // Checksum is the "checksum" field of a TCP packet. + Checksum uint16 + + // UrgentPointer is the "urgent pointer" field of a TCP packet. + UrgentPointer uint16 +} + +// TCPSynOptions is used to return the parsed TCP Options in a syn +// segment. +type TCPSynOptions struct { + // MSS is the maximum segment size provided by the peer in the SYN. + MSS uint16 + + // WS is the window scale option provided by the peer in the SYN. + // + // Set to -1 if no window scale option was provided. + WS int + + // TS is true if the timestamp option was provided in the syn/syn-ack. + TS bool + + // TSVal is the value of the TSVal field in the timestamp option. + TSVal uint32 + + // TSEcr is the value of the TSEcr field in the timestamp option. + TSEcr uint32 + + // SACKPermitted is true if the SACK option was provided in the SYN/SYN-ACK. + SACKPermitted bool +} + +// SACKBlock represents a single contiguous SACK block. +type SACKBlock struct { + // Start indicates the lowest sequence number in the block. + Start seqnum.Value + + // End indicates the sequence number immediately following the last + // sequence number of this block. + End seqnum.Value +} + +// TCPOptions are used to parse and cache the TCP segment options for a non +// syn/syn-ack segment. +type TCPOptions struct { + // TS is true if the TimeStamp option is enabled. + TS bool + + // TSVal is the value in the TSVal field of the segment. + TSVal uint32 + + // TSEcr is the value in the TSEcr field of the segment. + TSEcr uint32 + + // SACKBlocks are the SACK blocks specified in the segment. + SACKBlocks []SACKBlock +} + +// TCP represents a TCP header stored in a byte array. 
+type TCP []byte + +const ( + // TCPMinimumSize is the minimum size of a valid TCP packet. + TCPMinimumSize = 20 + + // TCPProtocolNumber is TCP's transport protocol number. + TCPProtocolNumber tcpip.TransportProtocolNumber = 6 +) + +// SourcePort returns the "source port" field of the tcp header. +func (b TCP) SourcePort() uint16 { + return binary.BigEndian.Uint16(b[srcPort:]) +} + +// DestinationPort returns the "destination port" field of the tcp header. +func (b TCP) DestinationPort() uint16 { + return binary.BigEndian.Uint16(b[dstPort:]) +} + +// SequenceNumber returns the "sequence number" field of the tcp header. +func (b TCP) SequenceNumber() uint32 { + return binary.BigEndian.Uint32(b[seqNum:]) +} + +// AckNumber returns the "ack number" field of the tcp header. +func (b TCP) AckNumber() uint32 { + return binary.BigEndian.Uint32(b[ackNum:]) +} + +// DataOffset returns the "data offset" field of the tcp header. +func (b TCP) DataOffset() uint8 { + return (b[dataOffset] >> 4) * 4 +} + +// Payload returns the data in the tcp packet. +func (b TCP) Payload() []byte { + return b[b.DataOffset():] +} + +// Flags returns the flags field of the tcp header. +func (b TCP) Flags() uint8 { + return b[tcpFlags] +} + +// WindowSize returns the "window size" field of the tcp header. +func (b TCP) WindowSize() uint16 { + return binary.BigEndian.Uint16(b[winSize:]) +} + +// Checksum returns the "checksum" field of the tcp header. +func (b TCP) Checksum() uint16 { + return binary.BigEndian.Uint16(b[tcpChecksum:]) +} + +// SetSourcePort sets the "source port" field of the tcp header. +func (b TCP) SetSourcePort(port uint16) { + binary.BigEndian.PutUint16(b[srcPort:], port) +} + +// SetDestinationPort sets the "destination port" field of the tcp header. +func (b TCP) SetDestinationPort(port uint16) { + binary.BigEndian.PutUint16(b[dstPort:], port) +} + +// SetChecksum sets the checksum field of the tcp header. 
+func (b TCP) SetChecksum(checksum uint16) { + binary.BigEndian.PutUint16(b[tcpChecksum:], checksum) +} + +// CalculateChecksum calculates the checksum of the tcp segment given +// the totalLen and partialChecksum(descriptions below) +// totalLen is the total length of the segment +// partialChecksum is the checksum of the network-layer pseudo-header +// (excluding the total length) and the checksum of the segment data. +func (b TCP) CalculateChecksum(partialChecksum uint16, totalLen uint16) uint16 { + // Add the length portion of the checksum to the pseudo-checksum. + tmp := make([]byte, 2) + binary.BigEndian.PutUint16(tmp, totalLen) + checksum := Checksum(tmp, partialChecksum) + + // Calculate the rest of the checksum. + return Checksum(b[:b.DataOffset()], checksum) +} + +// Options returns a slice that holds the unparsed TCP options in the segment. +func (b TCP) Options() []byte { + return b[TCPMinimumSize:b.DataOffset()] +} + +// ParsedOptions returns a TCPOptions structure which parses and caches the TCP +// option values in the TCP segment. NOTE: Invoking this function repeatedly is +// expensive as it reparses the options on each invocation. +func (b TCP) ParsedOptions() TCPOptions { + return ParseTCPOptions(b.Options()) +} + +func (b TCP) encodeSubset(seq, ack uint32, flags uint8, rcvwnd uint16) { + binary.BigEndian.PutUint32(b[seqNum:], seq) + binary.BigEndian.PutUint32(b[ackNum:], ack) + b[tcpFlags] = flags + binary.BigEndian.PutUint16(b[winSize:], rcvwnd) +} + +// Encode encodes all the fields of the tcp header. +func (b TCP) Encode(t *TCPFields) { + b.encodeSubset(t.SeqNum, t.AckNum, t.Flags, t.WindowSize) + binary.BigEndian.PutUint16(b[srcPort:], t.SrcPort) + binary.BigEndian.PutUint16(b[dstPort:], t.DstPort) + b[dataOffset] = (t.DataOffset / 4) << 4 + binary.BigEndian.PutUint16(b[tcpChecksum:], t.Checksum) + binary.BigEndian.PutUint16(b[urgentPtr:], t.UrgentPointer) +} + +// EncodePartial updates a subset of the fields of the tcp header. 
// It is useful
// in cases when similar segments are produced.
func (b TCP) EncodePartial(partialChecksum, length uint16, seqnum, acknum uint32, flags byte, rcvwnd uint16) {
	// Add the total length and "flags" field contributions to the checksum.
	// We don't use the flags field directly from the header because it's a
	// one-byte field with an odd offset, so it would be accounted for
	// incorrectly by the Checksum routine.
	tmp := make([]byte, 4)
	binary.BigEndian.PutUint16(tmp, length)
	binary.BigEndian.PutUint16(tmp[2:], uint16(flags))
	checksum := Checksum(tmp, partialChecksum)

	// Encode the passed-in fields.
	b.encodeSubset(seqnum, acknum, flags, rcvwnd)

	// Add the contributions of the passed-in fields to the checksum.
	checksum = Checksum(b[seqNum:seqNum+8], checksum)
	checksum = Checksum(b[winSize:winSize+2], checksum)

	// Encode the checksum.
	b.SetChecksum(^checksum)
}

// ParseSynOptions parses the options received in a SYN segment and returns the
// relevant ones. opts should point to the option part of the TCP Header.
//
// Malformed options cause parsing to stop; whatever was parsed before the
// malformed option is returned.
func ParseSynOptions(opts []byte, isAck bool) TCPSynOptions {
	limit := len(opts)

	synOpts := TCPSynOptions{
		// Per RFC 1122, page 85: "If an MSS option is not received at
		// connection setup, TCP MUST assume a default send MSS of 536."
		MSS: 536,
		// If no window scale option is specified, WS in options is
		// returned as -1; this is because the absence of the option
		// indicates that we cannot use window scaling on the
		// receive end either.
		WS: -1,
	}

	for i := 0; i < limit; {
		switch opts[i] {
		case TCPOptionEOL:
			i = limit
		case TCPOptionNOP:
			i++
		case TCPOptionMSS:
			if i+4 > limit || opts[i+1] != 4 {
				return synOpts
			}
			mss := uint16(opts[i+2])<<8 | uint16(opts[i+3])
			if mss == 0 {
				return synOpts
			}
			synOpts.MSS = mss
			i += 4

		case TCPOptionWS:
			if i+3 > limit || opts[i+1] != 3 {
				return synOpts
			}
			ws := int(opts[i+2])
			if ws > MaxWndScale {
				// Clamp to the maximum per RFC 1323 section 2.3.
				ws = MaxWndScale
			}
			synOpts.WS = ws
			i += 3

		case TCPOptionTS:
			if i+10 > limit || opts[i+1] != 10 {
				return synOpts
			}
			synOpts.TSVal = binary.BigEndian.Uint32(opts[i+2:])
			if isAck {
				// If the segment is a SYN-ACK then store the Timestamp Echo Reply
				// in the segment.
				synOpts.TSEcr = binary.BigEndian.Uint32(opts[i+6:])
			}
			synOpts.TS = true
			i += 10
		case TCPOptionSACKPermitted:
			if i+2 > limit || opts[i+1] != 2 {
				return synOpts
			}
			synOpts.SACKPermitted = true
			i += 2

		default:
			// We don't recognize this option, just skip over it.
			if i+2 > limit {
				return synOpts
			}
			l := int(opts[i+1])
			// If the length is incorrect or if l+i overflows the
			// total options length then return false.
			if l < 2 || i+l > limit {
				return synOpts
			}
			i += l
		}
	}

	return synOpts
}

// ParseTCPOptions extracts and stores all known options in the provided byte
// slice in a TCPOptions structure.
func ParseTCPOptions(b []byte) TCPOptions {
	opts := TCPOptions{}
	limit := len(b)
	for i := 0; i < limit; {
		switch b[i] {
		case TCPOptionEOL:
			i = limit
		case TCPOptionNOP:
			i++
		case TCPOptionTS:
			if i+10 > limit || (b[i+1] != 10) {
				return opts
			}
			opts.TS = true
			opts.TSVal = binary.BigEndian.Uint32(b[i+2:])
			opts.TSEcr = binary.BigEndian.Uint32(b[i+6:])
			i += 10
		case TCPOptionSACK:
			if i+2 > limit {
				// Malformed SACK block, just return and stop parsing.
+ return opts + } + sackOptionLen := int(b[i+1]) + if i+sackOptionLen > limit || (sackOptionLen-2)%8 != 0 { + // Malformed SACK block, just return and stop parsing. + return opts + } + numBlocks := (sackOptionLen - 2) / 8 + opts.SACKBlocks = []SACKBlock{} + for j := 0; j < numBlocks; j++ { + start := binary.BigEndian.Uint32(b[i+2+j*8:]) + end := binary.BigEndian.Uint32(b[i+2+j*8+4:]) + opts.SACKBlocks = append(opts.SACKBlocks, SACKBlock{ + Start: seqnum.Value(start), + End: seqnum.Value(end), + }) + } + i += sackOptionLen + default: + // We don't recognize this option, just skip over it. + if i+2 > limit { + return opts + } + l := int(b[i+1]) + // If the length is incorrect or if l+i overflows the + // total options length then return false. + if l < 2 || i+l > limit { + return opts + } + i += l + } + } + return opts +} + +// EncodeMSSOption encodes the MSS TCP option with the provided MSS values in +// the supplied buffer. If the provided buffer is not large enough then it just +// returns without encoding anything. It returns the number of bytes written to +// the provided buffer. +func EncodeMSSOption(mss uint32, b []byte) int { + // mssOptionSize is the number of bytes in a valid MSS option. + const mssOptionSize = 4 + + if len(b) < mssOptionSize { + return 0 + } + b[0], b[1], b[2], b[3] = TCPOptionMSS, mssOptionSize, byte(mss>>8), byte(mss) + return mssOptionSize +} + +// EncodeWSOption encodes the WS TCP option with the WS value in the +// provided buffer. If the provided buffer is not large enough then it just +// returns without encoding anything. It returns the number of bytes written to +// the provided buffer. +func EncodeWSOption(ws int, b []byte) int { + if len(b) < 3 { + return 0 + } + b[0], b[1], b[2] = TCPOptionWS, 3, uint8(ws) + return int(b[1]) +} + +// EncodeTSOption encodes the provided tsVal and tsEcr values as a TCP timestamp +// option into the provided buffer. 
If the buffer is smaller than expected it +// just returns without encoding anything. It returns the number of bytes +// written to the provided buffer. +func EncodeTSOption(tsVal, tsEcr uint32, b []byte) int { + if len(b) < 10 { + return 0 + } + b[0], b[1] = TCPOptionTS, 10 + binary.BigEndian.PutUint32(b[2:], tsVal) + binary.BigEndian.PutUint32(b[6:], tsEcr) + return int(b[1]) +} + +// EncodeSACKPermittedOption encodes a SACKPermitted option into the provided +// buffer. If the buffer is smaller than required it just returns without +// encoding anything. It returns the number of bytes written to the provided +// buffer. +func EncodeSACKPermittedOption(b []byte) int { + if len(b) < 2 { + return 0 + } + + b[0], b[1] = TCPOptionSACKPermitted, 2 + return int(b[1]) +} + +// EncodeSACKBlocks encodes the provided SACK blocks as a TCP SACK option block +// in the provided slice. It tries to fit in as many blocks as possible based on +// number of bytes available in the provided buffer. It returns the number of +// bytes written to the provided buffer. +func EncodeSACKBlocks(sackBlocks []SACKBlock, b []byte) int { + if len(sackBlocks) == 0 { + return 0 + } + l := len(sackBlocks) + if l > TCPMaxSACKBlocks { + l = TCPMaxSACKBlocks + } + if ll := (len(b) - 2) / 8; ll < l { + l = ll + } + if l == 0 { + // There is not enough space in the provided buffer to add + // any SACK blocks. + return 0 + } + b[0] = TCPOptionSACK + b[1] = byte(l*8 + 2) + for i := 0; i < l; i++ { + binary.BigEndian.PutUint32(b[i*8+2:], uint32(sackBlocks[i].Start)) + binary.BigEndian.PutUint32(b[i*8+6:], uint32(sackBlocks[i].End)) + } + return int(b[1]) +} + +// EncodeNOP adds an explicit NOP to the option list. +func EncodeNOP(b []byte) int { + if len(b) == 0 { + return 0 + } + b[0] = TCPOptionNOP + return 1 +} + +// AddTCPOptionPadding adds the required number of TCPOptionNOP to quad align +// the option buffer. 
It adds padding bytes after the offset specified and +// returns the number of padding bytes added. The passed in options slice +// must have space for the padding bytes. +func AddTCPOptionPadding(options []byte, offset int) int { + paddingToAdd := -offset & 3 + // Now add any padding bytes that might be required to quad align the + // options. + for i := offset; i < offset+paddingToAdd; i++ { + options[i] = TCPOptionNOP + } + return paddingToAdd +} diff --git a/pkg/tcpip/header/tcp_test.go b/pkg/tcpip/header/tcp_test.go new file mode 100644 index 000000000..27d43479a --- /dev/null +++ b/pkg/tcpip/header/tcp_test.go @@ -0,0 +1,134 @@ +package header_test + +import ( + "reflect" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" +) + +func TestEncodeSACKBlocks(t *testing.T) { + testCases := []struct { + sackBlocks []header.SACKBlock + want []header.SACKBlock + bufSize int + }{ + { + []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}, + []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}}, + 40, + }, + { + []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}, + []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, + 30, + }, + { + []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}, + []header.SACKBlock{{10, 20}, {22, 30}}, + 20, + }, + { + []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}, + []header.SACKBlock{{10, 20}}, + 10, + }, + { + []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}, + nil, + 8, + }, + { + []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}, + []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}, {42, 50}}, + 60, + }, + } + for _, tc := range testCases { + b := make([]byte, tc.bufSize) + t.Logf("testing: %v", tc) + header.EncodeSACKBlocks(tc.sackBlocks, b) + opts := header.ParseTCPOptions(b) + if got, want := opts.SACKBlocks, tc.want; !reflect.DeepEqual(got, want) { + 
t.Errorf("header.EncodeSACKBlocks(%v, %v), encoded blocks got: %v, want: %v", tc.sackBlocks, b, got, want) + } + } +} + +func TestTCPParseOptions(t *testing.T) { + type tsOption struct { + tsVal uint32 + tsEcr uint32 + } + + generateOptions := func(tsOpt *tsOption, sackBlocks []header.SACKBlock) []byte { + l := 0 + if tsOpt != nil { + l += 10 + } + if len(sackBlocks) != 0 { + l += len(sackBlocks)*8 + 2 + } + b := make([]byte, l) + offset := 0 + if tsOpt != nil { + offset = header.EncodeTSOption(tsOpt.tsVal, tsOpt.tsEcr, b) + } + header.EncodeSACKBlocks(sackBlocks, b[offset:]) + return b + } + + testCases := []struct { + b []byte + want header.TCPOptions + }{ + // Trivial cases. + {nil, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionNOP}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionNOP, header.TCPOptionNOP}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionEOL}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionNOP, header.TCPOptionEOL, header.TCPOptionTS, 10, 1, 1}, header.TCPOptions{false, 0, 0, nil}}, + + // Test timestamp parsing. + {[]byte{header.TCPOptionNOP, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{true, 1, 1, nil}}, + {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{true, 1, 1, nil}}, + + // Test malformed timestamp option. + {[]byte{header.TCPOptionTS, 8, 1, 1}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionNOP, header.TCPOptionTS, 8, 1, 1}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionNOP, header.TCPOptionTS, 8, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, nil}}, + + // Test SACKBlock parsing. 
+ {[]byte{header.TCPOptionSACK, 10, 0, 0, 0, 1, 0, 0, 0, 10}, header.TCPOptions{false, 0, 0, []header.SACKBlock{{1, 10}}}}, + {[]byte{header.TCPOptionSACK, 18, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, 11, 0, 0, 0, 12}, header.TCPOptions{false, 0, 0, []header.SACKBlock{{1, 10}, {11, 12}}}}, + + // Test malformed SACK option. + {[]byte{header.TCPOptionSACK, 0}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionSACK, 8, 0, 0, 0, 1, 0, 0, 0, 10}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionSACK, 11, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, 11, 0, 0, 0, 12}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionSACK, 17, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, 11, 0, 0, 0, 12}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionSACK}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionSACK, 10}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionSACK, 10, 0, 0, 0, 1, 0, 0, 0}, header.TCPOptions{false, 0, 0, nil}}, + + // Test Timestamp + SACK block parsing. + {generateOptions(&tsOption{1, 1}, []header.SACKBlock{{1, 10}, {11, 12}}), header.TCPOptions{true, 1, 1, []header.SACKBlock{{1, 10}, {11, 12}}}}, + {generateOptions(&tsOption{1, 2}, []header.SACKBlock{{1, 10}, {11, 12}}), header.TCPOptions{true, 1, 2, []header.SACKBlock{{1, 10}, {11, 12}}}}, + {generateOptions(&tsOption{1, 3}, []header.SACKBlock{{1, 10}, {11, 12}, {13, 14}, {14, 15}, {15, 16}}), header.TCPOptions{true, 1, 3, []header.SACKBlock{{1, 10}, {11, 12}, {13, 14}, {14, 15}}}}, + + // Test valid timestamp + malformed SACK block parsing. 
+ {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionSACK}, header.TCPOptions{true, 1, 1, nil}}, + {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionSACK, 10}, header.TCPOptions{true, 1, 1, nil}}, + {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionSACK, 10, 0, 0, 0}, header.TCPOptions{true, 1, 1, nil}}, + {[]byte{header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionSACK, 11, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{true, 1, 1, nil}}, + {[]byte{header.TCPOptionSACK, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, nil}}, + {[]byte{header.TCPOptionSACK, 10, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, []header.SACKBlock{{134873088, 65536}}}}, + {[]byte{header.TCPOptionSACK, 10, 0, 0, 0, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, []header.SACKBlock{{8, 167772160}}}}, + {[]byte{header.TCPOptionSACK, 11, 0, 0, 0, 1, 0, 0, 0, 1, header.TCPOptionTS, 10, 0, 0, 0, 1, 0, 0, 0, 1}, header.TCPOptions{false, 0, 0, nil}}, + } + for _, tc := range testCases { + if got, want := header.ParseTCPOptions(tc.b), tc.want; !reflect.DeepEqual(got, want) { + t.Errorf("ParseTCPOptions(%v) = %v, want: %v", tc.b, got, tc.want) + } + } +} diff --git a/pkg/tcpip/header/udp.go b/pkg/tcpip/header/udp.go new file mode 100644 index 000000000..7c2548634 --- /dev/null +++ b/pkg/tcpip/header/udp.go @@ -0,0 +1,106 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package header + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +const ( + udpSrcPort = 0 + udpDstPort = 2 + udpLength = 4 + udpChecksum = 6 +) + +// UDPFields contains the fields of a UDP packet. It is used to describe the +// fields of a packet that needs to be encoded. 
+type UDPFields struct { + // SrcPort is the "source port" field of a UDP packet. + SrcPort uint16 + + // DstPort is the "destination port" field of a UDP packet. + DstPort uint16 + + // Length is the "length" field of a UDP packet. + Length uint16 + + // Checksum is the "checksum" field of a UDP packet. + Checksum uint16 +} + +// UDP represents a UDP header stored in a byte array. +type UDP []byte + +const ( + // UDPMinimumSize is the minimum size of a valid UDP packet. + UDPMinimumSize = 8 + + // UDPProtocolNumber is UDP's transport protocol number. + UDPProtocolNumber tcpip.TransportProtocolNumber = 17 +) + +// SourcePort returns the "source port" field of the udp header. +func (b UDP) SourcePort() uint16 { + return binary.BigEndian.Uint16(b[udpSrcPort:]) +} + +// DestinationPort returns the "destination port" field of the udp header. +func (b UDP) DestinationPort() uint16 { + return binary.BigEndian.Uint16(b[udpDstPort:]) +} + +// Length returns the "length" field of the udp header. +func (b UDP) Length() uint16 { + return binary.BigEndian.Uint16(b[udpLength:]) +} + +// Payload returns the data contained in the UDP datagram. +func (b UDP) Payload() []byte { + return b[UDPMinimumSize:] +} + +// Checksum returns the "checksum" field of the udp header. +func (b UDP) Checksum() uint16 { + return binary.BigEndian.Uint16(b[udpChecksum:]) +} + +// SetSourcePort sets the "source port" field of the udp header. +func (b UDP) SetSourcePort(port uint16) { + binary.BigEndian.PutUint16(b[udpSrcPort:], port) +} + +// SetDestinationPort sets the "destination port" field of the udp header. +func (b UDP) SetDestinationPort(port uint16) { + binary.BigEndian.PutUint16(b[udpDstPort:], port) +} + +// SetChecksum sets the "checksum" field of the udp header. 
+func (b UDP) SetChecksum(checksum uint16) { + binary.BigEndian.PutUint16(b[udpChecksum:], checksum) +} + +// CalculateChecksum calculates the checksum of the udp packet, given the total +// length of the packet and the checksum of the network-layer pseudo-header +// (excluding the total length) and the checksum of the payload. +func (b UDP) CalculateChecksum(partialChecksum uint16, totalLen uint16) uint16 { + // Add the length portion of the checksum to the pseudo-checksum. + tmp := make([]byte, 2) + binary.BigEndian.PutUint16(tmp, totalLen) + checksum := Checksum(tmp, partialChecksum) + + // Calculate the rest of the checksum. + return Checksum(b[:UDPMinimumSize], checksum) +} + +// Encode encodes all the fields of the udp header. +func (b UDP) Encode(u *UDPFields) { + binary.BigEndian.PutUint16(b[udpSrcPort:], u.SrcPort) + binary.BigEndian.PutUint16(b[udpDstPort:], u.DstPort) + binary.BigEndian.PutUint16(b[udpLength:], u.Length) + binary.BigEndian.PutUint16(b[udpChecksum:], u.Checksum) +} diff --git a/pkg/tcpip/link/channel/BUILD b/pkg/tcpip/link/channel/BUILD new file mode 100644 index 000000000..b58a76699 --- /dev/null +++ b/pkg/tcpip/link/channel/BUILD @@ -0,0 +1,15 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "channel", + srcs = ["channel.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/channel", + visibility = ["//:sandbox"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/stack", + ], +) diff --git a/pkg/tcpip/link/channel/channel.go b/pkg/tcpip/link/channel/channel.go new file mode 100644 index 000000000..cebc34553 --- /dev/null +++ b/pkg/tcpip/link/channel/channel.go @@ -0,0 +1,110 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package channel provides the implemention of channel-based data-link layer +// endpoints. Such endpoints allow injection of inbound packets and store +// outbound packets in a channel. +package channel + +import ( + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +// PacketInfo holds all the information about an outbound packet. +type PacketInfo struct { + Header buffer.View + Payload buffer.View + Proto tcpip.NetworkProtocolNumber +} + +// Endpoint is link layer endpoint that stores outbound packets in a channel +// and allows injection of inbound packets. +type Endpoint struct { + dispatcher stack.NetworkDispatcher + mtu uint32 + linkAddr tcpip.LinkAddress + + // C is where outbound packets are queued. + C chan PacketInfo +} + +// New creates a new channel endpoint. +func New(size int, mtu uint32, linkAddr tcpip.LinkAddress) (tcpip.LinkEndpointID, *Endpoint) { + e := &Endpoint{ + C: make(chan PacketInfo, size), + mtu: mtu, + linkAddr: linkAddr, + } + + return stack.RegisterLinkEndpoint(e), e +} + +// Drain removes all outbound packets from the channel and counts them. +func (e *Endpoint) Drain() int { + c := 0 + for { + select { + case <-e.C: + c++ + default: + return c + } + } +} + +// Inject injects an inbound packet. +func (e *Endpoint) Inject(protocol tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) { + uu := vv.Clone(nil) + e.dispatcher.DeliverNetworkPacket(e, "", protocol, &uu) +} + +// Attach saves the stack network-layer dispatcher for use later when packets +// are injected. +func (e *Endpoint) Attach(dispatcher stack.NetworkDispatcher) { + e.dispatcher = dispatcher +} + +// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized +// during construction. +func (e *Endpoint) MTU() uint32 { + return e.mtu +} + +// Capabilities implements stack.LinkEndpoint.Capabilities. 
+func (*Endpoint) Capabilities() stack.LinkEndpointCapabilities { + return 0 +} + +// MaxHeaderLength returns the maximum size of the link layer header. Given it +// doesn't have a header, it just returns 0. +func (*Endpoint) MaxHeaderLength() uint16 { + return 0 +} + +// LinkAddress returns the link address of this endpoint. +func (e *Endpoint) LinkAddress() tcpip.LinkAddress { + return e.linkAddr +} + +// WritePacket stores outbound packets into the channel. +func (e *Endpoint) WritePacket(_ *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.NetworkProtocolNumber) *tcpip.Error { + p := PacketInfo{ + Header: hdr.View(), + Proto: protocol, + } + + if payload != nil { + p.Payload = make(buffer.View, len(payload)) + copy(p.Payload, payload) + } + + select { + case e.C <- p: + default: + } + + return nil +} diff --git a/pkg/tcpip/link/fdbased/BUILD b/pkg/tcpip/link/fdbased/BUILD new file mode 100644 index 000000000..b5ab1ea6a --- /dev/null +++ b/pkg/tcpip/link/fdbased/BUILD @@ -0,0 +1,32 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "fdbased", + srcs = ["endpoint.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/fdbased", + visibility = [ + "//visibility:public", + ], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/link/rawfile", + "//pkg/tcpip/stack", + ], +) + +go_test( + name = "fdbased_test", + size = "small", + srcs = ["endpoint_test.go"], + embed = [":fdbased"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/stack", + ], +) diff --git a/pkg/tcpip/link/fdbased/endpoint.go b/pkg/tcpip/link/fdbased/endpoint.go new file mode 100644 index 000000000..da74cd644 --- /dev/null +++ b/pkg/tcpip/link/fdbased/endpoint.go @@ -0,0 +1,261 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fdbased provides the implemention of data-link layer endpoints +// backed by boundary-preserving file descriptors (e.g., TUN devices, +// seqpacket/datagram sockets). +// +// FD based endpoints can be used in the networking stack by calling New() to +// create a new endpoint, and then passing it as an argument to +// Stack.CreateNIC(). +package fdbased + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/rawfile" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +// BufConfig defines the shape of the vectorised view used to read packets from the NIC. +var BufConfig = []int{128, 256, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768} + +type endpoint struct { + // fd is the file descriptor used to send and receive packets. + fd int + + // mtu (maximum transmission unit) is the maximum size of a packet. + mtu uint32 + + // hdrSize specifies the link-layer header size. If set to 0, no header + // is added/removed; otherwise an ethernet header is used. + hdrSize int + + // addr is the address of the endpoint. + addr tcpip.LinkAddress + + // caps holds the endpoint capabilities. + caps stack.LinkEndpointCapabilities + + // closed is a function to be called when the FD's peer (if any) closes + // its end of the communication pipe. + closed func(*tcpip.Error) + + vv *buffer.VectorisedView + iovecs []syscall.Iovec + views []buffer.View +} + +// Options specify the details about the fd-based endpoint to be created. +type Options struct { + FD int + MTU uint32 + EthernetHeader bool + ChecksumOffload bool + ClosedFunc func(*tcpip.Error) + Address tcpip.LinkAddress +} + +// New creates a new fd-based endpoint. 
+// +// Makes fd non-blocking, but does not take ownership of fd, which must remain +// open for the lifetime of the returned endpoint. +func New(opts *Options) tcpip.LinkEndpointID { + syscall.SetNonblock(opts.FD, true) + + caps := stack.LinkEndpointCapabilities(0) + if opts.ChecksumOffload { + caps |= stack.CapabilityChecksumOffload + } + + hdrSize := 0 + if opts.EthernetHeader { + hdrSize = header.EthernetMinimumSize + caps |= stack.CapabilityResolutionRequired + } + + e := &endpoint{ + fd: opts.FD, + mtu: opts.MTU, + caps: caps, + closed: opts.ClosedFunc, + addr: opts.Address, + hdrSize: hdrSize, + views: make([]buffer.View, len(BufConfig)), + iovecs: make([]syscall.Iovec, len(BufConfig)), + } + vv := buffer.NewVectorisedView(0, e.views) + e.vv = &vv + return stack.RegisterLinkEndpoint(e) +} + +// Attach launches the goroutine that reads packets from the file descriptor and +// dispatches them via the provided dispatcher. +func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) { + go e.dispatchLoop(dispatcher) // S/R-FIXME +} + +// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized +// during construction. +func (e *endpoint) MTU() uint32 { + return e.mtu +} + +// Capabilities implements stack.LinkEndpoint.Capabilities. +func (e *endpoint) Capabilities() stack.LinkEndpointCapabilities { + return e.caps +} + +// MaxHeaderLength returns the maximum size of the link-layer header. +func (e *endpoint) MaxHeaderLength() uint16 { + return uint16(e.hdrSize) +} + +// LinkAddress returns the link address of this endpoint. +func (e *endpoint) LinkAddress() tcpip.LinkAddress { + return e.addr +} + +// WritePacket writes outbound packets to the file descriptor. If it is not +// currently writable, the packet is dropped. +func (e *endpoint) WritePacket(r *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.NetworkProtocolNumber) *tcpip.Error { + if e.hdrSize > 0 { + // Add ethernet header if needed. 
+ eth := header.Ethernet(hdr.Prepend(header.EthernetMinimumSize)) + eth.Encode(&header.EthernetFields{ + DstAddr: r.RemoteLinkAddress, + SrcAddr: e.addr, + Type: protocol, + }) + } + + if len(payload) == 0 { + return rawfile.NonBlockingWrite(e.fd, hdr.UsedBytes()) + + } + + return rawfile.NonBlockingWrite2(e.fd, hdr.UsedBytes(), payload) +} + +func (e *endpoint) capViews(n int, buffers []int) int { + c := 0 + for i, s := range buffers { + c += s + if c >= n { + e.views[i].CapLength(s - (c - n)) + return i + 1 + } + } + return len(buffers) +} + +func (e *endpoint) allocateViews(bufConfig []int) { + for i, v := range e.views { + if v != nil { + break + } + b := buffer.NewView(bufConfig[i]) + e.views[i] = b + e.iovecs[i] = syscall.Iovec{ + Base: &b[0], + Len: uint64(len(b)), + } + } +} + +// dispatch reads one packet from the file descriptor and dispatches it. +func (e *endpoint) dispatch(d stack.NetworkDispatcher, largeV buffer.View) (bool, *tcpip.Error) { + e.allocateViews(BufConfig) + + n, err := rawfile.BlockingReadv(e.fd, e.iovecs) + if err != nil { + return false, err + } + + if n <= e.hdrSize { + return false, nil + } + + var p tcpip.NetworkProtocolNumber + var addr tcpip.LinkAddress + if e.hdrSize > 0 { + eth := header.Ethernet(e.views[0]) + p = eth.Type() + addr = eth.SourceAddress() + } else { + // We don't get any indication of what the packet is, so try to guess + // if it's an IPv4 or IPv6 packet. + switch header.IPVersion(e.views[0]) { + case header.IPv4Version: + p = header.IPv4ProtocolNumber + case header.IPv6Version: + p = header.IPv6ProtocolNumber + default: + return true, nil + } + } + + used := e.capViews(n, BufConfig) + e.vv.SetViews(e.views[:used]) + e.vv.SetSize(n) + e.vv.TrimFront(e.hdrSize) + + d.DeliverNetworkPacket(e, addr, p, e.vv) + + // Prepare e.views for another packet: release used views. 
+ for i := 0; i < used; i++ { + e.views[i] = nil + } + + return true, nil +} + +// dispatchLoop reads packets from the file descriptor in a loop and dispatches +// them to the network stack. +func (e *endpoint) dispatchLoop(d stack.NetworkDispatcher) *tcpip.Error { + v := buffer.NewView(header.MaxIPPacketSize) + for { + cont, err := e.dispatch(d, v) + if err != nil || !cont { + if e.closed != nil { + e.closed(err) + } + return err + } + } +} + +// InjectableEndpoint is an injectable fd-based endpoint. The endpoint writes +// to the FD, but does not read from it. All reads come from injected packets. +type InjectableEndpoint struct { + endpoint + + dispatcher stack.NetworkDispatcher +} + +// Attach saves the stack network-layer dispatcher for use later when packets +// are injected. +func (e *InjectableEndpoint) Attach(dispatcher stack.NetworkDispatcher) { + e.dispatcher = dispatcher +} + +// Inject injects an inbound packet. +func (e *InjectableEndpoint) Inject(protocol tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) { + e.dispatcher.DeliverNetworkPacket(e, "", protocol, vv) +} + +// NewInjectable creates a new fd-based InjectableEndpoint. +func NewInjectable(fd int, mtu uint32) (tcpip.LinkEndpointID, *InjectableEndpoint) { + syscall.SetNonblock(fd, true) + + e := &InjectableEndpoint{endpoint: endpoint{ + fd: fd, + mtu: mtu, + }} + + return stack.RegisterLinkEndpoint(e), e +} diff --git a/pkg/tcpip/link/fdbased/endpoint_test.go b/pkg/tcpip/link/fdbased/endpoint_test.go new file mode 100644 index 000000000..f7bbb28e1 --- /dev/null +++ b/pkg/tcpip/link/fdbased/endpoint_test.go @@ -0,0 +1,336 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fdbased + +import ( + "fmt" + "math/rand" + "reflect" + "syscall" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +type packetInfo struct { + raddr tcpip.LinkAddress + proto tcpip.NetworkProtocolNumber + contents buffer.View +} + +type context struct { + t *testing.T + fds [2]int + ep stack.LinkEndpoint + ch chan packetInfo + done chan struct{} +} + +func newContext(t *testing.T, opt *Options) *context { + fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_SEQPACKET, 0) + if err != nil { + t.Fatalf("Socketpair failed: %v", err) + } + + done := make(chan struct{}, 1) + opt.ClosedFunc = func(*tcpip.Error) { + done <- struct{}{} + } + + opt.FD = fds[1] + ep := stack.FindLinkEndpoint(New(opt)).(*endpoint) + + c := &context{ + t: t, + fds: fds, + ep: ep, + ch: make(chan packetInfo, 100), + done: done, + } + + ep.Attach(c) + + return c +} + +func (c *context) cleanup() { + syscall.Close(c.fds[0]) + <-c.done + syscall.Close(c.fds[1]) +} + +func (c *context) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) { + c.ch <- packetInfo{remoteLinkAddr, protocol, vv.ToView()} +} + +func TestNoEthernetProperties(t *testing.T) { + const mtu = 1500 + c := newContext(t, &Options{MTU: mtu}) + defer c.cleanup() + + if want, v := uint16(0), c.ep.MaxHeaderLength(); want != v { + t.Fatalf("MaxHeaderLength() = %v, want %v", v, want) + } + + if want, v := uint32(mtu), c.ep.MTU(); want != v { + t.Fatalf("MTU() = %v, want %v", v, want) + } +} + +func TestEthernetProperties(t *testing.T) { + const mtu = 1500 + c := newContext(t, &Options{EthernetHeader: true, MTU: mtu}) + defer c.cleanup() + + if want, v := uint16(header.EthernetMinimumSize), c.ep.MaxHeaderLength(); want != v { + 
t.Fatalf("MaxHeaderLength() = %v, want %v", v, want) + } + + if want, v := uint32(mtu), c.ep.MTU(); want != v { + t.Fatalf("MTU() = %v, want %v", v, want) + } +} + +func TestAddress(t *testing.T) { + const mtu = 1500 + addrs := []tcpip.LinkAddress{"", "abc", "def"} + for _, a := range addrs { + t.Run(fmt.Sprintf("Address: %q", a), func(t *testing.T) { + c := newContext(t, &Options{Address: a, MTU: mtu}) + defer c.cleanup() + + if want, v := a, c.ep.LinkAddress(); want != v { + t.Fatalf("LinkAddress() = %v, want %v", v, want) + } + }) + } +} + +func TestWritePacket(t *testing.T) { + const ( + mtu = 1500 + laddr = tcpip.LinkAddress("\x11\x22\x33\x44\x55\x66") + raddr = tcpip.LinkAddress("\x77\x88\x99\xaa\xbb\xcc") + proto = 10 + ) + + lengths := []int{0, 100, 1000} + eths := []bool{true, false} + + for _, eth := range eths { + for _, plen := range lengths { + t.Run(fmt.Sprintf("Eth=%v,PayloadLen=%v", eth, plen), func(t *testing.T) { + c := newContext(t, &Options{Address: laddr, MTU: mtu, EthernetHeader: eth}) + defer c.cleanup() + + r := &stack.Route{ + RemoteLinkAddress: raddr, + } + + // Build header. + hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()) + 100) + b := hdr.Prepend(100) + for i := range b { + b[i] = uint8(rand.Intn(256)) + } + + // Buiild payload and write. + payload := make([]byte, plen) + for i := range payload { + payload[i] = uint8(rand.Intn(256)) + } + want := append(hdr.UsedBytes(), payload...) + if err := c.ep.WritePacket(r, &hdr, payload, proto); err != nil { + t.Fatalf("WritePacket failed: %v", err) + } + + // Read from fd, then compare with what we wrote. 
+ b = make([]byte, mtu) + n, err := syscall.Read(c.fds[0], b) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + b = b[:n] + if eth { + h := header.Ethernet(b) + b = b[header.EthernetMinimumSize:] + + if a := h.SourceAddress(); a != laddr { + t.Fatalf("SourceAddress() = %v, want %v", a, laddr) + } + + if a := h.DestinationAddress(); a != raddr { + t.Fatalf("DestinationAddress() = %v, want %v", a, raddr) + } + + if et := h.Type(); et != proto { + t.Fatalf("Type() = %v, want %v", et, proto) + } + } + if len(b) != len(want) { + t.Fatalf("Read returned %v bytes, want %v", len(b), len(want)) + } + if !reflect.DeepEqual(b, want) { + t.Fatalf("Read returned %x, want %x", b, want) + } + }) + } + } +} + +func TestDeliverPacket(t *testing.T) { + const ( + mtu = 1500 + laddr = tcpip.LinkAddress("\x11\x22\x33\x44\x55\x66") + raddr = tcpip.LinkAddress("\x77\x88\x99\xaa\xbb\xcc") + proto = 10 + ) + + lengths := []int{100, 1000} + eths := []bool{true, false} + + for _, eth := range eths { + for _, plen := range lengths { + t.Run(fmt.Sprintf("Eth=%v,PayloadLen=%v", eth, plen), func(t *testing.T) { + c := newContext(t, &Options{Address: laddr, MTU: mtu, EthernetHeader: eth}) + defer c.cleanup() + + // Build packet. + b := make([]byte, plen) + all := b + for i := range b { + b[i] = uint8(rand.Intn(256)) + } + + if !eth { + // So that it looks like an IPv4 packet. + b[0] = 0x40 + } else { + hdr := make(header.Ethernet, header.EthernetMinimumSize) + hdr.Encode(&header.EthernetFields{ + SrcAddr: raddr, + DstAddr: laddr, + Type: proto, + }) + all = append(hdr, b...) + } + + // Write packet via the file descriptor. + if _, err := syscall.Write(c.fds[0], all); err != nil { + t.Fatalf("Write failed: %v", err) + } + + // Receive packet through the endpoint. 
+ select { + case pi := <-c.ch: + want := packetInfo{ + raddr: raddr, + proto: proto, + contents: b, + } + if !eth { + want.proto = header.IPv4ProtocolNumber + want.raddr = "" + } + if !reflect.DeepEqual(want, pi) { + t.Fatalf("Unexpected received packet: %+v, want %+v", pi, want) + } + case <-time.After(10 * time.Second): + t.Fatalf("Timed out waiting for packet") + } + }) + } + } +} + +func TestBufConfigMaxLength(t *testing.T) { + got := 0 + for _, i := range BufConfig { + got += i + } + want := header.MaxIPPacketSize // maximum TCP packet size + if got < want { + t.Errorf("total buffer size is invalid: got %d, want >= %d", got, want) + } +} + +func TestBufConfigFirst(t *testing.T) { + // The stack assumes that the TCP/IP header is enterily contained in the first view. + // Therefore, the first view needs to be large enough to contain the maximum TCP/IP + // header, which is 120 bytes (60 bytes for IP + 60 bytes for TCP). + want := 120 + got := BufConfig[0] + if got < want { + t.Errorf("first view has an invalid size: got %d, want >= %d", got, want) + } +} + +func build(bufConfig []int) *endpoint { + e := &endpoint{ + views: make([]buffer.View, len(bufConfig)), + iovecs: make([]syscall.Iovec, len(bufConfig)), + } + e.allocateViews(bufConfig) + return e +} + +var capLengthTestCases = []struct { + comment string + config []int + n int + wantUsed int + wantLengths []int +}{ + { + comment: "Single slice", + config: []int{2}, + n: 1, + wantUsed: 1, + wantLengths: []int{1}, + }, + { + comment: "Multiple slices", + config: []int{1, 2}, + n: 2, + wantUsed: 2, + wantLengths: []int{1, 1}, + }, + { + comment: "Entire buffer", + config: []int{1, 2}, + n: 3, + wantUsed: 2, + wantLengths: []int{1, 2}, + }, + { + comment: "Entire buffer but not on the last slice", + config: []int{1, 2, 3}, + n: 3, + wantUsed: 2, + wantLengths: []int{1, 2, 3}, + }, +} + +func TestCapLength(t *testing.T) { + for _, c := range capLengthTestCases { + e := build(c.config) + used := e.capViews(c.n, 
c.config) + if used != c.wantUsed { + t.Errorf("Test \"%s\" failed when calling capViews(%d, %v). Got %d. Want %d", c.comment, c.n, c.config, used, c.wantUsed) + } + lengths := make([]int, len(e.views)) + for i, v := range e.views { + lengths[i] = len(v) + } + if !reflect.DeepEqual(lengths, c.wantLengths) { + t.Errorf("Test \"%s\" failed when calling capViews(%d, %v). Got %v. Want %v", c.comment, c.n, c.config, lengths, c.wantLengths) + } + + } +} diff --git a/pkg/tcpip/link/loopback/BUILD b/pkg/tcpip/link/loopback/BUILD new file mode 100644 index 000000000..b454d0839 --- /dev/null +++ b/pkg/tcpip/link/loopback/BUILD @@ -0,0 +1,15 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "loopback", + srcs = ["loopback.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/loopback", + visibility = ["//:sandbox"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/stack", + ], +) diff --git a/pkg/tcpip/link/loopback/loopback.go b/pkg/tcpip/link/loopback/loopback.go new file mode 100644 index 000000000..1a9cd09d7 --- /dev/null +++ b/pkg/tcpip/link/loopback/loopback.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loopback provides the implemention of loopback data-link layer +// endpoints. Such endpoints just turn outbound packets into inbound ones. +// +// Loopback endpoints can be used in the networking stack by calling New() to +// create a new endpoint, and then passing it as an argument to +// Stack.CreateNIC(). +package loopback + +import ( + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +type endpoint struct { + dispatcher stack.NetworkDispatcher +} + +// New creates a new loopback endpoint. 
This link-layer endpoint just turns +// outbound packets into inbound packets. +func New() tcpip.LinkEndpointID { + return stack.RegisterLinkEndpoint(&endpoint{}) +} + +// Attach implements stack.LinkEndpoint.Attach. It just saves the stack network- +// layer dispatcher for later use when packets need to be dispatched. +func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) { + e.dispatcher = dispatcher +} + +// MTU implements stack.LinkEndpoint.MTU. It returns a constant that matches the +// linux loopback interface. +func (*endpoint) MTU() uint32 { + return 65536 +} + +// Capabilities implements stack.LinkEndpoint.Capabilities. Loopback advertises +// itself as supporting checksum offload, but in reality it's just omitted. +func (*endpoint) Capabilities() stack.LinkEndpointCapabilities { + return stack.CapabilityChecksumOffload +} + +// MaxHeaderLength implements stack.LinkEndpoint.MaxHeaderLength. Given that the +// loopback interface doesn't have a header, it just returns 0. +func (*endpoint) MaxHeaderLength() uint16 { + return 0 +} + +// LinkAddress returns the link address of this endpoint. +func (*endpoint) LinkAddress() tcpip.LinkAddress { + return "" +} + +// WritePacket implements stack.LinkEndpoint.WritePacket. It delivers outbound +// packets to the network-layer dispatcher. +func (e *endpoint) WritePacket(_ *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.NetworkProtocolNumber) *tcpip.Error { + if len(payload) == 0 { + // We don't have a payload, so just use the buffer from the + // header as the full packet. 
+ v := hdr.View() + vv := v.ToVectorisedView([1]buffer.View{}) + e.dispatcher.DeliverNetworkPacket(e, "", protocol, &vv) + } else { + views := []buffer.View{hdr.View(), payload} + vv := buffer.NewVectorisedView(len(views[0])+len(views[1]), views) + e.dispatcher.DeliverNetworkPacket(e, "", protocol, &vv) + } + + return nil +} diff --git a/pkg/tcpip/link/rawfile/BUILD b/pkg/tcpip/link/rawfile/BUILD new file mode 100644 index 000000000..4c63af0ea --- /dev/null +++ b/pkg/tcpip/link/rawfile/BUILD @@ -0,0 +1,17 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "rawfile", + srcs = [ + "blockingpoll_amd64.s", + "errors.go", + "rawfile_unsafe.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/rawfile", + visibility = [ + "//visibility:public", + ], + deps = ["//pkg/tcpip"], +) diff --git a/pkg/tcpip/link/rawfile/blockingpoll_amd64.s b/pkg/tcpip/link/rawfile/blockingpoll_amd64.s new file mode 100644 index 000000000..88206fc87 --- /dev/null +++ b/pkg/tcpip/link/rawfile/blockingpoll_amd64.s @@ -0,0 +1,26 @@ +#include "textflag.h" + +// blockingPoll makes the poll() syscall while calling the version of +// entersyscall that relinquishes the P so that other Gs can run. This is meant +// to be called in cases when the syscall is expected to block. 
+// +// func blockingPoll(fds unsafe.Pointer, nfds int, timeout int64) (n int, err syscall.Errno) +TEXT ·blockingPoll(SB),NOSPLIT,$0-40 + CALL runtime·entersyscallblock(SB) + MOVQ fds+0(FP), DI + MOVQ nfds+8(FP), SI + MOVQ timeout+16(FP), DX + MOVQ $0x7, AX // SYS_POLL + SYSCALL + CMPQ AX, $0xfffffffffffff001 + JLS ok + MOVQ $-1, n+24(FP) + NEGQ AX + MOVQ AX, err+32(FP) + CALL runtime·exitsyscall(SB) + RET +ok: + MOVQ AX, n+24(FP) + MOVQ $0, err+32(FP) + CALL runtime·exitsyscall(SB) + RET diff --git a/pkg/tcpip/link/rawfile/errors.go b/pkg/tcpip/link/rawfile/errors.go new file mode 100644 index 000000000..b6e7b3d71 --- /dev/null +++ b/pkg/tcpip/link/rawfile/errors.go @@ -0,0 +1,40 @@ +package rawfile + +import ( + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +var translations = map[syscall.Errno]*tcpip.Error{ + syscall.EEXIST: tcpip.ErrDuplicateAddress, + syscall.ENETUNREACH: tcpip.ErrNoRoute, + syscall.EINVAL: tcpip.ErrInvalidEndpointState, + syscall.EALREADY: tcpip.ErrAlreadyConnecting, + syscall.EISCONN: tcpip.ErrAlreadyConnected, + syscall.EADDRINUSE: tcpip.ErrPortInUse, + syscall.EADDRNOTAVAIL: tcpip.ErrBadLocalAddress, + syscall.EPIPE: tcpip.ErrClosedForSend, + syscall.EWOULDBLOCK: tcpip.ErrWouldBlock, + syscall.ECONNREFUSED: tcpip.ErrConnectionRefused, + syscall.ETIMEDOUT: tcpip.ErrTimeout, + syscall.EINPROGRESS: tcpip.ErrConnectStarted, + syscall.EDESTADDRREQ: tcpip.ErrDestinationRequired, + syscall.ENOTSUP: tcpip.ErrNotSupported, + syscall.ENOTTY: tcpip.ErrQueueSizeNotSupported, + syscall.ENOTCONN: tcpip.ErrNotConnected, + syscall.ECONNRESET: tcpip.ErrConnectionReset, + syscall.ECONNABORTED: tcpip.ErrConnectionAborted, +} + +// TranslateErrno translate an errno from the syscall package into a +// *tcpip.Error. +// +// Not all errnos are supported and this function will panic on unreconized +// errnos. 
+func TranslateErrno(e syscall.Errno) *tcpip.Error { + if err, ok := translations[e]; ok { + return err + } + return tcpip.ErrInvalidEndpointState +} diff --git a/pkg/tcpip/link/rawfile/rawfile_unsafe.go b/pkg/tcpip/link/rawfile/rawfile_unsafe.go new file mode 100644 index 000000000..d3660e1b4 --- /dev/null +++ b/pkg/tcpip/link/rawfile/rawfile_unsafe.go @@ -0,0 +1,161 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rawfile contains utilities for using the netstack with raw host +// files on Linux hosts. +package rawfile + +import ( + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +//go:noescape +func blockingPoll(fds unsafe.Pointer, nfds int, timeout int64) (n int, err syscall.Errno) + +// GetMTU determines the MTU of a network interface device. +func GetMTU(name string) (uint32, error) { + fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_DGRAM, 0) + if err != nil { + return 0, err + } + + defer syscall.Close(fd) + + var ifreq struct { + name [16]byte + mtu int32 + _ [20]byte + } + + copy(ifreq.name[:], name) + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq))) + if errno != 0 { + return 0, errno + } + + return uint32(ifreq.mtu), nil +} + +// NonBlockingWrite writes the given buffer to a file descriptor. It fails if +// partial data is written. +func NonBlockingWrite(fd int, buf []byte) *tcpip.Error { + var ptr unsafe.Pointer + if len(buf) > 0 { + ptr = unsafe.Pointer(&buf[0]) + } + + _, _, e := syscall.RawSyscall(syscall.SYS_WRITE, uintptr(fd), uintptr(ptr), uintptr(len(buf))) + if e != 0 { + return TranslateErrno(e) + } + + return nil +} + +// NonBlockingWrite2 writes up to two byte slices to a file descriptor in a +// single syscall. It fails if partial data is written. 
+func NonBlockingWrite2(fd int, b1, b2 []byte) *tcpip.Error { + // If the is no second buffer, issue a regular write. + if len(b2) == 0 { + return NonBlockingWrite(fd, b1) + } + + // We have two buffers. Build the iovec that represents them and issue + // a writev syscall. + iovec := [...]syscall.Iovec{ + { + Base: &b1[0], + Len: uint64(len(b1)), + }, + { + Base: &b2[0], + Len: uint64(len(b2)), + }, + } + + _, _, e := syscall.RawSyscall(syscall.SYS_WRITEV, uintptr(fd), uintptr(unsafe.Pointer(&iovec[0])), uintptr(len(iovec))) + if e != 0 { + return TranslateErrno(e) + } + + return nil +} + +// NonBlockingWriteN writes up to N byte slices to a file descriptor in a +// single syscall. It fails if partial data is written. +func NonBlockingWriteN(fd int, bs ...[]byte) *tcpip.Error { + iovec := make([]syscall.Iovec, 0, len(bs)) + + for _, b := range bs { + if len(b) == 0 { + continue + } + iovec = append(iovec, syscall.Iovec{ + Base: &b[0], + Len: uint64(len(b)), + }) + } + + _, _, e := syscall.RawSyscall(syscall.SYS_WRITEV, uintptr(fd), uintptr(unsafe.Pointer(&iovec[0])), uintptr(len(iovec))) + if e != 0 { + return TranslateErrno(e) + } + + return nil +} + +// BlockingRead reads from a file descriptor that is set up as non-blocking. If +// no data is available, it will block in a poll() syscall until the file +// descirptor becomes readable. +func BlockingRead(fd int, b []byte) (int, *tcpip.Error) { + for { + n, _, e := syscall.RawSyscall(syscall.SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) + if e == 0 { + return int(n), nil + } + + event := struct { + fd int32 + events int16 + revents int16 + }{ + fd: int32(fd), + events: 1, // POLLIN + } + + _, e = blockingPoll(unsafe.Pointer(&event), 1, -1) + if e != 0 && e != syscall.EINTR { + return 0, TranslateErrno(e) + } + } +} + +// BlockingReadv reads from a file descriptor that is set up as non-blocking and +// stores the data in a list of iovecs buffers. 
If no data is available, it will +// block in a poll() syscall until the file descirptor becomes readable. +func BlockingReadv(fd int, iovecs []syscall.Iovec) (int, *tcpip.Error) { + for { + n, _, e := syscall.RawSyscall(syscall.SYS_READV, uintptr(fd), uintptr(unsafe.Pointer(&iovecs[0])), uintptr(len(iovecs))) + if e == 0 { + return int(n), nil + } + + event := struct { + fd int32 + events int16 + revents int16 + }{ + fd: int32(fd), + events: 1, // POLLIN + } + + _, e = blockingPoll(unsafe.Pointer(&event), 1, -1) + if e != 0 && e != syscall.EINTR { + return 0, TranslateErrno(e) + } + } +} diff --git a/pkg/tcpip/link/sharedmem/BUILD b/pkg/tcpip/link/sharedmem/BUILD new file mode 100644 index 000000000..a4a965924 --- /dev/null +++ b/pkg/tcpip/link/sharedmem/BUILD @@ -0,0 +1,42 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "sharedmem", + srcs = [ + "rx.go", + "sharedmem.go", + "sharedmem_unsafe.go", + "tx.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem", + visibility = [ + "//:sandbox", + ], + deps = [ + "//pkg/log", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/link/rawfile", + "//pkg/tcpip/link/sharedmem/queue", + "//pkg/tcpip/stack", + ], +) + +go_test( + name = "sharedmem_test", + srcs = [ + "sharedmem_test.go", + ], + embed = [":sharedmem"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/link/sharedmem/pipe", + "//pkg/tcpip/link/sharedmem/queue", + "//pkg/tcpip/stack", + ], +) diff --git a/pkg/tcpip/link/sharedmem/pipe/BUILD b/pkg/tcpip/link/sharedmem/pipe/BUILD new file mode 100644 index 000000000..e8d795500 --- /dev/null +++ b/pkg/tcpip/link/sharedmem/pipe/BUILD @@ -0,0 +1,23 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "pipe", + srcs = [ + "pipe.go", + "pipe_unsafe.go", + "rx.go", 
+ "tx.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/pipe", + visibility = ["//:sandbox"], +) + +go_test( + name = "pipe_test", + srcs = [ + "pipe_test.go", + ], + embed = [":pipe"], +) diff --git a/pkg/tcpip/link/sharedmem/pipe/pipe.go b/pkg/tcpip/link/sharedmem/pipe/pipe.go new file mode 100644 index 000000000..1173a60da --- /dev/null +++ b/pkg/tcpip/link/sharedmem/pipe/pipe.go @@ -0,0 +1,68 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pipe implements a shared memory ring buffer on which a single reader +// and a single writer can operate (read/write) concurrently. The ring buffer +// allows for data of different sizes to be written, and preserves the boundary +// of the written data. +// +// Example usage is as follows: +// +// wb := t.Push(20) +// // Write data to wb. +// t.Flush() +// +// rb := r.Pull() +// // Do something with data in rb. +// t.Flush() +package pipe + +import ( + "math" +) + +const ( + jump uint64 = math.MaxUint32 + 1 + offsetMask uint64 = math.MaxUint32 + revolutionMask uint64 = ^offsetMask + + sizeOfSlotHeader = 8 // sizeof(uint64) + slotFree uint64 = 1 << 63 + slotSizeMask uint64 = math.MaxUint32 +) + +// payloadToSlotSize calculates the total size of a slot based on its payload +// size. The total size is the header size, plus the payload size, plus padding +// if necessary to make the total size a multiple of sizeOfSlotHeader. +func payloadToSlotSize(payloadSize uint64) uint64 { + s := sizeOfSlotHeader + payloadSize + return (s + sizeOfSlotHeader - 1) &^ (sizeOfSlotHeader - 1) +} + +// slotToPayloadSize calculates the payload size of a slot based on the total +// size of the slot. This is only meant to be used when creating slots that +// don't carry information (e.g., free slots or wrap slots). 
+func slotToPayloadSize(offset uint64) uint64 { + return offset - sizeOfSlotHeader +} + +// pipe is a basic data structure used by both (transmit & receive) ends of a +// pipe. Indices into this pipe are split into two fields: offset, which counts +// the number of bytes from the beginning of the buffer, and revolution, which +// counts the number of times the index has wrapped around. +type pipe struct { + buffer []byte +} + +// init initializes the pipe buffer such that its size is a multiple of the size +// of the slot header. +func (p *pipe) init(b []byte) { + p.buffer = b[:len(b)&^(sizeOfSlotHeader-1)] +} + +// data returns a section of the buffer starting at the given index (which may +// include revolution information) and with the given size. +func (p *pipe) data(idx uint64, size uint64) []byte { + return p.buffer[(idx&offsetMask)+sizeOfSlotHeader:][:size] +} diff --git a/pkg/tcpip/link/sharedmem/pipe/pipe_test.go b/pkg/tcpip/link/sharedmem/pipe/pipe_test.go new file mode 100644 index 000000000..441ff5b25 --- /dev/null +++ b/pkg/tcpip/link/sharedmem/pipe/pipe_test.go @@ -0,0 +1,507 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pipe + +import ( + "math/rand" + "reflect" + "runtime" + "sync" + "testing" +) + +func TestSimpleReadWrite(t *testing.T) { + // Check that a simple write can be properly read from the rx side. 
+ tr := rand.New(rand.NewSource(99)) + rr := rand.New(rand.NewSource(99)) + + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + wb := tx.Push(10) + if wb == nil { + t.Fatalf("Push failed on empty pipe") + } + for i := range wb { + wb[i] = byte(tr.Intn(256)) + } + tx.Flush() + + var rx Rx + rx.Init(b) + rb := rx.Pull() + if len(rb) != 10 { + t.Fatalf("Bad buffer size returned: got %v, want %v", len(rb), 10) + } + + for i := range rb { + if v := byte(rr.Intn(256)); v != rb[i] { + t.Fatalf("Bad read buffer at index %v: got %v, want %v", i, rb[i], v) + } + } + rx.Flush() +} + +func TestEmptyRead(t *testing.T) { + // Check that pulling from an empty pipe fails. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + var rx Rx + rx.Init(b) + if rb := rx.Pull(); rb != nil { + t.Fatalf("Pull succeeded on empty pipe") + } +} + +func TestTooLargeWrite(t *testing.T) { + // Check that writes that are too large are properly rejected. + b := make([]byte, 96) + var tx Tx + tx.Init(b) + + if wb := tx.Push(96); wb != nil { + t.Fatalf("Write of 96 bytes succeeded on 96-byte pipe") + } + + if wb := tx.Push(88); wb != nil { + t.Fatalf("Write of 88 bytes succeeded on 96-byte pipe") + } + + if wb := tx.Push(80); wb == nil { + t.Fatalf("Write of 80 bytes failed on 96-byte pipe") + } +} + +func TestFullWrite(t *testing.T) { + // Check that writes fail when the pipe is full. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(80); wb == nil { + t.Fatalf("Write of 80 bytes failed on 96-byte pipe") + } + + if wb := tx.Push(1); wb != nil { + t.Fatalf("Write succeeded on full pipe") + } +} + +func TestFullAndFlushedWrite(t *testing.T) { + // Check that writes fail when the pipe is full and has already been + // flushed. 
+ b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(80); wb == nil { + t.Fatalf("Write of 80 bytes failed on 96-byte pipe") + } + + tx.Flush() + + if wb := tx.Push(1); wb != nil { + t.Fatalf("Write succeeded on full pipe") + } +} + +func TestTxFlushTwice(t *testing.T) { + // Checks that a second consecutive tx flush is a no-op. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + tx.Flush() + + // Make copy of original tx queue, flush it, then check that it didn't + // change. + orig := tx + tx.Flush() + + if !reflect.DeepEqual(orig, tx) { + t.Fatalf("Flush mutated tx pipe: got %v, want %v", tx, orig) + } +} + +func TestRxFlushTwice(t *testing.T) { + // Checks that a second consecutive rx flush is a no-op. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + tx.Flush() + + var rx Rx + rx.Init(b) + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + rx.Flush() + + // Make copy of original rx queue, flush it, then check that it didn't + // change. + orig := rx + rx.Flush() + + if !reflect.DeepEqual(orig, rx) { + t.Fatalf("Flush mutated rx pipe: got %v, want %v", rx, orig) + } +} + +func TestWrapInMiddleOfTransaction(t *testing.T) { + // Check that writes are not flushed when we need to wrap the buffer + // around. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + tx.Flush() + + var rx Rx + rx.Init(b) + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + rx.Flush() + + // At this point the ring buffer is empty, but the write is at offset + // 64 (50 + sizeOfSlotHeader + padding-for-8-byte-alignment). 
+ if wb := tx.Push(10); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on non-full pipe") + } + + // We haven't flushed yet, so pull must return nil. + if rb := rx.Pull(); rb != nil { + t.Fatalf("Pull succeeded on non-flushed pipe") + } + + tx.Flush() + + // The two buffers must be available now. + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } +} + +func TestWriteAbort(t *testing.T) { + // Check that a read fails on a pipe that has had data pushed to it but + // has aborted the push. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(10); wb == nil { + t.Fatalf("Write failed on empty pipe") + } + + var rx Rx + rx.Init(b) + if rb := rx.Pull(); rb != nil { + t.Fatalf("Pull succeeded on empty pipe") + } + + tx.Abort() + if rb := rx.Pull(); rb != nil { + t.Fatalf("Pull succeeded on empty pipe") + } +} + +func TestWrappedWriteAbort(t *testing.T) { + // Check that writes are properly aborted even if the writes wrap + // around. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + tx.Flush() + + var rx Rx + rx.Init(b) + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + rx.Flush() + + // At this point the ring buffer is empty, but the write is at offset + // 64 (50 + sizeOfSlotHeader + padding-for-8-byte-alignment). + if wb := tx.Push(10); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on non-full pipe") + } + + // We haven't flushed yet, so pull must return nil. + if rb := rx.Pull(); rb != nil { + t.Fatalf("Pull succeeded on non-flushed pipe") + } + + tx.Abort() + + // The pushes were aborted, so no data should be readable. 
+ if rb := rx.Pull(); rb != nil { + t.Fatalf("Pull succeeded on non-flushed pipe") + } + + // Try the same transactions again, but flush this time. + if wb := tx.Push(10); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on non-full pipe") + } + + tx.Flush() + + // The two buffers must be available now. + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } +} + +func TestEmptyReadOnNonFlushedWrite(t *testing.T) { + // Check that a read fails on a pipe that has had data pushed to it + // but not yet flushed. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(10); wb == nil { + t.Fatalf("Write failed on empty pipe") + } + + var rx Rx + rx.Init(b) + if rb := rx.Pull(); rb != nil { + t.Fatalf("Pull succeeded on empty pipe") + } + + tx.Flush() + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull on failed on non-empty pipe") + } +} + +func TestPullAfterPullingEntirePipe(t *testing.T) { + // Check that Pull fails when the pipe is full, but all of it has + // already been pulled but not yet flushed. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + tx.Flush() + + var rx Rx + rx.Init(b) + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + rx.Flush() + + // At this point the ring buffer is empty, but the write is at offset + // 64 (50 + sizeOfSlotHeader + padding-for-8-byte-alignment). Write 3 + // buffers that will fill the pipe. + if wb := tx.Push(10); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + + if wb := tx.Push(20); wb == nil { + t.Fatalf("Push failed on non-full pipe") + } + + if wb := tx.Push(24); wb == nil { + t.Fatalf("Push failed on non-full pipe") + } + + tx.Flush() + + // The three buffers must be available now. 
+ if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + + // Fourth pull must fail. + if rb := rx.Pull(); rb != nil { + t.Fatalf("Pull succeeded on empty pipe") + } +} + +func TestNoRoomToWrapOnPush(t *testing.T) { + // Check that Push fails when it tries to allocate room to add a wrap + // message. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + tx.Flush() + + var rx Rx + rx.Init(b) + if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + rx.Flush() + + // At this point the ring buffer is empty, but the write is at offset + // 64 (50 + sizeOfSlotHeader + padding-for-8-byte-alignment). Write 20, + // which won't fit (64+20+8+padding = 96, which wouldn't leave room for + // the padding), so it wraps around. + if wb := tx.Push(20); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + + tx.Flush() + + // Buffer offset is at 28. Try to write 70, which would require a wrap + // slot which cannot be created now. + if wb := tx.Push(70); wb != nil { + t.Fatalf("Push succeeded on pipe with no room for wrap message") + } +} + +func TestRxImplicitFlushOfWrapMessage(t *testing.T) { + // Check if the first read is that of a wrapping message, that it gets + // immediately flushed. + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + if wb := tx.Push(50); wb == nil { + t.Fatalf("Push failed on empty pipe") + } + tx.Flush() + + // This will cause a wrapping message to written. + if wb := tx.Push(60); wb != nil { + t.Fatalf("Push succeeded when there is no room in pipe") + } + + var rx Rx + rx.Init(b) + + // Read the first message. 
+ if rb := rx.Pull(); rb == nil { + t.Fatalf("Pull failed on non-empty pipe") + } + rx.Flush() + + // This should fail because of the wrapping message is taking up space. + if wb := tx.Push(60); wb != nil { + t.Fatalf("Push succeeded when there is no room in pipe") + } + + // Try to read the next one. This should consume the wrapping message. + rx.Pull() + + // This must now succeed. + if wb := tx.Push(60); wb == nil { + t.Fatalf("Push failed on empty pipe") + } +} + +func TestConcurrentReaderWriter(t *testing.T) { + // Push a million buffers of random sizes and random contents. Check + // that buffers read match what was written. + tr := rand.New(rand.NewSource(99)) + rr := rand.New(rand.NewSource(99)) + + b := make([]byte, 100) + var tx Tx + tx.Init(b) + + var rx Rx + rx.Init(b) + + const count = 1000000 + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + runtime.Gosched() + for i := 0; i < count; i++ { + n := 1 + tr.Intn(80) + wb := tx.Push(uint64(n)) + for wb == nil { + wb = tx.Push(uint64(n)) + } + + for j := range wb { + wb[j] = byte(tr.Intn(256)) + } + + tx.Flush() + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + runtime.Gosched() + for i := 0; i < count; i++ { + n := 1 + rr.Intn(80) + rb := rx.Pull() + for rb == nil { + rb = rx.Pull() + } + + if n != len(rb) { + t.Fatalf("Bad %v-th buffer length: got %v, want %v", i, len(rb), n) + } + + for j := range rb { + if v := byte(rr.Intn(256)); v != rb[j] { + t.Fatalf("Bad %v-th read buffer at index %v: got %v, want %v", i, j, rb[j], v) + } + } + + rx.Flush() + } + }() + + wg.Wait() +} diff --git a/pkg/tcpip/link/sharedmem/pipe/pipe_unsafe.go b/pkg/tcpip/link/sharedmem/pipe/pipe_unsafe.go new file mode 100644 index 000000000..d536abedf --- /dev/null +++ b/pkg/tcpip/link/sharedmem/pipe/pipe_unsafe.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pipe + +import ( + "sync/atomic" + "unsafe" +) + +func (p *pipe) write(idx uint64, v uint64) { + ptr := (*uint64)(unsafe.Pointer(&p.buffer[idx&offsetMask:][:8][0])) + *ptr = v +} + +func (p *pipe) writeAtomic(idx uint64, v uint64) { + ptr := (*uint64)(unsafe.Pointer(&p.buffer[idx&offsetMask:][:8][0])) + atomic.StoreUint64(ptr, v) +} + +func (p *pipe) readAtomic(idx uint64) uint64 { + ptr := (*uint64)(unsafe.Pointer(&p.buffer[idx&offsetMask:][:8][0])) + return atomic.LoadUint64(ptr) +} diff --git a/pkg/tcpip/link/sharedmem/pipe/rx.go b/pkg/tcpip/link/sharedmem/pipe/rx.go new file mode 100644 index 000000000..261e21f9e --- /dev/null +++ b/pkg/tcpip/link/sharedmem/pipe/rx.go @@ -0,0 +1,83 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pipe + +// Rx is the receive side of the shared memory ring buffer. +type Rx struct { + p pipe + + tail uint64 + head uint64 +} + +// Init initializes the receive end of the pipe. In the initial state, the next +// slot to be inspected is the very first one. +func (r *Rx) Init(b []byte) { + r.p.init(b) + r.tail = 0xfffffffe * jump + r.head = r.tail +} + +// Pull reads the next buffer from the pipe, returning nil if there isn't one +// currently available. +// +// The returned slice is available until Flush() is next called. After that, it +// must not be touched. +func (r *Rx) Pull() []byte { + if r.head == r.tail+jump { + // We've already pulled the whole pipe. + return nil + } + + header := r.p.readAtomic(r.head) + if header&slotFree != 0 { + // The next slot is free, we can't pull it yet. + return nil + } + + payloadSize := header & slotSizeMask + newHead := r.head + payloadToSlotSize(payloadSize) + headWrap := (r.head & revolutionMask) | uint64(len(r.p.buffer)) + + // Check if this is a wrapping slot. 
If that's the case, it carries no + // data, so we just skip it and try again from the first slot. + if int64(newHead-headWrap) >= 0 { + if int64(newHead-headWrap) > int64(jump) || newHead&offsetMask != 0 { + return nil + } + + if r.tail == r.head { + // If this is the first pull since the last Flush() + // call, we flush the state so that the sender can use + // this space if it needs to. + r.p.writeAtomic(r.head, slotFree|slotToPayloadSize(newHead-r.head)) + r.tail = newHead + } + + r.head = newHead + return r.Pull() + } + + // Grab the buffer before updating r.head. + b := r.p.data(r.head, payloadSize) + r.head = newHead + return b +} + +// Flush tells the transmitter that all buffers pulled since the last Flush() +// have been used, so the transmitter is free to used their slots for further +// transmission. +func (r *Rx) Flush() { + if r.head == r.tail { + return + } + r.p.writeAtomic(r.tail, slotFree|slotToPayloadSize(r.head-r.tail)) + r.tail = r.head +} + +// Bytes returns the byte slice on which the pipe operates. +func (r *Rx) Bytes() []byte { + return r.p.buffer +} diff --git a/pkg/tcpip/link/sharedmem/pipe/tx.go b/pkg/tcpip/link/sharedmem/pipe/tx.go new file mode 100644 index 000000000..374f515ab --- /dev/null +++ b/pkg/tcpip/link/sharedmem/pipe/tx.go @@ -0,0 +1,151 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pipe + +// Tx is the transmit side of the shared memory ring buffer. +type Tx struct { + p pipe + maxPayloadSize uint64 + + head uint64 + tail uint64 + next uint64 + + tailHeader uint64 +} + +// Init initializes the transmit end of the pipe. In the initial state, the next +// slot to be written is the very first one, and the transmitter has the whole +// ring buffer available to it. 
+func (t *Tx) Init(b []byte) { + t.p.init(b) + // maxPayloadSize excludes the header of the payload, and the header + // of the wrapping message. + t.maxPayloadSize = uint64(len(t.p.buffer)) - 2*sizeOfSlotHeader + t.tail = 0xfffffffe * jump + t.next = t.tail + t.head = t.tail + jump + t.p.write(t.tail, slotFree) +} + +// Capacity determines how many records of the given size can be written to the +// pipe before it fills up. +func (t *Tx) Capacity(recordSize uint64) uint64 { + available := uint64(len(t.p.buffer)) - sizeOfSlotHeader + entryLen := payloadToSlotSize(recordSize) + return available / entryLen +} + +// Push reserves "payloadSize" bytes for transmission in the pipe. The caller +// populates the returned slice with the data to be transferred and enventually +// calls Flush() to make the data visible to the reader, or Abort() to make the +// pipe forget all Push() calls since the last Flush(). +// +// The returned slice is available until Flush() or Abort() is next called. +// After that, it must not be touched. +func (t *Tx) Push(payloadSize uint64) []byte { + // Fail request if we know we will never have enough room. + if payloadSize > t.maxPayloadSize { + return nil + } + + totalLen := payloadToSlotSize(payloadSize) + newNext := t.next + totalLen + nextWrap := (t.next & revolutionMask) | uint64(len(t.p.buffer)) + if int64(newNext-nextWrap) >= 0 { + // The new buffer would overflow the pipe, so we push a wrapping + // slot, then try to add the actual slot to the front of the + // pipe. + newNext = (newNext & revolutionMask) + jump + wrappingPayloadSize := slotToPayloadSize(newNext - t.next) + if !t.reclaim(newNext) { + return nil + } + + oldNext := t.next + t.next = newNext + if oldNext != t.tail { + t.p.write(oldNext, wrappingPayloadSize) + } else { + t.tailHeader = wrappingPayloadSize + t.Flush() + } + + newNext += totalLen + } + + // Check that we have enough room for the buffer. 
+ if !t.reclaim(newNext) { + return nil + } + + if t.next != t.tail { + t.p.write(t.next, payloadSize) + } else { + t.tailHeader = payloadSize + } + + // Grab the buffer before updating t.next. + b := t.p.data(t.next, payloadSize) + t.next = newNext + + return b +} + +// reclaim attempts to advance the head until at least newNext. If the head is +// already at or beyond newNext, nothing happens and true is returned; otherwise +// it tries to reclaim slots that have already been consumed by the receive end +// of the pipe (they will be marked as free) and returns a boolean indicating +// whether it was successful in reclaiming enough slots. +func (t *Tx) reclaim(newNext uint64) bool { + for int64(newNext-t.head) > 0 { + // Can't reclaim if slot is not free. + header := t.p.readAtomic(t.head) + if header&slotFree == 0 { + return false + } + + payloadSize := header & slotSizeMask + newHead := t.head + payloadToSlotSize(payloadSize) + + // Check newHead is within bounds and valid. + if int64(newHead-t.tail) > int64(jump) || newHead&offsetMask >= uint64(len(t.p.buffer)) { + return false + } + + t.head = newHead + } + + return true +} + +// Abort causes all Push() calls since the last Flush() to be forgotten and +// therefore they will not be made visible to the receiver. +func (t *Tx) Abort() { + t.next = t.tail +} + +// Flush causes all buffers pushed since the last Flush() [or Abort(), whichever +// is the most recent] to be made visible to the receiver. +func (t *Tx) Flush() { + if t.next == t.tail { + // Nothing to do if there are no pushed buffers. + return + } + + if t.next != t.head { + // The receiver will spin in t.next, so we must make sure that + // the slotFree bit is set. + t.p.write(t.next, slotFree) + } + + t.p.writeAtomic(t.tail, t.tailHeader) + t.tail = t.next +} + +// Bytes returns the byte slice on which the pipe operates. 
+func (t *Tx) Bytes() []byte { + return t.p.buffer +} diff --git a/pkg/tcpip/link/sharedmem/queue/BUILD b/pkg/tcpip/link/sharedmem/queue/BUILD new file mode 100644 index 000000000..56ea4641d --- /dev/null +++ b/pkg/tcpip/link/sharedmem/queue/BUILD @@ -0,0 +1,28 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "queue", + srcs = [ + "rx.go", + "tx.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/queue", + visibility = ["//:sandbox"], + deps = [ + "//pkg/log", + "//pkg/tcpip/link/sharedmem/pipe", + ], +) + +go_test( + name = "queue_test", + srcs = [ + "queue_test.go", + ], + embed = [":queue"], + deps = [ + "//pkg/tcpip/link/sharedmem/pipe", + ], +) diff --git a/pkg/tcpip/link/sharedmem/queue/queue_test.go b/pkg/tcpip/link/sharedmem/queue/queue_test.go new file mode 100644 index 000000000..b022c389c --- /dev/null +++ b/pkg/tcpip/link/sharedmem/queue/queue_test.go @@ -0,0 +1,507 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package queue + +import ( + "encoding/binary" + "reflect" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/pipe" +) + +func TestBasicTxQueue(t *testing.T) { + // Tests that a basic transmit on a queue works, and that completion + // gets properly reported as well. + pb1 := make([]byte, 100) + pb2 := make([]byte, 100) + + var rxp pipe.Rx + rxp.Init(pb1) + + var txp pipe.Tx + txp.Init(pb2) + + var q Tx + q.Init(pb1, pb2) + + // Enqueue two buffers. + b := []TxBuffer{ + {nil, 100, 60}, + {nil, 200, 40}, + } + + b[0].Next = &b[1] + + const usedID = 1002 + const usedTotalSize = 100 + if !q.Enqueue(usedID, usedTotalSize, 2, &b[0]) { + t.Fatalf("Enqueue failed on empty queue") + } + + // Check the contents of the pipe. 
+ d := rxp.Pull() + if d == nil { + t.Fatalf("Tx pipe is empty after Enqueue") + } + + want := []byte{ + 234, 3, 0, 0, 0, 0, 0, 0, // id + 100, 0, 0, 0, // total size + 0, 0, 0, 0, // reserved + 100, 0, 0, 0, 0, 0, 0, 0, // offset 1 + 60, 0, 0, 0, // size 1 + 200, 0, 0, 0, 0, 0, 0, 0, // offset 2 + 40, 0, 0, 0, // size 2 + } + + if !reflect.DeepEqual(want, d) { + t.Fatalf("Bad posted packet: got %v, want %v", d, want) + } + + rxp.Flush() + + // Check that there are no completions yet. + if _, ok := q.CompletedPacket(); ok { + t.Fatalf("Packet reported as completed too soon") + } + + // Post a completion. + d = txp.Push(8) + if d == nil { + t.Fatalf("Unable to push to rx pipe") + } + binary.LittleEndian.PutUint64(d, usedID) + txp.Flush() + + // Check that completion is properly reported. + id, ok := q.CompletedPacket() + if !ok { + t.Fatalf("Completion not reported") + } + + if id != usedID { + t.Fatalf("Bad completion id: got %v, want %v", id, usedID) + } +} + +func TestBasicRxQueue(t *testing.T) { + // Tests that a basic receive on a queue works. + pb1 := make([]byte, 100) + pb2 := make([]byte, 100) + + var rxp pipe.Rx + rxp.Init(pb1) + + var txp pipe.Tx + txp.Init(pb2) + + var q Rx + q.Init(pb1, pb2, nil) + + // Post two buffers. + b := []RxBuffer{ + {100, 60, 1077, 0}, + {200, 40, 2123, 0}, + } + + if !q.PostBuffers(b) { + t.Fatalf("PostBuffers failed on empty queue") + } + + // Check the contents of the pipe. 
+ want := [][]byte{ + { + 100, 0, 0, 0, 0, 0, 0, 0, // Offset1 + 60, 0, 0, 0, // Size1 + 0, 0, 0, 0, // Remaining in group 1 + 0, 0, 0, 0, 0, 0, 0, 0, // User data 1 + 53, 4, 0, 0, 0, 0, 0, 0, // ID 1 + }, + { + 200, 0, 0, 0, 0, 0, 0, 0, // Offset2 + 40, 0, 0, 0, // Size2 + 0, 0, 0, 0, // Remaining in group 2 + 0, 0, 0, 0, 0, 0, 0, 0, // User data 2 + 75, 8, 0, 0, 0, 0, 0, 0, // ID 2 + }, + } + + for i := range b { + d := rxp.Pull() + if d == nil { + t.Fatalf("Tx pipe is empty after PostBuffers") + } + + if !reflect.DeepEqual(want[i], d) { + t.Fatalf("Bad posted packet: got %v, want %v", d, want[i]) + } + + rxp.Flush() + } + + // Check that there are no completions. + if _, n := q.Dequeue(nil); n != 0 { + t.Fatalf("Packet reported as received too soon") + } + + // Post a completion. + d := txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer) + if d == nil { + t.Fatalf("Unable to push to rx pipe") + } + + copy(d, []byte{ + 100, 0, 0, 0, // packet size + 0, 0, 0, 0, // reserved + + 100, 0, 0, 0, 0, 0, 0, 0, // offset 1 + 60, 0, 0, 0, // size 1 + 0, 0, 0, 0, 0, 0, 0, 0, // user data 1 + 53, 4, 0, 0, 0, 0, 0, 0, // ID 1 + + 200, 0, 0, 0, 0, 0, 0, 0, // offset 2 + 40, 0, 0, 0, // size 2 + 0, 0, 0, 0, 0, 0, 0, 0, // user data 2 + 75, 8, 0, 0, 0, 0, 0, 0, // ID 2 + }) + + txp.Flush() + + // Check that completion is properly reported. + bufs, n := q.Dequeue(nil) + if n != 100 { + t.Fatalf("Bad packet size: got %v, want %v", n, 100) + } + + if !reflect.DeepEqual(bufs, b) { + t.Fatalf("Bad returned buffers: got %v, want %v", bufs, b) + } +} + +func TestBadTxCompletion(t *testing.T) { + // Check that tx completions with bad sizes are properly ignored. + pb1 := make([]byte, 100) + pb2 := make([]byte, 100) + + var rxp pipe.Rx + rxp.Init(pb1) + + var txp pipe.Tx + txp.Init(pb2) + + var q Tx + q.Init(pb1, pb2) + + // Post a completion that is too short, and check that it is ignored. 
+ if d := txp.Push(7); d == nil { + t.Fatalf("Unable to push to rx pipe") + } + txp.Flush() + + if _, ok := q.CompletedPacket(); ok { + t.Fatalf("Bad completion not ignored") + } + + // Post a completion that is too long, and check that it is ignored. + if d := txp.Push(10); d == nil { + t.Fatalf("Unable to push to rx pipe") + } + txp.Flush() + + if _, ok := q.CompletedPacket(); ok { + t.Fatalf("Bad completion not ignored") + } +} + +func TestBadRxCompletion(t *testing.T) { + // Check that bad rx completions are properly ignored. + pb1 := make([]byte, 100) + pb2 := make([]byte, 100) + + var rxp pipe.Rx + rxp.Init(pb1) + + var txp pipe.Tx + txp.Init(pb2) + + var q Rx + q.Init(pb1, pb2, nil) + + // Post a completion that is too short, and check that it is ignored. + if d := txp.Push(7); d == nil { + t.Fatalf("Unable to push to rx pipe") + } + txp.Flush() + + if b, _ := q.Dequeue(nil); b != nil { + t.Fatalf("Bad completion not ignored") + } + + // Post a completion whose buffer sizes add up to less than the total + // size. + d := txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer) + if d == nil { + t.Fatalf("Unable to push to rx pipe") + } + + copy(d, []byte{ + 100, 0, 0, 0, // packet size + 0, 0, 0, 0, // reserved + + 100, 0, 0, 0, 0, 0, 0, 0, // offset 1 + 10, 0, 0, 0, // size 1 + 0, 0, 0, 0, 0, 0, 0, 0, // user data 1 + 53, 4, 0, 0, 0, 0, 0, 0, // ID 1 + + 200, 0, 0, 0, 0, 0, 0, 0, // offset 2 + 10, 0, 0, 0, // size 2 + 0, 0, 0, 0, 0, 0, 0, 0, // user data 2 + 75, 8, 0, 0, 0, 0, 0, 0, // ID 2 + }) + + txp.Flush() + if b, _ := q.Dequeue(nil); b != nil { + t.Fatalf("Bad completion not ignored") + } + + // Post a completion whose buffer sizes will cause a 32-bit overflow, + // but adds up to the right number. 
+ d = txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer) + if d == nil { + t.Fatalf("Unable to push to rx pipe") + } + + copy(d, []byte{ + 100, 0, 0, 0, // packet size + 0, 0, 0, 0, // reserved + + 100, 0, 0, 0, 0, 0, 0, 0, // offset 1 + 255, 255, 255, 255, // size 1 + 0, 0, 0, 0, 0, 0, 0, 0, // user data 1 + 53, 4, 0, 0, 0, 0, 0, 0, // ID 1 + + 200, 0, 0, 0, 0, 0, 0, 0, // offset 2 + 101, 0, 0, 0, // size 2 + 0, 0, 0, 0, 0, 0, 0, 0, // user data 2 + 75, 8, 0, 0, 0, 0, 0, 0, // ID 2 + }) + + txp.Flush() + if b, _ := q.Dequeue(nil); b != nil { + t.Fatalf("Bad completion not ignored") + } +} + +func TestFillTxPipe(t *testing.T) { + // Check that transmitting a new buffer when the buffer pipe is full + // fails gracefully. + pb1 := make([]byte, 104) + pb2 := make([]byte, 104) + + var rxp pipe.Rx + rxp.Init(pb1) + + var txp pipe.Tx + txp.Init(pb2) + + var q Tx + q.Init(pb1, pb2) + + // Transmit twice, which should fill the tx pipe. + b := []TxBuffer{ + {nil, 100, 60}, + {nil, 200, 40}, + } + + b[0].Next = &b[1] + + const usedID = 1002 + const usedTotalSize = 100 + for i := uint64(0); i < 2; i++ { + if !q.Enqueue(usedID+i, usedTotalSize, 2, &b[0]) { + t.Fatalf("Failed to transmit buffer") + } + } + + // Transmit another packet now that the tx pipe is full. + if q.Enqueue(usedID+2, usedTotalSize, 2, &b[0]) { + t.Fatalf("Enqueue succeeded when tx pipe is full") + } +} + +func TestFillRxPipe(t *testing.T) { + // Check that posting a new buffer when the buffer pipe is full fails + // gracefully. + pb1 := make([]byte, 100) + pb2 := make([]byte, 100) + + var rxp pipe.Rx + rxp.Init(pb1) + + var txp pipe.Tx + txp.Init(pb2) + + var q Rx + q.Init(pb1, pb2, nil) + + // Post a buffer twice, it should fill the tx pipe. + b := []RxBuffer{ + {100, 60, 1077, 0}, + } + + for i := 0; i < 2; i++ { + if !q.PostBuffers(b) { + t.Fatalf("PostBuffers failed on non-full queue") + } + } + + // Post another buffer now that the tx pipe is full. 
+ if q.PostBuffers(b) { + t.Fatalf("PostBuffers succeeded on full queue") + } +} + +func TestLotsOfTransmissions(t *testing.T) { + // Make sure pipes are being properly flushed when transmitting packets. + pb1 := make([]byte, 100) + pb2 := make([]byte, 100) + + var rxp pipe.Rx + rxp.Init(pb1) + + var txp pipe.Tx + txp.Init(pb2) + + var q Tx + q.Init(pb1, pb2) + + // Prepare packet with two buffers. + b := []TxBuffer{ + {nil, 100, 60}, + {nil, 200, 40}, + } + + b[0].Next = &b[1] + + const usedID = 1002 + const usedTotalSize = 100 + + // Post 100000 packets and completions. + for i := 100000; i > 0; i-- { + if !q.Enqueue(usedID, usedTotalSize, 2, &b[0]) { + t.Fatalf("Enqueue failed on non-full queue") + } + + if d := rxp.Pull(); d == nil { + t.Fatalf("Tx pipe is empty after Enqueue") + } + rxp.Flush() + + d := txp.Push(8) + if d == nil { + t.Fatalf("Unable to write to rx pipe") + } + binary.LittleEndian.PutUint64(d, usedID) + txp.Flush() + if _, ok := q.CompletedPacket(); !ok { + t.Fatalf("Completion not returned") + } + } +} + +func TestLotsOfReceptions(t *testing.T) { + // Make sure pipes are being properly flushed when receiving packets. + pb1 := make([]byte, 100) + pb2 := make([]byte, 100) + + var rxp pipe.Rx + rxp.Init(pb1) + + var txp pipe.Tx + txp.Init(pb2) + + var q Rx + q.Init(pb1, pb2, nil) + + // Prepare for posting two buffers. + b := []RxBuffer{ + {100, 60, 1077, 0}, + {200, 40, 2123, 0}, + } + + // Post 100000 buffers and completions. 
+ for i := 100000; i > 0; i-- { + if !q.PostBuffers(b) { + t.Fatalf("PostBuffers failed on non-full queue") + } + + if d := rxp.Pull(); d == nil { + t.Fatalf("Tx pipe is empty after PostBuffers") + } + rxp.Flush() + + if d := rxp.Pull(); d == nil { + t.Fatalf("Tx pipe is empty after PostBuffers") + } + rxp.Flush() + + d := txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer) + if d == nil { + t.Fatalf("Unable to push to rx pipe") + } + + copy(d, []byte{ + 100, 0, 0, 0, // packet size + 0, 0, 0, 0, // reserved + + 100, 0, 0, 0, 0, 0, 0, 0, // offset 1 + 60, 0, 0, 0, // size 1 + 0, 0, 0, 0, 0, 0, 0, 0, // user data 1 + 53, 4, 0, 0, 0, 0, 0, 0, // ID 1 + + 200, 0, 0, 0, 0, 0, 0, 0, // offset 2 + 40, 0, 0, 0, // size 2 + 0, 0, 0, 0, 0, 0, 0, 0, // user data 2 + 75, 8, 0, 0, 0, 0, 0, 0, // ID 2 + }) + + txp.Flush() + + if _, n := q.Dequeue(nil); n == 0 { + t.Fatalf("Dequeue failed when there is a completion") + } + } +} + +func TestRxEnableNotification(t *testing.T) { + // Check that enabling nofifications results in properly updated state. + pb1 := make([]byte, 100) + pb2 := make([]byte, 100) + + var state uint32 + var q Rx + q.Init(pb1, pb2, &state) + + q.EnableNotification() + if state != eventFDEnabled { + t.Fatalf("Bad value in shared state: got %v, want %v", state, eventFDEnabled) + } +} + +func TestRxDisableNotification(t *testing.T) { + // Check that disabling nofifications results in properly updated state. + pb1 := make([]byte, 100) + pb2 := make([]byte, 100) + + var state uint32 + var q Rx + q.Init(pb1, pb2, &state) + + q.DisableNotification() + if state != eventFDDisabled { + t.Fatalf("Bad value in shared state: got %v, want %v", state, eventFDDisabled) + } +} diff --git a/pkg/tcpip/link/sharedmem/queue/rx.go b/pkg/tcpip/link/sharedmem/queue/rx.go new file mode 100644 index 000000000..91bb57190 --- /dev/null +++ b/pkg/tcpip/link/sharedmem/queue/rx.go @@ -0,0 +1,211 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. 
// Init initializes the receive queue with the given pipes, and shared state
// pointer -- the latter is used to enable/disable eventfd notifications.
//
// tx is the buffer backing the pipe on which buffers are posted to the peer;
// rx is the buffer backing the pipe on which filled buffers come back. The
// pipes operate directly on these slices, which are presumably shared-memory
// mappings (see the sharedmem package) -- they must outlive the queue.
func (r *Rx) Init(tx, rx []byte, sharedEventFDState *uint32) {
	r.sharedEventFDState = sharedEventFDState
	r.tx.Init(tx)
	r.rx.Init(rx)
}
+func (r *Rx) EnableNotification() { + atomic.StoreUint32(r.sharedEventFDState, eventFDEnabled) +} + +// DisableNotification updates the shared state such that the peer will not +// notify the eventfd. +func (r *Rx) DisableNotification() { + atomic.StoreUint32(r.sharedEventFDState, eventFDDisabled) +} + +// PostedBuffersLimit returns the maximum number of buffers that can be posted +// before the tx queue fills up. +func (r *Rx) PostedBuffersLimit() uint64 { + return r.tx.Capacity(sizeOfPostedBuffer) +} + +// PostBuffers makes the given buffers available for receiving data from the +// peer. Once they are posted, the peer is free to write to them and will +// eventually post them back for consumption. +func (r *Rx) PostBuffers(buffers []RxBuffer) bool { + for i := range buffers { + b := r.tx.Push(sizeOfPostedBuffer) + if b == nil { + r.tx.Abort() + return false + } + + pb := &buffers[i] + binary.LittleEndian.PutUint64(b[postedOffset:], pb.Offset) + binary.LittleEndian.PutUint32(b[postedSize:], pb.Size) + binary.LittleEndian.PutUint32(b[postedRemainingInGroup:], 0) + binary.LittleEndian.PutUint64(b[postedUserData:], pb.UserData) + binary.LittleEndian.PutUint64(b[postedID:], pb.ID) + } + + r.tx.Flush() + + return true +} + +// Dequeue receives buffers that have been previously posted by PostBuffers() +// and that have been filled by the peer and posted back. +// +// This is similar to append() in that new buffers are appended to "bufs", with +// reallocation only if "bufs" doesn't have enough capacity. +func (r *Rx) Dequeue(bufs []RxBuffer) ([]RxBuffer, uint32) { + for { + outBufs := bufs + + // Pull the next descriptor from the rx pipe. 
+ b := r.rx.Pull() + if b == nil { + return bufs, 0 + } + + if len(b) < sizeOfConsumedPacketHeader { + log.Warningf("Ignoring packet header: size (%v) is less than header size (%v)", len(b), sizeOfConsumedPacketHeader) + r.rx.Flush() + continue + } + + totalDataSize := binary.LittleEndian.Uint32(b[consumedPacketSize:]) + + // Calculate the number of buffer descriptors and copy them + // over to the output. + count := (len(b) - sizeOfConsumedPacketHeader) / sizeOfConsumedBuffer + offset := sizeOfConsumedPacketHeader + buffersSize := uint32(0) + for i := count; i > 0; i-- { + s := binary.LittleEndian.Uint32(b[offset+consumedSize:]) + buffersSize += s + if buffersSize < s { + // The buffer size overflows an unsigned 32-bit + // integer, so break out and force it to be + // ignored. + totalDataSize = 1 + buffersSize = 0 + break + } + + outBufs = append(outBufs, RxBuffer{ + Offset: binary.LittleEndian.Uint64(b[offset+consumedOffset:]), + Size: s, + ID: binary.LittleEndian.Uint64(b[offset+consumedID:]), + }) + + offset += sizeOfConsumedBuffer + } + + r.rx.Flush() + + if buffersSize < totalDataSize { + // The descriptor is corrupted, ignore it. + log.Warningf("Ignoring packet: actual data size (%v) less than expected size (%v)", buffersSize, totalDataSize) + continue + } + + return outBufs, totalDataSize + } +} + +// Bytes returns the byte slices on which the queue operates. +func (r *Rx) Bytes() (tx, rx []byte) { + return r.tx.Bytes(), r.rx.Bytes() +} + +// DecodeRxBufferHeader decodes the header of a buffer posted on an rx queue. +func DecodeRxBufferHeader(b []byte) RxBuffer { + return RxBuffer{ + Offset: binary.LittleEndian.Uint64(b[postedOffset:]), + Size: binary.LittleEndian.Uint32(b[postedSize:]), + ID: binary.LittleEndian.Uint64(b[postedID:]), + UserData: binary.LittleEndian.Uint64(b[postedUserData:]), + } +} + +// RxCompletionSize returns the number of bytes needed to encode an rx +// completion containing "count" buffers. 
+func RxCompletionSize(count int) uint64 { + return sizeOfConsumedPacketHeader + uint64(count)*sizeOfConsumedBuffer +} + +// EncodeRxCompletion encodes an rx completion header. +func EncodeRxCompletion(b []byte, size, reserved uint32) { + binary.LittleEndian.PutUint32(b[consumedPacketSize:], size) + binary.LittleEndian.PutUint32(b[consumedPacketReserved:], reserved) +} + +// EncodeRxCompletionBuffer encodes the i-th rx completion buffer header. +func EncodeRxCompletionBuffer(b []byte, i int, rxb RxBuffer) { + b = b[RxCompletionSize(i):] + binary.LittleEndian.PutUint64(b[consumedOffset:], rxb.Offset) + binary.LittleEndian.PutUint32(b[consumedSize:], rxb.Size) + binary.LittleEndian.PutUint64(b[consumedUserData:], rxb.UserData) + binary.LittleEndian.PutUint64(b[consumedID:], rxb.ID) +} diff --git a/pkg/tcpip/link/sharedmem/queue/tx.go b/pkg/tcpip/link/sharedmem/queue/tx.go new file mode 100644 index 000000000..b04fb163b --- /dev/null +++ b/pkg/tcpip/link/sharedmem/queue/tx.go @@ -0,0 +1,141 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package queue + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/pipe" +) + +const ( + // Offsets within a packet header. + packetID = 0 + packetSize = 8 + packetReserved = 12 + + sizeOfPacketHeader = 16 + + // Offsets with a buffer descriptor + bufferOffset = 0 + bufferSize = 8 + + sizeOfBufferDescriptor = 12 +) + +// TxBuffer is the descriptor of a transmit buffer. +type TxBuffer struct { + Next *TxBuffer + Offset uint64 + Size uint32 +} + +// Tx is a transmit queue. It is implemented with one tx and one rx pipe: the +// tx pipe is used to request the transmission of packets, while the rx pipe +// is used to receive which transmissions have completed. +// +// This struct is thread-compatible. 
+type Tx struct { + tx pipe.Tx + rx pipe.Rx +} + +// Init initializes the transmit queue with the given pipes. +func (t *Tx) Init(tx, rx []byte) { + t.tx.Init(tx) + t.rx.Init(rx) +} + +// Enqueue queues the given linked list of buffers for transmission as one +// packet. While it is queued, the caller must not modify them. +func (t *Tx) Enqueue(id uint64, totalDataLen, bufferCount uint32, buffer *TxBuffer) bool { + // Reserve room in the tx pipe. + totalLen := sizeOfPacketHeader + uint64(bufferCount)*sizeOfBufferDescriptor + + b := t.tx.Push(totalLen) + if b == nil { + return false + } + + // Initialize the packet and buffer descriptors. + binary.LittleEndian.PutUint64(b[packetID:], id) + binary.LittleEndian.PutUint32(b[packetSize:], totalDataLen) + binary.LittleEndian.PutUint32(b[packetReserved:], 0) + + offset := sizeOfPacketHeader + for i := bufferCount; i != 0; i-- { + binary.LittleEndian.PutUint64(b[offset+bufferOffset:], buffer.Offset) + binary.LittleEndian.PutUint32(b[offset+bufferSize:], buffer.Size) + offset += sizeOfBufferDescriptor + buffer = buffer.Next + } + + t.tx.Flush() + + return true +} + +// CompletedPacket returns the id of the last completed transmission. The +// returned id, if any, refers to a value passed on a previous call to +// Enqueue(). +func (t *Tx) CompletedPacket() (id uint64, ok bool) { + for { + b := t.rx.Pull() + if b == nil { + return 0, false + } + + if len(b) != 8 { + t.rx.Flush() + log.Warningf("Ignoring completed packet: size (%v) is less than expected (%v)", len(b), 8) + continue + } + + v := binary.LittleEndian.Uint64(b) + + t.rx.Flush() + + return v, true + } +} + +// Bytes returns the byte slices on which the queue operates. +func (t *Tx) Bytes() (tx, rx []byte) { + return t.tx.Bytes(), t.rx.Bytes() +} + +// TxPacketInfo holds information about a packet sent on a tx queue. 
// EncodeTxCompletion encodes a tx completion header: the 64-bit packet id in
// little-endian byte order.
func EncodeTxCompletion(b []byte, id uint64) {
	// Slice to exactly 8 bytes first so an undersized destination panics
	// up front, mirroring PutUint64's own bounds check.
	dst := b[:8]
	binary.LittleEndian.PutUint64(dst, id)
}
+func (r *rx) init(mtu uint32, c *QueueConfig) error { + // Map in all buffers. + txPipe, err := getBuffer(c.TxPipeFD) + if err != nil { + return err + } + + rxPipe, err := getBuffer(c.RxPipeFD) + if err != nil { + syscall.Munmap(txPipe) + return err + } + + data, err := getBuffer(c.DataFD) + if err != nil { + syscall.Munmap(txPipe) + syscall.Munmap(rxPipe) + return err + } + + sharedData, err := getBuffer(c.SharedDataFD) + if err != nil { + syscall.Munmap(txPipe) + syscall.Munmap(rxPipe) + syscall.Munmap(data) + return err + } + + // Duplicate the eventFD so that caller can close it but we can still + // use it. + efd, err := syscall.Dup(c.EventFD) + if err != nil { + syscall.Munmap(txPipe) + syscall.Munmap(rxPipe) + syscall.Munmap(data) + syscall.Munmap(sharedData) + return err + } + + // Set the eventfd as non-blocking. + if err := syscall.SetNonblock(efd, true); err != nil { + syscall.Munmap(txPipe) + syscall.Munmap(rxPipe) + syscall.Munmap(data) + syscall.Munmap(sharedData) + syscall.Close(efd) + return err + } + + // Initialize state based on buffers. + r.q.Init(txPipe, rxPipe, sharedDataPointer(sharedData)) + r.data = data + r.eventFD = efd + r.sharedData = sharedData + + return nil +} + +// cleanup releases all resources allocated during init(). It must only be +// called if init() has previously succeeded. +func (r *rx) cleanup() { + a, b := r.q.Bytes() + syscall.Munmap(a) + syscall.Munmap(b) + + syscall.Munmap(r.data) + syscall.Munmap(r.sharedData) + syscall.Close(r.eventFD) +} + +// postAndReceive posts the provided buffers (if any), and then tries to read +// from the receive queue. +// +// Capacity permitting, it reuses the posted buffer slice to store the buffers +// that were read as well. +// +// This function will block if there aren't any available packets. +func (r *rx) postAndReceive(b []queue.RxBuffer, stopRequested *uint32) ([]queue.RxBuffer, uint32) { + // Post the buffers first. If we cannot post, sleep until we can. 
We + // never post more than will fit concurrently, so it's safe to wait + // until enough room is available. + if len(b) != 0 && !r.q.PostBuffers(b) { + r.q.EnableNotification() + for !r.q.PostBuffers(b) { + var tmp [8]byte + rawfile.BlockingRead(r.eventFD, tmp[:]) + if atomic.LoadUint32(stopRequested) != 0 { + r.q.DisableNotification() + return nil, 0 + } + } + r.q.DisableNotification() + } + + // Read the next set of descriptors. + b, n := r.q.Dequeue(b[:0]) + if len(b) != 0 { + return b, n + } + + // Data isn't immediately available. Enable eventfd notifications. + r.q.EnableNotification() + for { + b, n = r.q.Dequeue(b) + if len(b) != 0 { + break + } + + // Wait for notification. + var tmp [8]byte + rawfile.BlockingRead(r.eventFD, tmp[:]) + if atomic.LoadUint32(stopRequested) != 0 { + r.q.DisableNotification() + return nil, 0 + } + } + r.q.DisableNotification() + + return b, n +} diff --git a/pkg/tcpip/link/sharedmem/sharedmem.go b/pkg/tcpip/link/sharedmem/sharedmem.go new file mode 100644 index 000000000..2c0f1b294 --- /dev/null +++ b/pkg/tcpip/link/sharedmem/sharedmem.go @@ -0,0 +1,240 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sharedmem provides the implemention of data-link layer endpoints +// backed by shared memory. +// +// Shared memory endpoints can be used in the networking stack by calling New() +// to create a new endpoint, and then passing it as an argument to +// Stack.CreateNIC(). 
+package sharedmem + +import ( + "sync" + "sync/atomic" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/queue" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +// QueueConfig holds all the file descriptors needed to describe a tx or rx +// queue over shared memory. It is used when creating new shared memory +// endpoints to describe tx and rx queues. +type QueueConfig struct { + // DataFD is a file descriptor for the file that contains the data to + // be transmitted via this queue. Descriptors contain offsets within + // this file. + DataFD int + + // EventFD is a file descriptor for the event that is signaled when + // data is becomes available in this queue. + EventFD int + + // TxPipeFD is a file descriptor for the tx pipe associated with the + // queue. + TxPipeFD int + + // RxPipeFD is a file descriptor for the rx pipe associated with the + // queue. + RxPipeFD int + + // SharedDataFD is a file descriptor for the file that contains shared + // state between the two ends of the queue. This data specifies, for + // example, whether EventFD signaling is enabled or disabled. + SharedDataFD int +} + +type endpoint struct { + // mtu (maximum transmission unit) is the maximum size of a packet. + mtu uint32 + + // bufferSize is the size of each individual buffer. + bufferSize uint32 + + // addr is the local address of this endpoint. + addr tcpip.LinkAddress + + // rx is the receive queue. + rx rx + + // stopRequested is to be accessed atomically only, and determines if + // the worker goroutines should stop. + stopRequested uint32 + + // Wait group used to indicate that all workers have stopped. + completed sync.WaitGroup + + // mu protects the following fields. + mu sync.Mutex + + // tx is the transmit queue. 
+ tx tx + + // workerStarted specifies whether the worker goroutine was started. + workerStarted bool +} + +// New creates a new shared-memory-based endpoint. Buffers will be broken up +// into buffers of "bufferSize" bytes. +func New(mtu, bufferSize uint32, addr tcpip.LinkAddress, tx, rx QueueConfig) (tcpip.LinkEndpointID, error) { + e := &endpoint{ + mtu: mtu, + bufferSize: bufferSize, + addr: addr, + } + + if err := e.tx.init(bufferSize, &tx); err != nil { + return 0, err + } + + if err := e.rx.init(bufferSize, &rx); err != nil { + e.tx.cleanup() + return 0, err + } + + return stack.RegisterLinkEndpoint(e), nil +} + +// Close frees all resources associated with the endpoint. +func (e *endpoint) Close() { + // Tell dispatch goroutine to stop, then write to the eventfd so that + // it wakes up in case it's sleeping. + atomic.StoreUint32(&e.stopRequested, 1) + syscall.Write(e.rx.eventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0}) + + // Cleanup the queues inline if the worker hasn't started yet; we also + // know it won't start from now on because stopRequested is set to 1. + e.mu.Lock() + workerPresent := e.workerStarted + e.mu.Unlock() + + if !workerPresent { + e.tx.cleanup() + e.rx.cleanup() + } +} + +// Wait waits until all workers have stopped after a Close() call. +func (e *endpoint) Wait() { + e.completed.Wait() +} + +// Attach implements stack.LinkEndpoint.Attach. It launches the goroutine that +// reads packets from the rx queue. +func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) { + e.mu.Lock() + if !e.workerStarted && atomic.LoadUint32(&e.stopRequested) == 0 { + e.workerStarted = true + e.completed.Add(1) + go e.dispatchLoop(dispatcher) // S/R-FIXME + } + e.mu.Unlock() +} + +// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized +// during construction. +func (e *endpoint) MTU() uint32 { + return e.mtu - header.EthernetMinimumSize +} + +// Capabilities implements stack.LinkEndpoint.Capabilities. 
+func (*endpoint) Capabilities() stack.LinkEndpointCapabilities { + return 0 +} + +// MaxHeaderLength implements stack.LinkEndpoint.MaxHeaderLength. It returns the +// ethernet frame header size. +func (*endpoint) MaxHeaderLength() uint16 { + return header.EthernetMinimumSize +} + +// LinkAddress implements stack.LinkEndpoint.LinkAddress. It returns the local +// link address. +func (e *endpoint) LinkAddress() tcpip.LinkAddress { + return e.addr +} + +// WritePacket writes outbound packets to the file descriptor. If it is not +// currently writable, the packet is dropped. +func (e *endpoint) WritePacket(r *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.NetworkProtocolNumber) *tcpip.Error { + // Add the ethernet header here. + eth := header.Ethernet(hdr.Prepend(header.EthernetMinimumSize)) + eth.Encode(&header.EthernetFields{ + DstAddr: r.RemoteLinkAddress, + SrcAddr: e.addr, + Type: protocol, + }) + + // Transmit the packet. + e.mu.Lock() + ok := e.tx.transmit(hdr.UsedBytes(), payload) + e.mu.Unlock() + + if !ok { + return tcpip.ErrWouldBlock + } + + return nil +} + +// dispatchLoop reads packets from the rx queue in a loop and dispatches them +// to the network stack. +func (e *endpoint) dispatchLoop(d stack.NetworkDispatcher) { + // Post initial set of buffers. + limit := e.rx.q.PostedBuffersLimit() + if l := uint64(len(e.rx.data)) / uint64(e.bufferSize); limit > l { + limit = l + } + for i := uint64(0); i < limit; i++ { + b := queue.RxBuffer{ + Offset: i * uint64(e.bufferSize), + Size: e.bufferSize, + ID: i, + } + if !e.rx.q.PostBuffers([]queue.RxBuffer{b}) { + log.Warningf("Unable to post %v-th buffer", i) + } + } + + // Read in a loop until a stop is requested. 
+ var rxb []queue.RxBuffer + views := []buffer.View{nil} + vv := buffer.NewVectorisedView(0, views) + for atomic.LoadUint32(&e.stopRequested) == 0 { + var n uint32 + rxb, n = e.rx.postAndReceive(rxb, &e.stopRequested) + + // Copy data from the shared area to its own buffer, then + // prepare to repost the buffer. + b := make([]byte, n) + offset := uint32(0) + for i := range rxb { + copy(b[offset:], e.rx.data[rxb[i].Offset:][:rxb[i].Size]) + offset += rxb[i].Size + + rxb[i].Size = e.bufferSize + } + + if n < header.EthernetMinimumSize { + continue + } + + // Send packet up the stack. + eth := header.Ethernet(b) + views[0] = b[header.EthernetMinimumSize:] + vv.SetSize(int(n) - header.EthernetMinimumSize) + d.DeliverNetworkPacket(e, eth.SourceAddress(), eth.Type(), &vv) + } + + // Clean state. + e.tx.cleanup() + e.rx.cleanup() + + e.completed.Done() +} diff --git a/pkg/tcpip/link/sharedmem/sharedmem_test.go b/pkg/tcpip/link/sharedmem/sharedmem_test.go new file mode 100644 index 000000000..f71e4751f --- /dev/null +++ b/pkg/tcpip/link/sharedmem/sharedmem_test.go @@ -0,0 +1,703 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sharedmem + +import ( + "io/ioutil" + "math/rand" + "os" + "reflect" + "sync" + "syscall" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/pipe" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/queue" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +const ( + localLinkAddr = "\xde\xad\xbe\xef\x56\x78" + remoteLinkAddr = "\xde\xad\xbe\xef\x12\x34" + + queueDataSize = 1024 * 1024 + queuePipeSize = 4096 +) + +type queueBuffers struct { + data []byte + rx pipe.Tx + tx pipe.Rx +} + +func initQueue(t *testing.T, q *queueBuffers, c *QueueConfig) { + // Prepare tx pipe. + b, err := getBuffer(c.TxPipeFD) + if err != nil { + t.Fatalf("getBuffer failed: %v", err) + } + q.tx.Init(b) + + // Prepare rx pipe. + b, err = getBuffer(c.RxPipeFD) + if err != nil { + t.Fatalf("getBuffer failed: %v", err) + } + q.rx.Init(b) + + // Get data slice. 
+ q.data, err = getBuffer(c.DataFD) + if err != nil { + t.Fatalf("getBuffer failed: %v", err) + } +} + +func (q *queueBuffers) cleanup() { + syscall.Munmap(q.tx.Bytes()) + syscall.Munmap(q.rx.Bytes()) + syscall.Munmap(q.data) +} + +type packetInfo struct { + addr tcpip.LinkAddress + proto tcpip.NetworkProtocolNumber + vv buffer.VectorisedView +} + +type testContext struct { + t *testing.T + ep *endpoint + txCfg QueueConfig + rxCfg QueueConfig + txq queueBuffers + rxq queueBuffers + + packetCh chan struct{} + mu sync.Mutex + packets []packetInfo +} + +func newTestContext(t *testing.T, mtu, bufferSize uint32, addr tcpip.LinkAddress) *testContext { + var err error + c := &testContext{ + t: t, + packetCh: make(chan struct{}, 1000000), + } + c.txCfg = createQueueFDs(t, queueSizes{ + dataSize: queueDataSize, + txPipeSize: queuePipeSize, + rxPipeSize: queuePipeSize, + sharedDataSize: 4096, + }) + + c.rxCfg = createQueueFDs(t, queueSizes{ + dataSize: queueDataSize, + txPipeSize: queuePipeSize, + rxPipeSize: queuePipeSize, + sharedDataSize: 4096, + }) + + initQueue(t, &c.txq, &c.txCfg) + initQueue(t, &c.rxq, &c.rxCfg) + + id, err := New(mtu, bufferSize, addr, c.txCfg, c.rxCfg) + if err != nil { + t.Fatalf("New failed: %v", err) + } + + c.ep = stack.FindLinkEndpoint(id).(*endpoint) + c.ep.Attach(c) + + return c +} + +func (c *testContext) DeliverNetworkPacket(_ stack.LinkEndpoint, remoteAddr tcpip.LinkAddress, proto tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) { + c.mu.Lock() + c.packets = append(c.packets, packetInfo{ + addr: remoteAddr, + proto: proto, + vv: vv.Clone(nil), + }) + c.mu.Unlock() + + c.packetCh <- struct{}{} +} + +func (c *testContext) cleanup() { + c.ep.Close() + closeFDs(&c.txCfg) + closeFDs(&c.rxCfg) + c.txq.cleanup() + c.rxq.cleanup() +} + +func (c *testContext) waitForPackets(n int, to <-chan time.Time, errorStr string) { + for i := 0; i < n; i++ { + select { + case <-c.packetCh: + case <-to: + c.t.Fatalf(errorStr) + } + } +} + +func (c 
*testContext) pushRxCompletion(size uint32, bs []queue.RxBuffer) { + b := c.rxq.rx.Push(queue.RxCompletionSize(len(bs))) + queue.EncodeRxCompletion(b, size, 0) + for i := range bs { + queue.EncodeRxCompletionBuffer(b, i, queue.RxBuffer{ + Offset: bs[i].Offset, + Size: bs[i].Size, + ID: bs[i].ID, + }) + } +} + +func randomFill(b []byte) { + for i := range b { + b[i] = byte(rand.Intn(256)) + } +} + +func shuffle(b []int) { + for i := len(b) - 1; i >= 0; i-- { + j := rand.Intn(i + 1) + b[i], b[j] = b[j], b[i] + } +} + +func createFile(t *testing.T, size int64, initQueue bool) int { + tmpDir := os.Getenv("TEST_TMPDIR") + if tmpDir == "" { + tmpDir = os.Getenv("TMPDIR") + } + f, err := ioutil.TempFile(tmpDir, "sharedmem_test") + if err != nil { + t.Fatalf("TempFile failed: %v", err) + } + defer f.Close() + syscall.Unlink(f.Name()) + + if initQueue { + // Write the "slot-free" flag in the initial queue. + _, err := f.WriteAt([]byte{0, 0, 0, 0, 0, 0, 0, 0x80}, 0) + if err != nil { + t.Fatalf("WriteAt failed: %v", err) + } + } + + fd, err := syscall.Dup(int(f.Fd())) + if err != nil { + t.Fatalf("Dup failed: %v", err) + } + + if err := syscall.Ftruncate(fd, size); err != nil { + syscall.Close(fd) + t.Fatalf("Ftruncate failed: %v", err) + } + + return fd +} + +func closeFDs(c *QueueConfig) { + syscall.Close(c.DataFD) + syscall.Close(c.EventFD) + syscall.Close(c.TxPipeFD) + syscall.Close(c.RxPipeFD) + syscall.Close(c.SharedDataFD) +} + +type queueSizes struct { + dataSize int64 + txPipeSize int64 + rxPipeSize int64 + sharedDataSize int64 +} + +func createQueueFDs(t *testing.T, s queueSizes) QueueConfig { + fd, _, err := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, 0, 0) + if err != 0 { + t.Fatalf("eventfd failed: %v", error(err)) + } + + return QueueConfig{ + EventFD: int(fd), + DataFD: createFile(t, s.dataSize, false), + TxPipeFD: createFile(t, s.txPipeSize, true), + RxPipeFD: createFile(t, s.rxPipeSize, true), + SharedDataFD: createFile(t, s.sharedDataSize, false), + } +} + 
// TestSimpleSend sends 1000 packets with random header and payload sizes,
// then checks that the right payload is received on the shared memory queues.
func TestSimpleSend(t *testing.T) {
	c := newTestContext(t, 20000, 1500, localLinkAddr)
	defer c.cleanup()

	// Prepare route.
	r := stack.Route{
		RemoteLinkAddress: remoteLinkAddr,
	}

	for iters := 1000; iters > 0; iters-- {
		// Prepare and send packet.
		n := rand.Intn(10000)
		hdr := buffer.NewPrependable(n + int(c.ep.MaxHeaderLength()))
		hdrBuf := hdr.Prepend(n)
		randomFill(hdrBuf)

		n = rand.Intn(10000)
		buf := buffer.NewView(n)
		randomFill(buf)

		proto := tcpip.NetworkProtocolNumber(rand.Intn(0x10000))
		err := c.ep.WritePacket(&r, &hdr, buf, proto)
		if err != nil {
			t.Fatalf("WritePacket failed: %v", err)
		}

		// Receive packet: pull the descriptor from the tx pipe and
		// reassemble the packet from its buffers.
		desc := c.txq.tx.Pull()
		pi := queue.DecodeTxPacketHeader(desc)
		contents := make([]byte, 0, pi.Size)
		for i := 0; i < pi.BufferCount; i++ {
			bi := queue.DecodeTxBufferHeader(desc, i)
			contents = append(contents, c.txq.data[bi.Offset:][:bi.Size]...)
		}
		c.txq.tx.Flush()

		if pi.Reserved != 0 {
			t.Fatalf("Reserved value is non-zero: 0x%x", pi.Reserved)
		}

		// Check the ethernet header.
		ethTemplate := make(header.Ethernet, header.EthernetMinimumSize)
		ethTemplate.Encode(&header.EthernetFields{
			SrcAddr: localLinkAddr,
			DstAddr: remoteLinkAddr,
			Type:    proto,
		})
		if got := contents[:header.EthernetMinimumSize]; !reflect.DeepEqual(got, []byte(ethTemplate)) {
			t.Fatalf("Bad ethernet header in packet: got %x, want %x", got, ethTemplate)
		}

		// Compare contents skipping the ethernet header added by the
		// endpoint.
		merged := append(hdrBuf, buf...)
		if uint32(len(contents)) < pi.Size {
			t.Fatalf("Sum of buffers is less than packet size: %v < %v", len(contents), pi.Size)
		}
		contents = contents[:pi.Size][header.EthernetMinimumSize:]

		if !reflect.DeepEqual(contents, merged) {
			t.Fatalf("Buffers are different: got %x (%v bytes), want %x (%v bytes)", contents, len(contents), merged, len(merged))
		}

		// Tell the endpoint about the completion of the write.
		b := c.txq.rx.Push(8)
		queue.EncodeTxCompletion(b, pi.ID)
		c.txq.rx.Flush()
	}
}

// TestFillTxQueue sends packets until the queue is full.
func TestFillTxQueue(t *testing.T) {
	c := newTestContext(t, 20000, 1500, localLinkAddr)
	defer c.cleanup()

	// Prepare to send a packet.
	r := stack.Route{
		RemoteLinkAddress: remoteLinkAddr,
	}

	buf := buffer.NewView(100)

	// Each packet uses no more than 40 bytes, so write that many packets
	// until the tx queue is full.
	ids := make(map[uint64]struct{})
	for i := queuePipeSize / 40; i > 0; i-- {
		hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
		if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil {
			t.Fatalf("WritePacket failed unexpectedly: %v", err)
		}

		// Check that they have different IDs.
		desc := c.txq.tx.Pull()
		pi := queue.DecodeTxPacketHeader(desc)
		if _, ok := ids[pi.ID]; ok {
			t.Fatalf("ID (%v) reused", pi.ID)
		}
		ids[pi.ID] = struct{}{}
	}

	// Next attempt to write must fail.
	hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength()))
	err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber)
	if want := tcpip.ErrWouldBlock; err != want {
		t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want)
	}
}

// TestFillTxQueueAfterBadCompletion sends a bad completion, then sends packets
// until the queue is full.
func TestFillTxQueueAfterBadCompletion(t *testing.T) {
	c := newTestContext(t, 20000, 1500, localLinkAddr)
	defer c.cleanup()

	// Send a bad completion.
+ queue.EncodeTxCompletion(c.txq.rx.Push(8), 1) + c.txq.rx.Flush() + + // Prepare to send a packet. + r := stack.Route{ + RemoteLinkAddress: remoteLinkAddr, + } + + buf := buffer.NewView(100) + + // Send two packets so that the id slice has at least two slots. + for i := 2; i > 0; i-- { + hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) + if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil { + t.Fatalf("WritePacket failed unexpectedly: %v", err) + } + } + + // Complete the two writes twice. + for i := 2; i > 0; i-- { + pi := queue.DecodeTxPacketHeader(c.txq.tx.Pull()) + + queue.EncodeTxCompletion(c.txq.rx.Push(8), pi.ID) + queue.EncodeTxCompletion(c.txq.rx.Push(8), pi.ID) + c.txq.rx.Flush() + } + c.txq.tx.Flush() + + // Each packet is uses no more than 40 bytes, so write that many packets + // until the tx queue if full. + ids := make(map[uint64]struct{}) + for i := queuePipeSize / 40; i > 0; i-- { + hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) + if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil { + t.Fatalf("WritePacket failed unexpectedly: %v", err) + } + + // Check that they have different IDs. + desc := c.txq.tx.Pull() + pi := queue.DecodeTxPacketHeader(desc) + if _, ok := ids[pi.ID]; ok { + t.Fatalf("ID (%v) reused", pi.ID) + } + ids[pi.ID] = struct{}{} + } + + // Next attempt to write must fail. + hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) + err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber) + if want := tcpip.ErrWouldBlock; err != want { + t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want) + } +} + +// TestFillTxMemory sends packets until the we run out of shared memory. +func TestFillTxMemory(t *testing.T) { + const bufferSize = 1500 + c := newTestContext(t, 20000, bufferSize, localLinkAddr) + defer c.cleanup() + + // Prepare to send a packet. 
+ r := stack.Route{ + RemoteLinkAddress: remoteLinkAddr, + } + + buf := buffer.NewView(100) + + // Each packet is uses up one buffer, so write as many as possible until + // we fill the memory. + ids := make(map[uint64]struct{}) + for i := queueDataSize / bufferSize; i > 0; i-- { + hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) + if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil { + t.Fatalf("WritePacket failed unexpectedly: %v", err) + } + + // Check that they have different IDs. + desc := c.txq.tx.Pull() + pi := queue.DecodeTxPacketHeader(desc) + if _, ok := ids[pi.ID]; ok { + t.Fatalf("ID (%v) reused", pi.ID) + } + ids[pi.ID] = struct{}{} + c.txq.tx.Flush() + } + + // Next attempt to write must fail. + hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) + err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber) + if want := tcpip.ErrWouldBlock; err != want { + t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want) + } +} + +// TestFillTxMemoryWithMultiBuffer sends packets until the we run out of +// shared memory for a 2-buffer packet, but still with room for a 1-buffer +// packet. +func TestFillTxMemoryWithMultiBuffer(t *testing.T) { + const bufferSize = 1500 + c := newTestContext(t, 20000, bufferSize, localLinkAddr) + defer c.cleanup() + + // Prepare to send a packet. + r := stack.Route{ + RemoteLinkAddress: remoteLinkAddr, + } + + buf := buffer.NewView(100) + + // Each packet is uses up one buffer, so write as many as possible + // until there is only one buffer left. + for i := queueDataSize/bufferSize - 1; i > 0; i-- { + hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) + if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil { + t.Fatalf("WritePacket failed unexpectedly: %v", err) + } + + // Pull the posted buffer. + c.txq.tx.Pull() + c.txq.tx.Flush() + } + + // Attempt to write a two-buffer packet. It must fail. 
+ hdr := buffer.NewPrependable(int(c.ep.MaxHeaderLength())) + err := c.ep.WritePacket(&r, &hdr, buffer.NewView(bufferSize), header.IPv4ProtocolNumber) + if want := tcpip.ErrWouldBlock; err != want { + t.Fatalf("WritePacket return unexpected result: got %v, want %v", err, want) + } + + // Attempt to write a one-buffer packet. It must succeed. + hdr = buffer.NewPrependable(int(c.ep.MaxHeaderLength())) + if err := c.ep.WritePacket(&r, &hdr, buf, header.IPv4ProtocolNumber); err != nil { + t.Fatalf("WritePacket failed unexpectedly: %v", err) + } +} + +func pollPull(t *testing.T, p *pipe.Rx, to <-chan time.Time, errStr string) []byte { + for { + b := p.Pull() + if b != nil { + return b + } + + select { + case <-time.After(10 * time.Millisecond): + case <-to: + t.Fatalf(errStr) + } + } +} + +// TestSimpleReceive completes 1000 different receives with random payload and +// random number of buffers. It checks that the contents match the expected +// values. +func TestSimpleReceive(t *testing.T) { + const bufferSize = 1500 + c := newTestContext(t, 20000, bufferSize, localLinkAddr) + defer c.cleanup() + + // Check that buffers have been posted. + limit := c.ep.rx.q.PostedBuffersLimit() + timeout := time.After(2 * time.Second) + for i := uint64(0); i < limit; i++ { + bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for all buffers to be posted")) + + if want := i * bufferSize; want != bi.Offset { + t.Fatalf("Bad posted offset: got %v, want %v", bi.Offset, want) + } + + if want := i; want != bi.ID { + t.Fatalf("Bad posted ID: got %v, want %v", bi.ID, want) + } + + if bufferSize != bi.Size { + t.Fatalf("Bad posted bufferSize: got %v, want %v", bi.Size, bufferSize) + } + } + c.rxq.tx.Flush() + + // Create a slice with the indices 0..limit-1. + idx := make([]int, limit) + for i := range idx { + idx[i] = i + } + + // Complete random packets 1000 times. + for iters := 1000; iters > 0; iters-- { + // Prepare a random packet. 
		shuffle(idx)
		n := 1 + rand.Intn(10)
		bufs := make([]queue.RxBuffer, n)
		contents := make([]byte, bufferSize*n-rand.Intn(500))
		randomFill(contents)
		for i := range bufs {
			j := idx[i]
			bufs[i].Size = bufferSize
			bufs[i].Offset = uint64(bufferSize * j)
			bufs[i].ID = uint64(j)

			copy(c.rxq.data[bufs[i].Offset:][:bufferSize], contents[i*bufferSize:])
		}

		// Push completion.
		c.pushRxCompletion(uint32(len(contents)), bufs)
		c.rxq.rx.Flush()
		syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})

		// Wait for packet to be received, then check it.
		c.waitForPackets(1, time.After(time.Second), "Error waiting for packet")
		c.mu.Lock()
		rcvd := []byte(c.packets[0].vv.First())
		c.packets = c.packets[:0]
		c.mu.Unlock()

		// The endpoint strips the ethernet header before delivery.
		contents = contents[header.EthernetMinimumSize:]
		if !reflect.DeepEqual(contents, rcvd) {
			t.Fatalf("Unexpected buffer contents: got %x, want %x", rcvd, contents)
		}

		// Check that buffers have been reposted.
		for i := range bufs {
			bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffers to be reposted"))
			if !reflect.DeepEqual(bi, bufs[i]) {
				t.Fatalf("Unexpected buffer reposted: got %x, want %x", bi, bufs[i])
			}
		}
		c.rxq.tx.Flush()
	}
}

// TestRxBuffersReposted tests that rx buffers get reposted after they have been
// completed.
func TestRxBuffersReposted(t *testing.T) {
	const bufferSize = 1500
	c := newTestContext(t, 20000, bufferSize, localLinkAddr)
	defer c.cleanup()

	// Receive all posted buffers.
	limit := c.ep.rx.q.PostedBuffersLimit()
	buffers := make([]queue.RxBuffer, 0, limit)
	timeout := time.After(2 * time.Second)
	for i := limit; i > 0; i-- {
		buffers = append(buffers, queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for all buffers")))
	}
	c.rxq.tx.Flush()

	// Check that all buffers are reposted when individually completed.
	timeout = time.After(2 * time.Second)
	for i := range buffers {
		// Complete the buffer.
		c.pushRxCompletion(buffers[i].Size, buffers[i:][:1])
		c.rxq.rx.Flush()
		syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})

		// Wait for it to be reposted.
		bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted"))
		if !reflect.DeepEqual(bi, buffers[i]) {
			t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[i])
		}
	}
	c.rxq.tx.Flush()

	// Check that all buffers are reposted when completed in pairs.
	timeout = time.After(2 * time.Second)
	for i := 0; i < len(buffers)/2; i++ {
		// Complete with two buffers.
		c.pushRxCompletion(2*bufferSize, buffers[2*i:][:2])
		c.rxq.rx.Flush()
		syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})

		// Wait for them to be reposted.
		bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted"))
		if !reflect.DeepEqual(bi, buffers[2*i]) {
			t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[2*i])
		}
		bi = queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, timeout, "Timeout waiting for buffer to be reposted"))
		if !reflect.DeepEqual(bi, buffers[2*i+1]) {
			t.Fatalf("Different buffer posted: got %v, want %v", bi, buffers[2*i+1])
		}
	}
	c.rxq.tx.Flush()
}

// TestReceivePostingIsFull checks that the endpoint will properly handle the
// case when a received buffer cannot be immediately reposted because it hasn't
// been pulled from the tx pipe yet.
func TestReceivePostingIsFull(t *testing.T) {
	const bufferSize = 1500
	c := newTestContext(t, 20000, bufferSize, localLinkAddr)
	defer c.cleanup()

	// Complete first posted buffer before flushing it from the tx pipe.
	first := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for first buffer to be posted"))
	c.pushRxCompletion(first.Size, []queue.RxBuffer{first})
	c.rxq.rx.Flush()
	syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})

	// Check that packet is received.
	c.waitForPackets(1, time.After(time.Second), "Timeout waiting for completed packet")

	// Complete another buffer.
	second := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for second buffer to be posted"))
	c.pushRxCompletion(second.Size, []queue.RxBuffer{second})
	c.rxq.rx.Flush()
	syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})

	// Check that no packet is received yet, as the worker is blocked trying
	// to repost.
	select {
	case <-time.After(500 * time.Millisecond):
	case <-c.packetCh:
		t.Fatalf("Unexpected packet received")
	}

	// Flush tx queue, which will allow the first buffer to be reposted,
	// and the second completion to be pulled.
	c.rxq.tx.Flush()
	syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})

	// Check that second packet completes.
	c.waitForPackets(1, time.After(time.Second), "Timeout waiting for second completed packet")
}

// TestCloseWhileWaitingToPost closes the endpoint while it is waiting to
// repost a buffer. Make sure it backs out.
func TestCloseWhileWaitingToPost(t *testing.T) {
	const bufferSize = 1500
	c := newTestContext(t, 20000, bufferSize, localLinkAddr)
	cleaned := false
	defer func() {
		if !cleaned {
			c.cleanup()
		}
	}()

	// Complete first posted buffer before flushing it from the tx pipe.
	bi := queue.DecodeRxBufferHeader(pollPull(t, &c.rxq.tx, time.After(time.Second), "Timeout waiting for initial buffer to be posted"))
	c.pushRxCompletion(bi.Size, []queue.RxBuffer{bi})
	c.rxq.rx.Flush()
	syscall.Write(c.rxCfg.EventFD, []byte{1, 0, 0, 0, 0, 0, 0, 0})

	// Wait for packet to be indicated.
	c.waitForPackets(1, time.After(time.Second), "Timeout waiting for completed packet")

	// Cleanup and wait for worker to complete.
	c.cleanup()
	cleaned = true
	c.ep.Wait()
}
diff --git a/pkg/tcpip/link/sharedmem/sharedmem_unsafe.go b/pkg/tcpip/link/sharedmem/sharedmem_unsafe.go
new file mode 100644
index 000000000..52f93f480
--- /dev/null
+++ b/pkg/tcpip/link/sharedmem/sharedmem_unsafe.go
@@ -0,0 +1,15 @@
// Copyright 2016 The Netstack Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sharedmem

import (
	"unsafe"
)

// sharedDataPointer converts the shared data slice into a pointer so that it
// can be used in atomic operations.
func sharedDataPointer(sharedData []byte) *uint32 {
	return (*uint32)(unsafe.Pointer(&sharedData[0:4][0]))
}
diff --git a/pkg/tcpip/link/sharedmem/tx.go b/pkg/tcpip/link/sharedmem/tx.go
new file mode 100644
index 000000000..bca1d79b4
--- /dev/null
+++ b/pkg/tcpip/link/sharedmem/tx.go
@@ -0,0 +1,262 @@
// Copyright 2016 The Netstack Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sharedmem

import (
	"math"
	"syscall"

	"gvisor.googlesource.com/gvisor/pkg/tcpip/link/sharedmem/queue"
)

const (
	// nilID is the sentinel "no ID" value used by idManager's free list.
	nilID = math.MaxUint64
)

// tx holds all state associated with a tx queue.
type tx struct {
	data []byte
	q    queue.Tx
	ids  idManager
	bufs bufferManager
}

// init initializes all state needed by the tx queue based on the information
// provided.
//
// The caller always retains ownership of all file descriptors passed in. The
// queue implementation will duplicate any that it may need in the future.
func (t *tx) init(mtu uint32, c *QueueConfig) error {
	// Map in all buffers.
	txPipe, err := getBuffer(c.TxPipeFD)
	if err != nil {
		return err
	}

	rxPipe, err := getBuffer(c.RxPipeFD)
	if err != nil {
		// Unwind the earlier mapping on failure.
		syscall.Munmap(txPipe)
		return err
	}

	data, err := getBuffer(c.DataFD)
	if err != nil {
		syscall.Munmap(txPipe)
		syscall.Munmap(rxPipe)
		return err
	}

	// Initialize state based on buffers.
	t.q.Init(txPipe, rxPipe)
	t.ids.init()
	t.bufs.init(0, len(data), int(mtu))
	t.data = data

	return nil
}

// cleanup releases all resources allocated during init(). It must only be
// called if init() has previously succeeded.
func (t *tx) cleanup() {
	a, b := t.q.Bytes()
	syscall.Munmap(a)
	syscall.Munmap(b)
	syscall.Munmap(t.data)
}

// transmit sends a packet made up of up to two buffers. Returns a boolean that
// specifies whether the packet was successfully transmitted.
func (t *tx) transmit(a, b []byte) bool {
	// Pull completions from the tx queue and add their buffers back to the
	// pool so that we can reuse them.
	for {
		id, ok := t.q.CompletedPacket()
		if !ok {
			break
		}

		if buf := t.ids.remove(id); buf != nil {
			t.bufs.free(buf)
		}
	}

	bSize := t.bufs.entrySize
	total := uint32(len(a) + len(b))
	// Number of entrySize-sized buffers needed, rounding up.
	bufCount := (total + bSize - 1) / bSize

	// Allocate enough buffers to hold all the data.
	var buf *queue.TxBuffer
	for i := bufCount; i != 0; i-- {
		b := t.bufs.alloc()
		if b == nil {
			// Failed to get all buffers. Return to the pool
			// whatever we had managed to get.
			if buf != nil {
				t.bufs.free(buf)
			}
			return false
		}
		b.Next = buf
		buf = b
	}

	// Copy data into allocated buffers.
	nBuf := buf
	var dBuf []byte
	for _, data := range [][]byte{a, b} {
		for len(data) > 0 {
			if len(dBuf) == 0 {
				dBuf = t.data[nBuf.Offset:][:nBuf.Size]
				nBuf = nBuf.Next
			}
			n := copy(dBuf, data)
			data = data[n:]
			dBuf = dBuf[n:]
		}
	}

	// Get an id for this packet and send it out.
	id := t.ids.add(buf)
	if !t.q.Enqueue(id, total, bufCount, buf) {
		// Enqueue failed; release the id and buffers for reuse.
		t.ids.remove(id)
		t.bufs.free(buf)
		return false
	}

	return true
}

// getBuffer returns a memory region mapped to the full contents of the given
// file descriptor.
func getBuffer(fd int) ([]byte, error) {
	var s syscall.Stat_t
	if err := syscall.Fstat(fd, &s); err != nil {
		return nil, err
	}

	// Check that size doesn't overflow an int.
	if s.Size > int64(^uint(0)>>1) {
		return nil, syscall.EDOM
	}

	return syscall.Mmap(fd, 0, int(s.Size), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED|syscall.MAP_FILE)
}

// idDescriptor is used by idManager to either point to a tx buffer (in case
// the ID is assigned) or to the next free element (if the id is not assigned).
type idDescriptor struct {
	buf      *queue.TxBuffer
	nextFree uint64
}

// idManager is a manager of tx buffer identifiers. It assigns unique IDs to
// tx buffers that are added to it; the IDs can only be reused after they have
// been removed.
//
// The ID assignments are stored so that the tx buffers can be retrieved from
// the IDs previously assigned to them.
type idManager struct {
	// ids is a slice containing all tx buffers. The ID is the index into
	// this slice.
	ids []idDescriptor

	// freeList a list of free IDs.
	freeList uint64
}

// init initializes the id manager.
func (m *idManager) init() {
	m.freeList = nilID
}

// add assigns an ID to the given tx buffer.
func (m *idManager) add(b *queue.TxBuffer) uint64 {
	if i := m.freeList; i != nilID {
		// There is an id available in the free list, just use it.
		m.ids[i].buf = b
		m.freeList = m.ids[i].nextFree
		return i
	}

	// We need to expand the id descriptor.
	m.ids = append(m.ids, idDescriptor{buf: b})
	return uint64(len(m.ids) - 1)
}

// remove retrieves the tx buffer associated with the given ID, and removes the
// ID from the assigned table so that it can be reused in the future.
func (m *idManager) remove(i uint64) *queue.TxBuffer {
	if i >= uint64(len(m.ids)) {
		return nil
	}

	desc := &m.ids[i]
	b := desc.buf
	if b == nil {
		// The provided id is not currently assigned.
		return nil
	}

	desc.buf = nil
	desc.nextFree = m.freeList
	m.freeList = i

	return b
}

// bufferManager manages a buffer region broken up into smaller, equally sized
// buffers. Smaller buffers can be allocated and freed.
type bufferManager struct {
	freeList  *queue.TxBuffer
	curOffset uint64
	limit     uint64
	entrySize uint32
}

// init initializes the buffer manager.
func (b *bufferManager) init(initialOffset, size, entrySize int) {
	b.freeList = nil
	b.curOffset = uint64(initialOffset)
	// limit is rounded down to a whole number of entries.
	b.limit = uint64(initialOffset + size/entrySize*entrySize)
	b.entrySize = uint32(entrySize)
}

// alloc allocates a buffer from the manager, if one is available.
func (b *bufferManager) alloc() *queue.TxBuffer {
	if b.freeList != nil {
		// There is a descriptor ready for reuse in the free list.
		d := b.freeList
		b.freeList = d.Next
		d.Next = nil
		return d
	}

	if b.curOffset < b.limit {
		// There is room available in the never-used range, so create
		// a new descriptor for it.
		d := &queue.TxBuffer{
			Offset: b.curOffset,
			Size:   b.entrySize,
		}
		b.curOffset += uint64(b.entrySize)
		return d
	}

	return nil
}

// free returns all buffers in the list to the buffer manager so that they can
// be reused.
func (b *bufferManager) free(d *queue.TxBuffer) {
	// Find the last buffer in the list.
	last := d
	for last.Next != nil {
		last = last.Next
	}

	// Push list onto free list.
	last.Next = b.freeList
	b.freeList = d
}
diff --git a/pkg/tcpip/link/sniffer/BUILD b/pkg/tcpip/link/sniffer/BUILD
new file mode 100644
index 000000000..a912707c2
--- /dev/null
+++ b/pkg/tcpip/link/sniffer/BUILD
@@ -0,0 +1,23 @@
package(licenses = ["notice"])  # BSD

load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "sniffer",
    srcs = [
        "pcap.go",
        "sniffer.go",
    ],
    importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sniffer",
    visibility = [
        "//visibility:public",
    ],
    deps = [
        "//pkg/log",
        "//pkg/tcpip",
        "//pkg/tcpip/buffer",
        "//pkg/tcpip/header",
        "//pkg/tcpip/link/rawfile",
        "//pkg/tcpip/stack",
    ],
)
diff --git a/pkg/tcpip/link/sniffer/pcap.go b/pkg/tcpip/link/sniffer/pcap.go
new file mode 100644
index 000000000..6d0dd565a
--- /dev/null
+++ b/pkg/tcpip/link/sniffer/pcap.go
@@ -0,0 +1,52 @@
package sniffer

import "time"

// pcapHeader is the file-level header of the pcap format.
type pcapHeader struct {
	// MagicNumber is the file magic number.
	MagicNumber uint32

	// VersionMajor is the major version number.
	VersionMajor uint16

	// VersionMinor is the minor version number.
	VersionMinor uint16

	// Thiszone is the GMT to local correction.
	Thiszone int32

	// Sigfigs is the accuracy of timestamps.
	Sigfigs uint32

	// Snaplen is the max length of captured packets, in octets.
	Snaplen uint32

	// Network is the data link type.
	Network uint32
}

// pcapPacketHeaderLen is the encoded size of pcapPacketHeader.
const pcapPacketHeaderLen = 16

// pcapPacketHeader is the per-packet record header of the pcap format.
type pcapPacketHeader struct {
	// Seconds is the timestamp seconds.
	Seconds uint32

	// Microseconds is the timestamp microseconds.
	Microseconds uint32

	// IncludedLength is the number of octets of packet saved in file.
	IncludedLength uint32

	// OriginalLength is the actual length of packet.
	OriginalLength uint32
}

// newPCAPPacketHeader builds a packet record header stamped with the current
// time.
func newPCAPPacketHeader(incLen, orgLen uint32) pcapPacketHeader {
	now := time.Now()
	return pcapPacketHeader{
		Seconds:        uint32(now.Unix()),
		Microseconds:   uint32(now.Nanosecond() / 1000),
		IncludedLength: incLen,
		OriginalLength: orgLen,
	}
}
diff --git a/pkg/tcpip/link/sniffer/sniffer.go b/pkg/tcpip/link/sniffer/sniffer.go
new file mode 100644
index 000000000..da6969e94
--- /dev/null
+++ b/pkg/tcpip/link/sniffer/sniffer.go
@@ -0,0 +1,310 @@
// Copyright 2016 The Netstack Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package sniffer provides the implementation of data-link layer endpoints that
// wrap another endpoint and logs inbound and outbound packets.
//
// Sniffer endpoints can be used in the networking stack by calling New(eID) to
// create a new endpoint, where eID is the ID of the endpoint being wrapped,
// and then passing it as an argument to Stack.CreateNIC().
package sniffer

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"sync/atomic"
	"time"

	"gvisor.googlesource.com/gvisor/pkg/log"
	"gvisor.googlesource.com/gvisor/pkg/tcpip"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/header"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/link/rawfile"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/stack"
)

// LogPackets is a flag used to enable or disable packet logging via the log
// package. Valid values are 0 or 1.
//
// LogPackets must be accessed atomically.
var LogPackets uint32 = 1

// LogPacketsToFile is a flag used to enable or disable logging packets to a
// pcap file. Valid values are 0 or 1. A file must have been specified when the
// sniffer was created for this flag to have effect.
//
// LogPacketsToFile must be accessed atomically.
var LogPacketsToFile uint32 = 1

type endpoint struct {
	dispatcher stack.NetworkDispatcher
	lower      stack.LinkEndpoint
	file       *os.File
	maxPCAPLen uint32
}

// New creates a new sniffer link-layer endpoint. It wraps around another
// endpoint and logs packets as they traverse the endpoint.
func New(lower tcpip.LinkEndpointID) tcpip.LinkEndpointID {
	return stack.RegisterLinkEndpoint(&endpoint{
		lower: stack.FindLinkEndpoint(lower),
	})
}

// zoneOffset returns the local time zone's offset from GMT in seconds.
func zoneOffset() (int32, error) {
	loc, err := time.LoadLocation("Local")
	if err != nil {
		return 0, err
	}
	date := time.Date(0, 0, 0, 0, 0, 0, 0, loc)
	_, offset := date.Zone()
	return int32(offset), nil
}

// writePCAPHeader writes the pcap file-level header to w.
func writePCAPHeader(w io.Writer, maxLen uint32) error {
	offset, err := zoneOffset()
	if err != nil {
		return err
	}
	return binary.Write(w, binary.BigEndian, pcapHeader{
		// From https://wiki.wireshark.org/Development/LibpcapFileFormat
		MagicNumber: 0xa1b2c3d4,

		VersionMajor: 2,
		VersionMinor: 4,
		Thiszone:     offset,
		Sigfigs:      0,
		Snaplen:      maxLen,
		Network:      101, // LINKTYPE_RAW
	})
}

// NewWithFile creates a new sniffer link-layer endpoint. It wraps around
// another endpoint and logs packets as they traverse the endpoint.
//
// Packets can be logged to file in the pcap format in addition to the standard
// human-readable logs.
//
// snapLen is the maximum amount of a packet to be saved. Packets with a length
// less than or equal to snapLen will be saved in their entirety. Longer
// packets will be truncated to snapLen.
func NewWithFile(lower tcpip.LinkEndpointID, file *os.File, snapLen uint32) (tcpip.LinkEndpointID, error) {
	if err := writePCAPHeader(file, snapLen); err != nil {
		return 0, err
	}
	return stack.RegisterLinkEndpoint(&endpoint{
		lower:      stack.FindLinkEndpoint(lower),
		file:       file,
		maxPCAPLen: snapLen,
	}), nil
}

// DeliverNetworkPacket implements the stack.NetworkDispatcher interface. It is
// called by the link-layer endpoint being wrapped when a packet arrives, and
// logs the packet before forwarding to the actual dispatcher.
func (e *endpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) {
	if atomic.LoadUint32(&LogPackets) == 1 {
		LogPacket("recv", protocol, vv.First(), nil)
	}
	if e.file != nil && atomic.LoadUint32(&LogPacketsToFile) == 1 {
		// Gather up to maxPCAPLen bytes of the packet; slot 0 is
		// reserved for the pcap record header.
		vs := vv.Views()
		bs := make([][]byte, 1, 1+len(vs))
		var length int
		for _, v := range vs {
			if length+len(v) > int(e.maxPCAPLen) {
				l := int(e.maxPCAPLen) - length
				bs = append(bs, []byte(v)[:l])
				length += l
				break
			}
			bs = append(bs, []byte(v))
			length += len(v)
		}
		buf := bytes.NewBuffer(make([]byte, 0, pcapPacketHeaderLen))
		binary.Write(buf, binary.BigEndian, newPCAPPacketHeader(uint32(length), uint32(vv.Size())))
		bs[0] = buf.Bytes()
		if err := rawfile.NonBlockingWriteN(int(e.file.Fd()), bs...); err != nil {
			panic(err)
		}
	}
	e.dispatcher.DeliverNetworkPacket(e, remoteLinkAddr, protocol, vv)
}

// Attach implements the stack.LinkEndpoint interface. It saves the dispatcher
// and registers with the lower endpoint as its dispatcher so that "e" is called
// for inbound packets.
func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) {
	e.dispatcher = dispatcher
	e.lower.Attach(e)
}

// MTU implements stack.LinkEndpoint.MTU. It just forwards the request to the
// lower endpoint.
func (e *endpoint) MTU() uint32 {
	return e.lower.MTU()
}

// Capabilities implements stack.LinkEndpoint.Capabilities. It just forwards the
// request to the lower endpoint.
func (e *endpoint) Capabilities() stack.LinkEndpointCapabilities {
	return e.lower.Capabilities()
}

// MaxHeaderLength implements the stack.LinkEndpoint interface. It just forwards
// the request to the lower endpoint.
func (e *endpoint) MaxHeaderLength() uint16 {
	return e.lower.MaxHeaderLength()
}

// LinkAddress implements the stack.LinkEndpoint interface. It just forwards
// the request to the lower endpoint.
func (e *endpoint) LinkAddress() tcpip.LinkAddress {
	return e.lower.LinkAddress()
}

// WritePacket implements the stack.LinkEndpoint interface. It is called by
// higher-level protocols to write packets; it just logs the packet and forwards
// the request to the lower endpoint.
func (e *endpoint) WritePacket(r *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.NetworkProtocolNumber) *tcpip.Error {
	if atomic.LoadUint32(&LogPackets) == 1 {
		LogPacket("send", protocol, hdr.UsedBytes(), payload)
	}
	if e.file != nil && atomic.LoadUint32(&LogPacketsToFile) == 1 {
		// Slot 0 is reserved for the pcap record header; header and
		// payload are truncated to maxPCAPLen in total.
		bs := [][]byte{nil, hdr.UsedBytes(), payload}
		var length int

		for i, b := range bs[1:] {
			if rem := int(e.maxPCAPLen) - length; len(b) > rem {
				b = b[:rem]
			}
			bs[i+1] = b
			length += len(b)
		}

		buf := bytes.NewBuffer(make([]byte, 0, pcapPacketHeaderLen))
		binary.Write(buf, binary.BigEndian, newPCAPPacketHeader(uint32(length), uint32(hdr.UsedLength()+len(payload))))
		bs[0] = buf.Bytes()
		if err := rawfile.NonBlockingWriteN(int(e.file.Fd()), bs...); err != nil {
			panic(err)
		}
	}
	return e.lower.WritePacket(r, hdr, payload, protocol)
}

// LogPacket logs the given packet.
func LogPacket(prefix string, protocol tcpip.NetworkProtocolNumber, b, plb []byte) {
	// Figure out the network layer info.
+ var transProto uint8 + src := tcpip.Address("unknown") + dst := tcpip.Address("unknown") + id := 0 + size := uint16(0) + switch protocol { + case header.IPv4ProtocolNumber: + ipv4 := header.IPv4(b) + src = ipv4.SourceAddress() + dst = ipv4.DestinationAddress() + transProto = ipv4.Protocol() + size = ipv4.TotalLength() - uint16(ipv4.HeaderLength()) + b = b[ipv4.HeaderLength():] + id = int(ipv4.ID()) + + case header.IPv6ProtocolNumber: + ipv6 := header.IPv6(b) + src = ipv6.SourceAddress() + dst = ipv6.DestinationAddress() + transProto = ipv6.NextHeader() + size = ipv6.PayloadLength() + b = b[header.IPv6MinimumSize:] + + case header.ARPProtocolNumber: + arp := header.ARP(b) + log.Infof( + "%s arp %v (%v) -> %v (%v) valid:%v", + prefix, + tcpip.Address(arp.ProtocolAddressSender()), tcpip.LinkAddress(arp.HardwareAddressSender()), + tcpip.Address(arp.ProtocolAddressTarget()), tcpip.LinkAddress(arp.HardwareAddressTarget()), + arp.IsValid(), + ) + return + default: + log.Infof("%s unknown network protocol", prefix) + return + } + + // Figure out the transport layer info. 
+ transName := "unknown" + srcPort := uint16(0) + dstPort := uint16(0) + details := "" + switch tcpip.TransportProtocolNumber(transProto) { + case header.ICMPv4ProtocolNumber: + transName = "icmp" + icmp := header.ICMPv4(b) + icmpType := "unknown" + switch icmp.Type() { + case header.ICMPv4EchoReply: + icmpType = "echo reply" + case header.ICMPv4DstUnreachable: + icmpType = "destination unreachable" + case header.ICMPv4SrcQuench: + icmpType = "source quench" + case header.ICMPv4Redirect: + icmpType = "redirect" + case header.ICMPv4Echo: + icmpType = "echo" + case header.ICMPv4TimeExceeded: + icmpType = "time exceeded" + case header.ICMPv4ParamProblem: + icmpType = "param problem" + case header.ICMPv4Timestamp: + icmpType = "timestamp" + case header.ICMPv4TimestampReply: + icmpType = "timestamp reply" + case header.ICMPv4InfoRequest: + icmpType = "info request" + case header.ICMPv4InfoReply: + icmpType = "info reply" + } + log.Infof("%s %s %v -> %v %s len:%d id:%04x code:%d", prefix, transName, src, dst, icmpType, size, id, icmp.Code()) + return + + case header.UDPProtocolNumber: + transName = "udp" + udp := header.UDP(b) + srcPort = udp.SourcePort() + dstPort = udp.DestinationPort() + size -= header.UDPMinimumSize + + details = fmt.Sprintf("xsum: 0x%x", udp.Checksum()) + + case header.TCPProtocolNumber: + transName = "tcp" + tcp := header.TCP(b) + srcPort = tcp.SourcePort() + dstPort = tcp.DestinationPort() + size -= uint16(tcp.DataOffset()) + + // Initialize the TCP flags. 
+ flags := tcp.Flags() + flagsStr := []byte("FSRPAU") + for i := range flagsStr { + if flags&(1<<uint(i)) == 0 { + flagsStr[i] = ' ' + } + } + details = fmt.Sprintf("flags:0x%02x (%v) seqnum: %v ack: %v win: %v xsum:0x%x", flags, string(flagsStr), tcp.SequenceNumber(), tcp.AckNumber(), tcp.WindowSize(), tcp.Checksum()) + if flags&header.TCPFlagSyn != 0 { + details += fmt.Sprintf(" options: %+v", header.ParseSynOptions(tcp.Options(), flags&header.TCPFlagAck != 0)) + } else { + details += fmt.Sprintf(" options: %+v", tcp.ParsedOptions()) + } + default: + log.Infof("%s %v -> %v unknown transport protocol: %d", prefix, src, dst, transProto) + return + } + + log.Infof("%s %s %v:%v -> %v:%v len:%d id:%04x %s", prefix, transName, src, srcPort, dst, dstPort, size, id, details) +} diff --git a/pkg/tcpip/link/tun/BUILD b/pkg/tcpip/link/tun/BUILD new file mode 100644 index 000000000..d627f00f1 --- /dev/null +++ b/pkg/tcpip/link/tun/BUILD @@ -0,0 +1,12 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "tun", + srcs = ["tun_unsafe.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/tun", + visibility = [ + "//visibility:public", + ], +) diff --git a/pkg/tcpip/link/tun/tun_unsafe.go b/pkg/tcpip/link/tun/tun_unsafe.go new file mode 100644 index 000000000..5b6c9b4ab --- /dev/null +++ b/pkg/tcpip/link/tun/tun_unsafe.go @@ -0,0 +1,50 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tun + +import ( + "syscall" + "unsafe" +) + +// Open opens the specified TUN device, sets it to non-blocking mode, and +// returns its file descriptor. +func Open(name string) (int, error) { + return open(name, syscall.IFF_TUN|syscall.IFF_NO_PI) +} + +// OpenTAP opens the specified TAP device, sets it to non-blocking mode, and +// returns its file descriptor. 
+func OpenTAP(name string) (int, error) { + return open(name, syscall.IFF_TAP|syscall.IFF_NO_PI) +} + +func open(name string, flags uint16) (int, error) { + fd, err := syscall.Open("/dev/net/tun", syscall.O_RDWR, 0) + if err != nil { + return -1, err + } + + var ifr struct { + name [16]byte + flags uint16 + _ [22]byte + } + + copy(ifr.name[:], name) + ifr.flags = flags + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TUNSETIFF, uintptr(unsafe.Pointer(&ifr))) + if errno != 0 { + syscall.Close(fd) + return -1, errno + } + + if err = syscall.SetNonblock(fd, true); err != nil { + syscall.Close(fd) + return -1, err + } + + return fd, nil +} diff --git a/pkg/tcpip/link/waitable/BUILD b/pkg/tcpip/link/waitable/BUILD new file mode 100644 index 000000000..63b648be7 --- /dev/null +++ b/pkg/tcpip/link/waitable/BUILD @@ -0,0 +1,33 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "waitable", + srcs = [ + "waitable.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/link/waitable", + visibility = [ + "//visibility:public", + ], + deps = [ + "//pkg/gate", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/stack", + ], +) + +go_test( + name = "waitable_test", + srcs = [ + "waitable_test.go", + ], + embed = [":waitable"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/stack", + ], +) diff --git a/pkg/tcpip/link/waitable/waitable.go b/pkg/tcpip/link/waitable/waitable.go new file mode 100644 index 000000000..2c6e73f22 --- /dev/null +++ b/pkg/tcpip/link/waitable/waitable.go @@ -0,0 +1,108 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package waitable provides the implementation of data-link layer endpoints +// that wrap other endpoints, and can wait for inflight calls to WritePacket or +// DeliverNetworkPacket to finish (and new ones to be prevented). +// +// Waitable endpoints can be used in the networking stack by calling New(eID) to +// create a new endpoint, where eID is the ID of the endpoint being wrapped, +// and then passing it as an argument to Stack.CreateNIC(). +package waitable + +import ( + "gvisor.googlesource.com/gvisor/pkg/gate" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +// Endpoint is a waitable link-layer endpoint. +type Endpoint struct { + dispatchGate gate.Gate + dispatcher stack.NetworkDispatcher + + writeGate gate.Gate + lower stack.LinkEndpoint +} + +// New creates a new waitable link-layer endpoint. It wraps around another +// endpoint and allows the caller to block new write/dispatch calls and wait for +// the inflight ones to finish before returning. +func New(lower tcpip.LinkEndpointID) (tcpip.LinkEndpointID, *Endpoint) { + e := &Endpoint{ + lower: stack.FindLinkEndpoint(lower), + } + return stack.RegisterLinkEndpoint(e), e +} + +// DeliverNetworkPacket implements stack.NetworkDispatcher.DeliverNetworkPacket. +// It is called by the link-layer endpoint being wrapped when a packet arrives, +// and only forwards to the actual dispatcher if Wait or WaitDispatch haven't +// been called. +func (e *Endpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) { + if !e.dispatchGate.Enter() { + return + } + + e.dispatcher.DeliverNetworkPacket(e, remoteLinkAddr, protocol, vv) + e.dispatchGate.Leave() +} + +// Attach implements stack.LinkEndpoint.Attach. 
It saves the dispatcher and +// registers with the lower endpoint as its dispatcher so that "e" is called +// for inbound packets. +func (e *Endpoint) Attach(dispatcher stack.NetworkDispatcher) { + e.dispatcher = dispatcher + e.lower.Attach(e) +} + +// MTU implements stack.LinkEndpoint.MTU. It just forwards the request to the +// lower endpoint. +func (e *Endpoint) MTU() uint32 { + return e.lower.MTU() +} + +// Capabilities implements stack.LinkEndpoint.Capabilities. It just forwards the +// request to the lower endpoint. +func (e *Endpoint) Capabilities() stack.LinkEndpointCapabilities { + return e.lower.Capabilities() +} + +// MaxHeaderLength implements stack.LinkEndpoint.MaxHeaderLength. It just +// forwards the request to the lower endpoint. +func (e *Endpoint) MaxHeaderLength() uint16 { + return e.lower.MaxHeaderLength() +} + +// LinkAddress implements stack.LinkEndpoint.LinkAddress. It just forwards the +// request to the lower endpoint. +func (e *Endpoint) LinkAddress() tcpip.LinkAddress { + return e.lower.LinkAddress() +} + +// WritePacket implements stack.LinkEndpoint.WritePacket. It is called by +// higher-level protocols to write packets. It only forwards packets to the +// lower endpoint if Wait or WaitWrite haven't been called. +func (e *Endpoint) WritePacket(r *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.NetworkProtocolNumber) *tcpip.Error { + if !e.writeGate.Enter() { + return nil + } + + err := e.lower.WritePacket(r, hdr, payload, protocol) + e.writeGate.Leave() + return err +} + +// WaitWrite prevents new calls to WritePacket from reaching the lower endpoint, +// and waits for inflight ones to finish before returning. +func (e *Endpoint) WaitWrite() { + e.writeGate.Close() +} + +// WaitDispatch prevents new calls to DeliverNetworkPacket from reaching the +// actual dispatcher, and waits for inflight ones to finish before returning. 
+func (e *Endpoint) WaitDispatch() { + e.dispatchGate.Close() +} diff --git a/pkg/tcpip/link/waitable/waitable_test.go b/pkg/tcpip/link/waitable/waitable_test.go new file mode 100644 index 000000000..cb433dc19 --- /dev/null +++ b/pkg/tcpip/link/waitable/waitable_test.go @@ -0,0 +1,144 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package waitable + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +type countedEndpoint struct { + dispatchCount int + writeCount int + attachCount int + + mtu uint32 + capabilities stack.LinkEndpointCapabilities + hdrLen uint16 + linkAddr tcpip.LinkAddress + + dispatcher stack.NetworkDispatcher +} + +func (e *countedEndpoint) DeliverNetworkPacket(linkEP stack.LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) { + e.dispatchCount++ +} + +func (e *countedEndpoint) Attach(dispatcher stack.NetworkDispatcher) { + e.attachCount++ + e.dispatcher = dispatcher +} + +func (e *countedEndpoint) MTU() uint32 { + return e.mtu +} + +func (e *countedEndpoint) Capabilities() stack.LinkEndpointCapabilities { + return e.capabilities +} + +func (e *countedEndpoint) MaxHeaderLength() uint16 { + return e.hdrLen +} + +func (e *countedEndpoint) LinkAddress() tcpip.LinkAddress { + return e.linkAddr +} + +func (e *countedEndpoint) WritePacket(r *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.NetworkProtocolNumber) *tcpip.Error { + e.writeCount++ + return nil +} + +func TestWaitWrite(t *testing.T) { + ep := &countedEndpoint{} + _, wep := New(stack.RegisterLinkEndpoint(ep)) + + // Write and check that it goes through. 
+ wep.WritePacket(nil, nil, nil, 0) + if want := 1; ep.writeCount != want { + t.Fatalf("Unexpected writeCount: got=%v, want=%v", ep.writeCount, want) + } + + // Wait on dispatches, then try to write. It must go through. + wep.WaitDispatch() + wep.WritePacket(nil, nil, nil, 0) + if want := 2; ep.writeCount != want { + t.Fatalf("Unexpected writeCount: got=%v, want=%v", ep.writeCount, want) + } + + // Wait on writes, then try to write. It must not go through. + wep.WaitWrite() + wep.WritePacket(nil, nil, nil, 0) + if want := 2; ep.writeCount != want { + t.Fatalf("Unexpected writeCount: got=%v, want=%v", ep.writeCount, want) + } +} + +func TestWaitDispatch(t *testing.T) { + ep := &countedEndpoint{} + _, wep := New(stack.RegisterLinkEndpoint(ep)) + + // Check that attach happens. + wep.Attach(ep) + if want := 1; ep.attachCount != want { + t.Fatalf("Unexpected attachCount: got=%v, want=%v", ep.attachCount, want) + } + + // Dispatch and check that it goes through. + ep.dispatcher.DeliverNetworkPacket(ep, "", 0, nil) + if want := 1; ep.dispatchCount != want { + t.Fatalf("Unexpected dispatchCount: got=%v, want=%v", ep.dispatchCount, want) + } + + // Wait on writes, then try to dispatch. It must go through. + wep.WaitWrite() + ep.dispatcher.DeliverNetworkPacket(ep, "", 0, nil) + if want := 2; ep.dispatchCount != want { + t.Fatalf("Unexpected dispatchCount: got=%v, want=%v", ep.dispatchCount, want) + } + + // Wait on dispatches, then try to dispatch. It must not go through. 
+ wep.WaitDispatch() + ep.dispatcher.DeliverNetworkPacket(ep, "", 0, nil) + if want := 2; ep.dispatchCount != want { + t.Fatalf("Unexpected dispatchCount: got=%v, want=%v", ep.dispatchCount, want) + } +} + +func TestOtherMethods(t *testing.T) { + const ( + mtu = 0xdead + capabilities = 0xbeef + hdrLen = 0x1234 + linkAddr = "test address" + ) + ep := &countedEndpoint{ + mtu: mtu, + capabilities: capabilities, + hdrLen: hdrLen, + linkAddr: linkAddr, + } + _, wep := New(stack.RegisterLinkEndpoint(ep)) + + if v := wep.MTU(); v != mtu { + t.Fatalf("Unexpected mtu: got=%v, want=%v", v, mtu) + } + + if v := wep.Capabilities(); v != capabilities { + t.Fatalf("Unexpected capabilities: got=%v, want=%v", v, capabilities) + } + + if v := wep.MaxHeaderLength(); v != hdrLen { + t.Fatalf("Unexpected MaxHeaderLength: got=%v, want=%v", v, hdrLen) + } + + if v := wep.LinkAddress(); v != linkAddr { + t.Fatalf("Unexpected LinkAddress: got=%q, want=%q", v, linkAddr) + } +} diff --git a/pkg/tcpip/network/BUILD b/pkg/tcpip/network/BUILD new file mode 100644 index 000000000..36ddaa692 --- /dev/null +++ b/pkg/tcpip/network/BUILD @@ -0,0 +1,19 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "ip_test", + size = "small", + srcs = [ + "ip_test.go", + ], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/network/ipv6", + "//pkg/tcpip/stack", + ], +) diff --git a/pkg/tcpip/network/arp/BUILD b/pkg/tcpip/network/arp/BUILD new file mode 100644 index 000000000..e6d0899a9 --- /dev/null +++ b/pkg/tcpip/network/arp/BUILD @@ -0,0 +1,34 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "arp", + srcs = ["arp.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/network/arp", + visibility = [ + "//visibility:public", + ], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + 
"//pkg/tcpip/header", + "//pkg/tcpip/stack", + ], +) + +go_test( + name = "arp_test", + size = "small", + srcs = ["arp_test.go"], + deps = [ + ":arp", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/link/channel", + "//pkg/tcpip/link/sniffer", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/stack", + ], +) diff --git a/pkg/tcpip/network/arp/arp.go b/pkg/tcpip/network/arp/arp.go new file mode 100644 index 000000000..4e3d7f597 --- /dev/null +++ b/pkg/tcpip/network/arp/arp.go @@ -0,0 +1,170 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package arp implements the ARP network protocol. It is used to resolve +// IPv4 addresses into link-local MAC addresses, and advertises IPv4 +// addresses of its stack with the local network. +// +// To use it in the networking stack, pass arp.ProtocolName as one of the +// network protocols when calling stack.New. Then add an "arp" address to +// every NIC on the stack that should respond to ARP requests. That is: +// +// if err := s.AddAddress(1, arp.ProtocolNumber, "arp"); err != nil { +// // handle err +// } +package arp + +import ( + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +const ( + // ProtocolName is the string representation of the ARP protocol name. + ProtocolName = "arp" + + // ProtocolNumber is the ARP protocol number. + ProtocolNumber = header.ARPProtocolNumber + + // ProtocolAddress is the address expected by the ARP endpoint. + ProtocolAddress = tcpip.Address("arp") +) + +// endpoint implements stack.NetworkEndpoint. 
+type endpoint struct { + nicid tcpip.NICID + addr tcpip.Address + linkEP stack.LinkEndpoint + linkAddrCache stack.LinkAddressCache +} + +func (e *endpoint) MTU() uint32 { + lmtu := e.linkEP.MTU() + return lmtu - uint32(e.MaxHeaderLength()) +} + +func (e *endpoint) NICID() tcpip.NICID { + return e.nicid +} + +func (e *endpoint) Capabilities() stack.LinkEndpointCapabilities { + return e.linkEP.Capabilities() +} + +func (e *endpoint) ID() *stack.NetworkEndpointID { + return &stack.NetworkEndpointID{ProtocolAddress} +} + +func (e *endpoint) MaxHeaderLength() uint16 { + return e.linkEP.MaxHeaderLength() + header.ARPSize +} + +func (e *endpoint) Close() {} + +func (e *endpoint) WritePacket(r *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.TransportProtocolNumber) *tcpip.Error { + return tcpip.ErrNotSupported +} + +func (e *endpoint) HandlePacket(r *stack.Route, vv *buffer.VectorisedView) { + v := vv.First() + h := header.ARP(v) + if !h.IsValid() { + return + } + + switch h.Op() { + case header.ARPRequest: + localAddr := tcpip.Address(h.ProtocolAddressTarget()) + if e.linkAddrCache.CheckLocalAddress(e.nicid, header.IPv4ProtocolNumber, localAddr) == 0 { + return // we have no useful answer, ignore the request + } + hdr := buffer.NewPrependable(int(e.linkEP.MaxHeaderLength()) + header.ARPSize) + pkt := header.ARP(hdr.Prepend(header.ARPSize)) + pkt.SetIPv4OverEthernet() + pkt.SetOp(header.ARPReply) + copy(pkt.HardwareAddressSender(), r.LocalLinkAddress[:]) + copy(pkt.ProtocolAddressSender(), h.ProtocolAddressTarget()) + copy(pkt.ProtocolAddressTarget(), h.ProtocolAddressSender()) + e.linkEP.WritePacket(r, &hdr, nil, ProtocolNumber) + fallthrough // also fill the cache from requests + case header.ARPReply: + addr := tcpip.Address(h.ProtocolAddressSender()) + linkAddr := tcpip.LinkAddress(h.HardwareAddressSender()) + e.linkAddrCache.AddLinkAddress(e.nicid, addr, linkAddr) + } +} + +// protocol implements stack.NetworkProtocol and 
stack.LinkAddressResolver. +type protocol struct { +} + +func (p *protocol) Number() tcpip.NetworkProtocolNumber { return ProtocolNumber } +func (p *protocol) MinimumPacketSize() int { return header.ARPSize } + +func (*protocol) ParseAddresses(v buffer.View) (src, dst tcpip.Address) { + h := header.ARP(v) + return tcpip.Address(h.ProtocolAddressSender()), ProtocolAddress +} + +func (p *protocol) NewEndpoint(nicid tcpip.NICID, addr tcpip.Address, linkAddrCache stack.LinkAddressCache, dispatcher stack.TransportDispatcher, sender stack.LinkEndpoint) (stack.NetworkEndpoint, *tcpip.Error) { + if addr != ProtocolAddress { + return nil, tcpip.ErrBadLocalAddress + } + return &endpoint{ + nicid: nicid, + addr: addr, + linkEP: sender, + linkAddrCache: linkAddrCache, + }, nil +} + +// LinkAddressProtocol implements stack.LinkAddressResolver. +func (*protocol) LinkAddressProtocol() tcpip.NetworkProtocolNumber { + return header.IPv4ProtocolNumber +} + +// LinkAddressRequest implements stack.LinkAddressResolver. +func (*protocol) LinkAddressRequest(addr, localAddr tcpip.Address, linkEP stack.LinkEndpoint) *tcpip.Error { + r := &stack.Route{ + RemoteLinkAddress: broadcastMAC, + } + + hdr := buffer.NewPrependable(int(linkEP.MaxHeaderLength()) + header.ARPSize) + h := header.ARP(hdr.Prepend(header.ARPSize)) + h.SetIPv4OverEthernet() + h.SetOp(header.ARPRequest) + copy(h.HardwareAddressSender(), linkEP.LinkAddress()) + copy(h.ProtocolAddressSender(), localAddr) + copy(h.ProtocolAddressTarget(), addr) + + return linkEP.WritePacket(r, &hdr, nil, ProtocolNumber) +} + +// ResolveStaticAddress implements stack.LinkAddressResolver. +func (*protocol) ResolveStaticAddress(addr tcpip.Address) (tcpip.LinkAddress, bool) { + if addr == "\xff\xff\xff\xff" { + return broadcastMAC, true + } + return "", false +} + +// SetOption implements NetworkProtocol. 
+func (p *protocol) SetOption(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +// Option implements NetworkProtocol. +func (p *protocol) Option(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +var broadcastMAC = tcpip.LinkAddress([]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}) + +func init() { + stack.RegisterNetworkProtocolFactory(ProtocolName, func() stack.NetworkProtocol { + return &protocol{} + }) +} diff --git a/pkg/tcpip/network/arp/arp_test.go b/pkg/tcpip/network/arp/arp_test.go new file mode 100644 index 000000000..91ffdce4b --- /dev/null +++ b/pkg/tcpip/network/arp/arp_test.go @@ -0,0 +1,138 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arp_test + +import ( + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/channel" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sniffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/arp" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +const ( + stackLinkAddr = tcpip.LinkAddress("\x0a\x0a\x0b\x0b\x0c\x0c") + stackAddr1 = tcpip.Address("\x0a\x00\x00\x01") + stackAddr2 = tcpip.Address("\x0a\x00\x00\x02") + stackAddrBad = tcpip.Address("\x0a\x00\x00\x03") +) + +type testContext struct { + t *testing.T + linkEP *channel.Endpoint + s *stack.Stack +} + +func newTestContext(t *testing.T) *testContext { + s := stack.New([]string{ipv4.ProtocolName, arp.ProtocolName}, []string{ipv4.PingProtocolName}) + + const defaultMTU = 65536 + id, linkEP := channel.New(256, defaultMTU, stackLinkAddr) + if testing.Verbose() { + id = sniffer.New(id) + } + if err := s.CreateNIC(1, id); err != nil { + 
t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, ipv4.ProtocolNumber, stackAddr1); err != nil { + t.Fatalf("AddAddress for ipv4 failed: %v", err) + } + if err := s.AddAddress(1, ipv4.ProtocolNumber, stackAddr2); err != nil { + t.Fatalf("AddAddress for ipv4 failed: %v", err) + } + if err := s.AddAddress(1, arp.ProtocolNumber, arp.ProtocolAddress); err != nil { + t.Fatalf("AddAddress for arp failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{{ + Destination: "\x00\x00\x00\x00", + Mask: "\x00\x00\x00\x00", + Gateway: "", + NIC: 1, + }}) + + return &testContext{ + t: t, + s: s, + linkEP: linkEP, + } +} + +func (c *testContext) cleanup() { + close(c.linkEP.C) +} + +func TestDirectRequest(t *testing.T) { + c := newTestContext(t) + defer c.cleanup() + + const senderMAC = "\x01\x02\x03\x04\x05\x06" + const senderIPv4 = "\x0a\x00\x00\x02" + + v := make(buffer.View, header.ARPSize) + h := header.ARP(v) + h.SetIPv4OverEthernet() + h.SetOp(header.ARPRequest) + copy(h.HardwareAddressSender(), senderMAC) + copy(h.ProtocolAddressSender(), senderIPv4) + + // stackAddr1 + copy(h.ProtocolAddressTarget(), stackAddr1) + vv := v.ToVectorisedView([1]buffer.View{}) + c.linkEP.Inject(arp.ProtocolNumber, &vv) + pkt := <-c.linkEP.C + if pkt.Proto != arp.ProtocolNumber { + t.Fatalf("stackAddr1: expected ARP response, got network protocol number %v", pkt.Proto) + } + rep := header.ARP(pkt.Header) + if !rep.IsValid() { + t.Fatalf("stackAddr1: invalid ARP response len(pkt.Header)=%d", len(pkt.Header)) + } + if tcpip.Address(rep.ProtocolAddressSender()) != stackAddr1 { + t.Errorf("stackAddr1: expected sender to be set") + } + if got := tcpip.LinkAddress(rep.HardwareAddressSender()); got != stackLinkAddr { + t.Errorf("stackAddr1: expected sender to be stackLinkAddr, got %q", got) + } + + // stackAddr2 + copy(h.ProtocolAddressTarget(), stackAddr2) + vv = v.ToVectorisedView([1]buffer.View{}) + c.linkEP.Inject(arp.ProtocolNumber, &vv) + pkt = <-c.linkEP.C + if pkt.Proto 
 != arp.ProtocolNumber { + t.Fatalf("stackAddr2: expected ARP response, got network protocol number %v", pkt.Proto) + } + rep = header.ARP(pkt.Header) + if !rep.IsValid() { + t.Fatalf("stackAddr2: invalid ARP response len(pkt.Header)=%d", len(pkt.Header)) + } + if tcpip.Address(rep.ProtocolAddressSender()) != stackAddr2 { + t.Errorf("stackAddr2: expected sender to be set") + } + if got := tcpip.LinkAddress(rep.HardwareAddressSender()); got != stackLinkAddr { + t.Errorf("stackAddr2: expected sender to be stackLinkAddr, got %q", got) + } + + // stackAddrBad + copy(h.ProtocolAddressTarget(), stackAddrBad) + vv = v.ToVectorisedView([1]buffer.View{}) + c.linkEP.Inject(arp.ProtocolNumber, &vv) + select { + case pkt := <-c.linkEP.C: + t.Errorf("stackAddrBad: unexpected packet sent, Proto=%v", pkt.Proto) + case <-time.After(100 * time.Millisecond): + // Sleep tests are gross, but this will only + // potentially fail flakily if there's a bug. + // If there is no bug this will reliably succeed. + } +} diff --git a/pkg/tcpip/network/fragmentation/BUILD b/pkg/tcpip/network/fragmentation/BUILD new file mode 100644 index 000000000..78fe878ec --- /dev/null +++ b/pkg/tcpip/network/fragmentation/BUILD @@ -0,0 +1,61 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "fragmentation_state", + srcs = ["reassembler_list.go"], + out = "fragmentation_state.go", + package = "fragmentation", +) + +go_template_instance( + name = "reassembler_list", + out = "reassembler_list.go", + package = "fragmentation", + prefix = "reassembler", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*reassembler", + }, +) + +go_library( + name = "fragmentation", + srcs = [ + "frag_heap.go", + "fragmentation.go", + "fragmentation_state.go", + "reassembler.go", + "reassembler_list.go", + ], + importpath 
 = "gvisor.googlesource.com/gvisor/pkg/tcpip/network/fragmentation", + visibility = ["//:sandbox"], + deps = [ + "//pkg/log", + "//pkg/state", + "//pkg/tcpip/buffer", + ], +) + +go_test( + name = "fragmentation_test", + size = "small", + srcs = [ + "frag_heap_test.go", + "fragmentation_test.go", + "reassembler_test.go", + ], + embed = [":fragmentation"], + deps = ["//pkg/tcpip/buffer"], +) + +filegroup( + name = "autogen", + srcs = [ + "reassembler_list.go", + ], + visibility = ["//:sandbox"], +) diff --git a/pkg/tcpip/network/fragmentation/frag_heap.go b/pkg/tcpip/network/fragmentation/frag_heap.go new file mode 100644 index 000000000..2e8512909 --- /dev/null +++ b/pkg/tcpip/network/fragmentation/frag_heap.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fragmentation + +import ( + "container/heap" + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" +) + +type fragment struct { + offset uint16 + vv *buffer.VectorisedView +} + +type fragHeap []fragment + +func (h *fragHeap) Len() int { + return len(*h) +} + +func (h *fragHeap) Less(i, j int) bool { + return (*h)[i].offset < (*h)[j].offset +} + +func (h *fragHeap) Swap(i, j int) { + (*h)[i], (*h)[j] = (*h)[j], (*h)[i] +} + +func (h *fragHeap) Push(x interface{}) { + *h = append(*h, x.(fragment)) +} + +func (h *fragHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[:n-1] + return x +} + +// reassemble empties the heap and returns a VectorisedView +// containing a reassembled version of the fragments inside the heap. 
+func (h *fragHeap) reassemble() (buffer.VectorisedView, error) { + curr := heap.Pop(h).(fragment) + views := curr.vv.Views() + size := curr.vv.Size() + + if curr.offset != 0 { + return buffer.NewVectorisedView(0, nil), fmt.Errorf("offset of the first packet is != 0 (%d)", curr.offset) + } + + for h.Len() > 0 { + curr := heap.Pop(h).(fragment) + if int(curr.offset) < size { + curr.vv.TrimFront(size - int(curr.offset)) + } else if int(curr.offset) > size { + return buffer.NewVectorisedView(0, nil), fmt.Errorf("packet has a hole, expected offset %d, got %d", size, curr.offset) + } + size += curr.vv.Size() + views = append(views, curr.vv.Views()...) + } + return buffer.NewVectorisedView(size, views), nil +} diff --git a/pkg/tcpip/network/fragmentation/frag_heap_test.go b/pkg/tcpip/network/fragmentation/frag_heap_test.go new file mode 100644 index 000000000..218a24d7b --- /dev/null +++ b/pkg/tcpip/network/fragmentation/frag_heap_test.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fragmentation + +import ( + "container/heap" + "reflect" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" +) + +var reassambleTestCases = []struct { + comment string + in []fragment + want *buffer.VectorisedView +}{ + { + comment: "Non-overlapping in-order", + in: []fragment{ + {offset: 0, vv: vv(1, "0")}, + {offset: 1, vv: vv(1, "1")}, + }, + want: vv(2, "0", "1"), + }, + { + comment: "Non-overlapping out-of-order", + in: []fragment{ + {offset: 1, vv: vv(1, "1")}, + {offset: 0, vv: vv(1, "0")}, + }, + want: vv(2, "0", "1"), + }, + { + comment: "Duplicated packets", + in: []fragment{ + {offset: 0, vv: vv(1, "0")}, + {offset: 0, vv: vv(1, "0")}, + }, + want: vv(1, "0"), + }, + { + comment: "Overlapping in-order", + in: []fragment{ + {offset: 0, vv: vv(2, "01")}, + {offset: 1, vv: vv(2, "12")}, + }, + want: vv(3, "01", "2"), + }, + { + comment: "Overlapping out-of-order", + in: []fragment{ + {offset: 1, vv: vv(2, "12")}, + {offset: 0, vv: vv(2, "01")}, + }, + want: vv(3, "01", "2"), + }, + { + comment: "Overlapping subset in-order", + in: []fragment{ + {offset: 0, vv: vv(3, "012")}, + {offset: 1, vv: vv(1, "1")}, + }, + want: vv(3, "012"), + }, + { + comment: "Overlapping subset out-of-order", + in: []fragment{ + {offset: 1, vv: vv(1, "1")}, + {offset: 0, vv: vv(3, "012")}, + }, + want: vv(3, "012"), + }, +} + +func TestReassamble(t *testing.T) { + for _, c := range reassambleTestCases { + h := (fragHeap)(make([]fragment, 0, 8)) + heap.Init(&h) + for _, f := range c.in { + heap.Push(&h, f) + } + got, _ := h.reassemble() + + if !reflect.DeepEqual(got, *c.want) { + t.Errorf("Test \"%s\" reassembling failed. Got %v. 
Want %v", c.comment, got, *c.want) + } + } +} + +func TestReassambleFailsForNonZeroOffset(t *testing.T) { + h := (fragHeap)(make([]fragment, 0, 8)) + heap.Init(&h) + heap.Push(&h, fragment{offset: 1, vv: vv(1, "0")}) + _, err := h.reassemble() + if err == nil { + t.Errorf("reassemble() did not fail when the first packet had offset != 0") + } +} + +func TestReassambleFailsForHoles(t *testing.T) { + h := (fragHeap)(make([]fragment, 0, 8)) + heap.Init(&h) + heap.Push(&h, fragment{offset: 0, vv: vv(1, "0")}) + heap.Push(&h, fragment{offset: 2, vv: vv(1, "1")}) + _, err := h.reassemble() + if err == nil { + t.Errorf("reassemble() did not fail when there was a hole in the packet") + } +} diff --git a/pkg/tcpip/network/fragmentation/fragmentation.go b/pkg/tcpip/network/fragmentation/fragmentation.go new file mode 100644 index 000000000..a309a24c5 --- /dev/null +++ b/pkg/tcpip/network/fragmentation/fragmentation.go @@ -0,0 +1,124 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fragmentation contains the implementation of IP fragmentation. +// It is based on RFC 791 and RFC 815. +package fragmentation + +import ( + "log" + "sync" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" +) + +// DefaultReassembleTimeout is based on the linux stack: net.ipv4.ipfrag_time. +const DefaultReassembleTimeout = 30 * time.Second + +// HighFragThreshold is the threshold at which we start trimming old +// fragmented packets. Linux uses a default value of 4 MB. See +// net.ipv4.ipfrag_high_thresh for more information. +const HighFragThreshold = 4 << 20 // 4MB + +// LowFragThreshold is the threshold we reach to when we start dropping +// older fragmented packets. It's important that we keep enough room for newer +// packets to be re-assembled. Hence, this needs to be lower than +// HighFragThreshold enough. Linux uses a default value of 3 MB. 
See +// net.ipv4.ipfrag_low_thresh for more information. +const LowFragThreshold = 3 << 20 // 3MB + +// Fragmentation is the main structure that other modules +// of the stack should use to implement IP Fragmentation. +type Fragmentation struct { + mu sync.Mutex + highLimit int + lowLimit int + reassemblers map[uint32]*reassembler + rList reassemblerList + size int + timeout time.Duration +} + +// NewFragmentation creates a new Fragmentation. +// +// highMemoryLimit specifies the limit on the memory consumed +// by the fragments stored by Fragmentation (overhead of internal data-structures +// is not accounted). Fragments are dropped when the limit is reached. +// +// lowMemoryLimit specifies the limit on which we will reach by dropping +// fragments after reaching highMemoryLimit. +// +// reassemblingTimeout specifes the maximum time allowed to reassemble a packet. +// Fragments are lazily evicted only when a new a packet with an +// already existing fragmentation-id arrives after the timeout. +func NewFragmentation(highMemoryLimit, lowMemoryLimit int, reassemblingTimeout time.Duration) *Fragmentation { + if lowMemoryLimit >= highMemoryLimit { + lowMemoryLimit = highMemoryLimit + } + + if lowMemoryLimit < 0 { + lowMemoryLimit = 0 + } + + return &Fragmentation{ + reassemblers: make(map[uint32]*reassembler), + highLimit: highMemoryLimit, + lowLimit: lowMemoryLimit, + timeout: reassemblingTimeout, + } +} + +// Process processes an incoming fragment beloning to an ID +// and returns a complete packet when all the packets belonging to that ID have been received. +func (f *Fragmentation) Process(id uint32, first, last uint16, more bool, vv *buffer.VectorisedView) (buffer.VectorisedView, bool) { + f.mu.Lock() + r, ok := f.reassemblers[id] + if ok && r.tooOld(f.timeout) { + // This is very likely to be an id-collision or someone performing a slow-rate attack. 
+ f.release(r) + ok = false + } + if !ok { + r = newReassembler(id) + f.reassemblers[id] = r + f.rList.PushFront(r) + } + f.mu.Unlock() + + res, done, consumed := r.process(first, last, more, vv) + + f.mu.Lock() + f.size += consumed + if done { + f.release(r) + } + // Evict reassemblers if we are consuming more memory than highLimit until + // we reach lowLimit. + if f.size > f.highLimit { + tail := f.rList.Back() + for f.size > f.lowLimit && tail != nil { + f.release(tail) + tail = tail.Prev() + } + } + f.mu.Unlock() + return res, done +} + +func (f *Fragmentation) release(r *reassembler) { + // Before releasing a fragment we need to check if r is already marked as done. + // Otherwise, we would delete it twice. + if r.checkDoneOrMark() { + return + } + + delete(f.reassemblers, r.id) + f.rList.Remove(r) + f.size -= r.size + if f.size < 0 { + log.Printf("memory counter < 0 (%d), this is an accounting bug that requires investigation", f.size) + f.size = 0 + } +} diff --git a/pkg/tcpip/network/fragmentation/fragmentation_test.go b/pkg/tcpip/network/fragmentation/fragmentation_test.go new file mode 100644 index 000000000..2f0200d26 --- /dev/null +++ b/pkg/tcpip/network/fragmentation/fragmentation_test.go @@ -0,0 +1,166 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fragmentation + +import ( + "reflect" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" +) + +// vv is a helper to build VectorisedView from different strings. 
+func vv(size int, pieces ...string) *buffer.VectorisedView { + views := make([]buffer.View, len(pieces)) + for i, p := range pieces { + views[i] = []byte(p) + } + + vv := buffer.NewVectorisedView(size, views) + return &vv +} + +func emptyVv() *buffer.VectorisedView { + vv := buffer.NewVectorisedView(0, nil) + return &vv +} + +type processInput struct { + id uint32 + first uint16 + last uint16 + more bool + vv *buffer.VectorisedView +} + +type processOutput struct { + vv *buffer.VectorisedView + done bool +} + +var processTestCases = []struct { + comment string + in []processInput + out []processOutput +}{ + { + comment: "One ID", + in: []processInput{ + {id: 0, first: 0, last: 1, more: true, vv: vv(2, "01")}, + {id: 0, first: 2, last: 3, more: false, vv: vv(2, "23")}, + }, + out: []processOutput{ + {vv: emptyVv(), done: false}, + {vv: vv(4, "01", "23"), done: true}, + }, + }, + { + comment: "Two IDs", + in: []processInput{ + {id: 0, first: 0, last: 1, more: true, vv: vv(2, "01")}, + {id: 1, first: 0, last: 1, more: true, vv: vv(2, "ab")}, + {id: 1, first: 2, last: 3, more: false, vv: vv(2, "cd")}, + {id: 0, first: 2, last: 3, more: false, vv: vv(2, "23")}, + }, + out: []processOutput{ + {vv: emptyVv(), done: false}, + {vv: emptyVv(), done: false}, + {vv: vv(4, "ab", "cd"), done: true}, + {vv: vv(4, "01", "23"), done: true}, + }, + }, +} + +func TestFragmentationProcess(t *testing.T) { + for _, c := range processTestCases { + f := NewFragmentation(1024, 512, DefaultReassembleTimeout) + for i, in := range c.in { + vv, done := f.Process(in.id, in.first, in.last, in.more, in.vv) + if !reflect.DeepEqual(vv, *(c.out[i].vv)) { + t.Errorf("Test \"%s\" Process() returned a wrong vv. Got %v. Want %v", c.comment, vv, *(c.out[i].vv)) + } + if done != c.out[i].done { + t.Errorf("Test \"%s\" Process() returned a wrong done. Got %t. 
Want %t", c.comment, done, c.out[i].done) + } + if c.out[i].done { + if _, ok := f.reassemblers[in.id]; ok { + t.Errorf("Test \"%s\" Process() didn't remove buffer from reassemblers.", c.comment) + } + for n := f.rList.Front(); n != nil; n = n.Next() { + if n.id == in.id { + t.Errorf("Test \"%s\" Process() didn't remove buffer from rList.", c.comment) + } + } + } + } + } +} + +func TestReassemblingTimeout(t *testing.T) { + timeout := time.Millisecond + f := NewFragmentation(1024, 512, timeout) + // Send first fragment with id = 0, first = 0, last = 0, and more = true. + f.Process(0, 0, 0, true, vv(1, "0")) + // Sleep more than the timeout. + time.Sleep(2 * timeout) + // Send another fragment that completes a packet. + // However, no packet should be reassembled because the fragment arrived after the timeout. + _, done := f.Process(0, 1, 1, false, vv(1, "1")) + if done { + t.Errorf("Fragmentation does not respect the reassembling timeout.") + } +} + +func TestMemoryLimits(t *testing.T) { + f := NewFragmentation(3, 1, DefaultReassembleTimeout) + // Send first fragment with id = 0. + f.Process(0, 0, 0, true, vv(1, "0")) + // Send first fragment with id = 1. + f.Process(1, 0, 0, true, vv(1, "1")) + // Send first fragment with id = 2. + f.Process(2, 0, 0, true, vv(1, "2")) + + // Send first fragment with id = 3. This should caused id = 0 and id = 1 to be + // evicted. + f.Process(3, 0, 0, true, vv(1, "3")) + + if _, ok := f.reassemblers[0]; ok { + t.Errorf("Memory limits are not respected: id=0 has not been evicted.") + } + if _, ok := f.reassemblers[1]; ok { + t.Errorf("Memory limits are not respected: id=1 has not been evicted.") + } + if _, ok := f.reassemblers[3]; !ok { + t.Errorf("Implementation of memory limits is wrong: id=3 is not present.") + } +} + +func TestMemoryLimitsIgnoresDuplicates(t *testing.T) { + f := NewFragmentation(1, 0, DefaultReassembleTimeout) + // Send first fragment with id = 0. 
+ f.Process(0, 0, 0, true, vv(1, "0")) + // Send the same packet again. + f.Process(0, 0, 0, true, vv(1, "0")) + + got := f.size + want := 1 + if got != want { + t.Errorf("Wrong size, duplicates are not handled correctly: got=%d, want=%d.", got, want) + } +} + +func TestFragmentationViewsDoNotEscape(t *testing.T) { + f := NewFragmentation(1024, 512, DefaultReassembleTimeout) + in := vv(2, "0", "1") + f.Process(0, 0, 1, true, in) + // Modify input view. + in.RemoveFirst() + got, _ := f.Process(0, 2, 2, false, vv(1, "2")) + want := vv(3, "0", "1", "2") + if !reflect.DeepEqual(got, *want) { + t.Errorf("Process() returned a wrong vv. Got %v. Want %v", got, *want) + } +} diff --git a/pkg/tcpip/network/fragmentation/reassembler.go b/pkg/tcpip/network/fragmentation/reassembler.go new file mode 100644 index 000000000..0267a575d --- /dev/null +++ b/pkg/tcpip/network/fragmentation/reassembler.go @@ -0,0 +1,109 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fragmentation + +import ( + "container/heap" + "fmt" + "math" + "sync" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" +) + +type hole struct { + first uint16 + last uint16 + deleted bool +} + +type reassembler struct { + reassemblerEntry + id uint32 + size int + mu sync.Mutex + holes []hole + deleted int + heap fragHeap + done bool + creationTime time.Time +} + +func newReassembler(id uint32) *reassembler { + r := &reassembler{ + id: id, + holes: make([]hole, 0, 16), + deleted: 0, + heap: make(fragHeap, 0, 8), + creationTime: time.Now(), + } + r.holes = append(r.holes, hole{ + first: 0, + last: math.MaxUint16, + deleted: false}) + return r +} + +// updateHoles updates the list of holes for an incoming fragment and +// returns true iff the fragment filled at least part of an existing hole. 
+func (r *reassembler) updateHoles(first, last uint16, more bool) bool { + used := false + for i := range r.holes { + if r.holes[i].deleted || first > r.holes[i].last || last < r.holes[i].first { + continue + } + used = true + r.deleted++ + r.holes[i].deleted = true + if first > r.holes[i].first { + r.holes = append(r.holes, hole{r.holes[i].first, first - 1, false}) + } + if last < r.holes[i].last && more { + r.holes = append(r.holes, hole{last + 1, r.holes[i].last, false}) + } + } + return used +} + +func (r *reassembler) process(first, last uint16, more bool, vv *buffer.VectorisedView) (buffer.VectorisedView, bool, int) { + r.mu.Lock() + defer r.mu.Unlock() + consumed := 0 + if r.done { + // A concurrent goroutine might have already reassembled + // the packet and emptied the heap while this goroutine + // was waiting on the mutex. We don't have to do anything in this case. + return buffer.NewVectorisedView(0, nil), false, consumed + } + if r.updateHoles(first, last, more) { + // We store the incoming packet only if it filled some holes. + uu := vv.Clone(nil) + heap.Push(&r.heap, fragment{offset: first, vv: &uu}) + consumed = vv.Size() + r.size += consumed + } + // Check if all the holes have been deleted and we are ready to reassamble. + if r.deleted < len(r.holes) { + return buffer.NewVectorisedView(0, nil), false, consumed + } + res, err := r.heap.reassemble() + if err != nil { + panic(fmt.Sprintf("reassemble failed with: %v. 
There is probably a bug in the code handling the holes.", err)) + } + return res, true, consumed +} + +func (r *reassembler) tooOld(timeout time.Duration) bool { + return time.Now().Sub(r.creationTime) > timeout +} + +func (r *reassembler) checkDoneOrMark() bool { + r.mu.Lock() + prev := r.done + r.done = true + r.mu.Unlock() + return prev +} diff --git a/pkg/tcpip/network/fragmentation/reassembler_test.go b/pkg/tcpip/network/fragmentation/reassembler_test.go new file mode 100644 index 000000000..b64604383 --- /dev/null +++ b/pkg/tcpip/network/fragmentation/reassembler_test.go @@ -0,0 +1,95 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fragmentation + +import ( + "math" + "reflect" + "testing" +) + +type updateHolesInput struct { + first uint16 + last uint16 + more bool +} + +var holesTestCases = []struct { + comment string + in []updateHolesInput + want []hole +}{ + { + comment: "No fragments. Expected holes: {[0 -> inf]}.", + in: []updateHolesInput{}, + want: []hole{{first: 0, last: math.MaxUint16, deleted: false}}, + }, + { + comment: "One fragment at beginning. Expected holes: {[2, inf]}.", + in: []updateHolesInput{{first: 0, last: 1, more: true}}, + want: []hole{ + {first: 0, last: math.MaxUint16, deleted: true}, + {first: 2, last: math.MaxUint16, deleted: false}, + }, + }, + { + comment: "One fragment in the middle. Expected holes: {[0, 0], [3, inf]}.", + in: []updateHolesInput{{first: 1, last: 2, more: true}}, + want: []hole{ + {first: 0, last: math.MaxUint16, deleted: true}, + {first: 0, last: 0, deleted: false}, + {first: 3, last: math.MaxUint16, deleted: false}, + }, + }, + { + comment: "One fragment at the end. 
Expected holes: {[0, 0]}.", + in: []updateHolesInput{{first: 1, last: 2, more: false}}, + want: []hole{ + {first: 0, last: math.MaxUint16, deleted: true}, + {first: 0, last: 0, deleted: false}, + }, + }, + { + comment: "One fragment completing a packet. Expected holes: {}.", + in: []updateHolesInput{{first: 0, last: 1, more: false}}, + want: []hole{ + {first: 0, last: math.MaxUint16, deleted: true}, + }, + }, + { + comment: "Two non-overlapping fragments completing a packet. Expected holes: {}.", + in: []updateHolesInput{ + {first: 0, last: 1, more: true}, + {first: 2, last: 3, more: false}, + }, + want: []hole{ + {first: 0, last: math.MaxUint16, deleted: true}, + {first: 2, last: math.MaxUint16, deleted: true}, + }, + }, + { + comment: "Two overlapping fragments completing a packet. Expected holes: {}.", + in: []updateHolesInput{ + {first: 0, last: 2, more: true}, + {first: 2, last: 3, more: false}, + }, + want: []hole{ + {first: 0, last: math.MaxUint16, deleted: true}, + {first: 3, last: math.MaxUint16, deleted: true}, + }, + }, +} + +func TestUpdateHoles(t *testing.T) { + for _, c := range holesTestCases { + r := newReassembler(0) + for _, i := range c.in { + r.updateHoles(i.first, i.last, i.more) + } + if !reflect.DeepEqual(r.holes, c.want) { + t.Errorf("Test \"%s\" produced unexepetced holes. Got %v. 
Want %v", c.comment, r.holes, c.want) + } + } +} diff --git a/pkg/tcpip/network/hash/BUILD b/pkg/tcpip/network/hash/BUILD new file mode 100644 index 000000000..96805c690 --- /dev/null +++ b/pkg/tcpip/network/hash/BUILD @@ -0,0 +1,11 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "hash", + srcs = ["hash.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/network/hash", + visibility = ["//visibility:public"], + deps = ["//pkg/tcpip/header"], +) diff --git a/pkg/tcpip/network/hash/hash.go b/pkg/tcpip/network/hash/hash.go new file mode 100644 index 000000000..e5a696158 --- /dev/null +++ b/pkg/tcpip/network/hash/hash.go @@ -0,0 +1,83 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hash contains utility functions for hashing. +package hash + +import ( + "crypto/rand" + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" +) + +var hashIV = RandN32(1)[0] + +// RandN32 generates a slice of n cryptographic random 32-bit numbers. +func RandN32(n int) []uint32 { + b := make([]byte, 4*n) + if _, err := rand.Read(b); err != nil { + panic("unable to get random numbers: " + err.Error()) + } + r := make([]uint32, n) + for i := range r { + r[i] = binary.LittleEndian.Uint32(b[4*i : (4*i + 4)]) + } + return r +} + +// Hash3Words calculates the Jenkins hash of 3 32-bit words. This is adapted +// from linux. 
+func Hash3Words(a, b, c, initval uint32) uint32 { + const iv = 0xdeadbeef + (3 << 2) + initval += iv + + a += initval + b += initval + c += initval + + c ^= b + c -= rol32(b, 14) + a ^= c + a -= rol32(c, 11) + b ^= a + b -= rol32(a, 25) + c ^= b + c -= rol32(b, 16) + a ^= c + a -= rol32(c, 4) + b ^= a + b -= rol32(a, 14) + c ^= b + c -= rol32(b, 24) + + return c +} + +// IPv4FragmentHash computes the hash of the IPv4 fragment as suggested in RFC 791. +func IPv4FragmentHash(h header.IPv4) uint32 { + x := uint32(h.ID())<<16 | uint32(h.Protocol()) + t := h.SourceAddress() + y := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 + t = h.DestinationAddress() + z := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 + return Hash3Words(x, y, z, hashIV) +} + +// IPv6FragmentHash computes the hash of the ipv6 fragment. +// Unlike IPv4, the protocol is not used to compute the hash. +// RFC 2640 (sec 4.5) is not very sharp on this aspect. +// As a reference, also Linux ignores the protocol to compute +// the hash (inet6_hash_frag). +func IPv6FragmentHash(h header.IPv6, f header.IPv6Fragment) uint32 { + t := h.SourceAddress() + y := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 + t = h.DestinationAddress() + z := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 + return Hash3Words(f.ID(), y, z, hashIV) +} + +func rol32(v, shift uint32) uint32 { + return (v << shift) | (v >> ((-shift) & 31)) +} diff --git a/pkg/tcpip/network/ip_test.go b/pkg/tcpip/network/ip_test.go new file mode 100644 index 000000000..797501858 --- /dev/null +++ b/pkg/tcpip/network/ip_test.go @@ -0,0 +1,560 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ip_test + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv6" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +// testObject implements two interfaces: LinkEndpoint and TransportDispatcher. +// The former is used to pretend that it's a link endpoint so that we can +// inspect packets written by the network endpoints. The latter is used to +// pretend that it's the network stack so that it can inspect incoming packets +// that have been handled by the network endpoints. +// +// Packets are checked by comparing their fields/values against the expected +// values stored in the test object itself. +type testObject struct { + t *testing.T + protocol tcpip.TransportProtocolNumber + contents []byte + srcAddr tcpip.Address + dstAddr tcpip.Address + v4 bool + typ stack.ControlType + extra uint32 + + dataCalls int + controlCalls int +} + +// checkValues verifies that the transport protocol, data contents, src & dst +// addresses of a packet match what's expected. If any field doesn't match, the +// test fails. 
+func (t *testObject) checkValues(protocol tcpip.TransportProtocolNumber, vv *buffer.VectorisedView, srcAddr, dstAddr tcpip.Address) { + v := vv.ToView() + if protocol != t.protocol { + t.t.Errorf("protocol = %v, want %v", protocol, t.protocol) + } + + if srcAddr != t.srcAddr { + t.t.Errorf("srcAddr = %v, want %v", srcAddr, t.srcAddr) + } + + if dstAddr != t.dstAddr { + t.t.Errorf("dstAddr = %v, want %v", dstAddr, t.dstAddr) + } + + if len(v) != len(t.contents) { + t.t.Fatalf("len(payload) = %v, want %v", len(v), len(t.contents)) + } + + for i := range t.contents { + if t.contents[i] != v[i] { + t.t.Fatalf("payload[%v] = %v, want %v", i, v[i], t.contents[i]) + } + } +} + +// DeliverTransportPacket is called by network endpoints after parsing incoming +// packets. This is used by the test object to verify that the results of the +// parsing are expected. +func (t *testObject) DeliverTransportPacket(r *stack.Route, protocol tcpip.TransportProtocolNumber, vv *buffer.VectorisedView) { + t.checkValues(protocol, vv, r.RemoteAddress, r.LocalAddress) + t.dataCalls++ +} + +// DeliverTransportControlPacket is called by network endpoints after parsing +// incoming control (ICMP) packets. This is used by the test object to verify +// that the results of the parsing are expected. +func (t *testObject) DeliverTransportControlPacket(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, typ stack.ControlType, extra uint32, vv *buffer.VectorisedView) { + t.checkValues(trans, vv, remote, local) + if typ != t.typ { + t.t.Errorf("typ = %v, want %v", typ, t.typ) + } + if extra != t.extra { + t.t.Errorf("extra = %v, want %v", extra, t.extra) + } + t.controlCalls++ +} + +// Attach is only implemented to satisfy the LinkEndpoint interface. +func (*testObject) Attach(stack.NetworkDispatcher) {} + +// MTU implements stack.LinkEndpoint.MTU. It just returns a constant that +// matches the linux loopback MTU. 
+func (*testObject) MTU() uint32 { + return 65536 +} + +// Capabilities implements stack.LinkEndpoint.Capabilities. +func (*testObject) Capabilities() stack.LinkEndpointCapabilities { + return 0 +} + +// MaxHeaderLength is only implemented to satisfy the LinkEndpoint interface. +func (*testObject) MaxHeaderLength() uint16 { + return 0 +} + +// LinkAddress returns the link address of this endpoint. +func (*testObject) LinkAddress() tcpip.LinkAddress { + return "" +} + +// WritePacket is called by network endpoints after producing a packet and +// writing it to the link endpoint. This is used by the test object to verify +// that the produced packet is as expected. +func (t *testObject) WritePacket(_ *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.NetworkProtocolNumber) *tcpip.Error { + var prot tcpip.TransportProtocolNumber + var srcAddr tcpip.Address + var dstAddr tcpip.Address + + if t.v4 { + h := header.IPv4(hdr.UsedBytes()) + prot = tcpip.TransportProtocolNumber(h.Protocol()) + srcAddr = h.SourceAddress() + dstAddr = h.DestinationAddress() + + } else { + h := header.IPv6(hdr.UsedBytes()) + prot = tcpip.TransportProtocolNumber(h.NextHeader()) + srcAddr = h.SourceAddress() + dstAddr = h.DestinationAddress() + } + var views [1]buffer.View + vv := payload.ToVectorisedView(views) + t.checkValues(prot, &vv, srcAddr, dstAddr) + return nil +} + +func TestIPv4Send(t *testing.T) { + o := testObject{t: t, v4: true} + proto := ipv4.NewProtocol() + ep, err := proto.NewEndpoint(1, "\x0a\x00\x00\x01", nil, nil, &o) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + // Allocate and initialize the payload view. + payload := buffer.NewView(100) + for i := 0; i < len(payload); i++ { + payload[i] = uint8(i) + } + + // Allocate the header buffer. + hdr := buffer.NewPrependable(int(ep.MaxHeaderLength())) + + // Issue the write. 
+ o.protocol = 123 + o.srcAddr = "\x0a\x00\x00\x01" + o.dstAddr = "\x0a\x00\x00\x02" + o.contents = payload + + r := stack.Route{ + RemoteAddress: o.dstAddr, + LocalAddress: o.srcAddr, + } + if err := ep.WritePacket(&r, &hdr, payload, 123); err != nil { + t.Fatalf("WritePacket failed: %v", err) + } +} + +func TestIPv4Receive(t *testing.T) { + o := testObject{t: t, v4: true} + proto := ipv4.NewProtocol() + ep, err := proto.NewEndpoint(1, "\x0a\x00\x00\x01", nil, &o, nil) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + totalLen := header.IPv4MinimumSize + 30 + view := buffer.NewView(totalLen) + ip := header.IPv4(view) + ip.Encode(&header.IPv4Fields{ + IHL: header.IPv4MinimumSize, + TotalLength: uint16(totalLen), + TTL: 20, + Protocol: 10, + SrcAddr: "\x0a\x00\x00\x02", + DstAddr: "\x0a\x00\x00\x01", + }) + + // Make payload be non-zero. + for i := header.IPv4MinimumSize; i < totalLen; i++ { + view[i] = uint8(i) + } + + // Give packet to ipv4 endpoint, dispatcher will validate that it's ok. 
+ o.protocol = 10 + o.srcAddr = "\x0a\x00\x00\x02" + o.dstAddr = "\x0a\x00\x00\x01" + o.contents = view[header.IPv4MinimumSize:totalLen] + + r := stack.Route{ + LocalAddress: o.dstAddr, + RemoteAddress: o.srcAddr, + } + var views [1]buffer.View + vv := view.ToVectorisedView(views) + ep.HandlePacket(&r, &vv) + if o.dataCalls != 1 { + t.Fatalf("Bad number of data calls: got %x, want 1", o.dataCalls) + } +} + +func TestIPv4ReceiveControl(t *testing.T) { + const mtu = 0xbeef - header.IPv4MinimumSize + cases := []struct { + name string + expectedCount int + fragmentOffset uint16 + code uint8 + expectedTyp stack.ControlType + expectedExtra uint32 + trunc int + }{ + {"FragmentationNeeded", 1, 0, header.ICMPv4FragmentationNeeded, stack.ControlPacketTooBig, mtu, 0}, + {"Truncated (10 bytes missing)", 0, 0, header.ICMPv4FragmentationNeeded, stack.ControlPacketTooBig, mtu, 10}, + {"Truncated (missing IPv4 header)", 0, 0, header.ICMPv4FragmentationNeeded, stack.ControlPacketTooBig, mtu, header.IPv4MinimumSize + 8}, + {"Truncated (missing 'extra info')", 0, 0, header.ICMPv4FragmentationNeeded, stack.ControlPacketTooBig, mtu, 4 + header.IPv4MinimumSize + 8}, + {"Truncated (missing ICMP header)", 0, 0, header.ICMPv4FragmentationNeeded, stack.ControlPacketTooBig, mtu, header.ICMPv4DstUnreachableMinimumSize + header.IPv4MinimumSize + 8}, + {"Port unreachable", 1, 0, header.ICMPv4PortUnreachable, stack.ControlPortUnreachable, 0, 0}, + {"Non-zero fragment offset", 0, 100, header.ICMPv4PortUnreachable, stack.ControlPortUnreachable, 0, 0}, + {"Zero-length packet", 0, 0, header.ICMPv4PortUnreachable, stack.ControlPortUnreachable, 0, 2*header.IPv4MinimumSize + header.ICMPv4DstUnreachableMinimumSize + 8}, + } + r := stack.Route{ + LocalAddress: "\x0a\x00\x00\x01", + RemoteAddress: "\x0a\x00\x00\xbb", + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + var views [1]buffer.View + o := testObject{t: t} + proto := ipv4.NewProtocol() + ep, err := proto.NewEndpoint(1, 
"\x0a\x00\x00\x01", nil, &o, nil) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + defer ep.Close() + + const dataOffset = header.IPv4MinimumSize*2 + header.ICMPv4MinimumSize + 4 + view := buffer.NewView(dataOffset + 8) + + // Create the outer IPv4 header. + ip := header.IPv4(view) + ip.Encode(&header.IPv4Fields{ + IHL: header.IPv4MinimumSize, + TotalLength: uint16(len(view) - c.trunc), + TTL: 20, + Protocol: uint8(header.ICMPv4ProtocolNumber), + SrcAddr: "\x0a\x00\x00\xbb", + DstAddr: "\x0a\x00\x00\x01", + }) + + // Create the ICMP header. + icmp := header.ICMPv4(view[header.IPv4MinimumSize:]) + icmp.SetType(header.ICMPv4DstUnreachable) + icmp.SetCode(c.code) + copy(view[header.IPv4MinimumSize+header.ICMPv4MinimumSize:], []byte{0xde, 0xad, 0xbe, 0xef}) + + // Create the inner IPv4 header. + ip = header.IPv4(view[header.IPv4MinimumSize+header.ICMPv4MinimumSize+4:]) + ip.Encode(&header.IPv4Fields{ + IHL: header.IPv4MinimumSize, + TotalLength: 100, + TTL: 20, + Protocol: 10, + FragmentOffset: c.fragmentOffset, + SrcAddr: "\x0a\x00\x00\x01", + DstAddr: "\x0a\x00\x00\x02", + }) + + // Make payload be non-zero. + for i := dataOffset; i < len(view); i++ { + view[i] = uint8(i) + } + + // Give packet to IPv4 endpoint, dispatcher will validate that + // it's ok. 
+ o.protocol = 10 + o.srcAddr = "\x0a\x00\x00\x02" + o.dstAddr = "\x0a\x00\x00\x01" + o.contents = view[dataOffset:] + o.typ = c.expectedTyp + o.extra = c.expectedExtra + + vv := view.ToVectorisedView(views) + vv.CapLength(len(view) - c.trunc) + ep.HandlePacket(&r, &vv) + if want := c.expectedCount; o.controlCalls != want { + t.Fatalf("Bad number of control calls for %q case: got %v, want %v", c.name, o.controlCalls, want) + } + }) + } +} + +func TestIPv4FragmentationReceive(t *testing.T) { + o := testObject{t: t, v4: true} + proto := ipv4.NewProtocol() + ep, err := proto.NewEndpoint(1, "\x0a\x00\x00\x01", nil, &o, nil) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + totalLen := header.IPv4MinimumSize + 24 + + frag1 := buffer.NewView(totalLen) + ip1 := header.IPv4(frag1) + ip1.Encode(&header.IPv4Fields{ + IHL: header.IPv4MinimumSize, + TotalLength: uint16(totalLen), + TTL: 20, + Protocol: 10, + FragmentOffset: 0, + Flags: header.IPv4FlagMoreFragments, + SrcAddr: "\x0a\x00\x00\x02", + DstAddr: "\x0a\x00\x00\x01", + }) + // Make payload be non-zero. + for i := header.IPv4MinimumSize; i < totalLen; i++ { + frag1[i] = uint8(i) + } + + frag2 := buffer.NewView(totalLen) + ip2 := header.IPv4(frag2) + ip2.Encode(&header.IPv4Fields{ + IHL: header.IPv4MinimumSize, + TotalLength: uint16(totalLen), + TTL: 20, + Protocol: 10, + FragmentOffset: 24, + SrcAddr: "\x0a\x00\x00\x02", + DstAddr: "\x0a\x00\x00\x01", + }) + // Make payload be non-zero. + for i := header.IPv4MinimumSize; i < totalLen; i++ { + frag2[i] = uint8(i) + } + + // Give packet to ipv4 endpoint, dispatcher will validate that it's ok. + o.protocol = 10 + o.srcAddr = "\x0a\x00\x00\x02" + o.dstAddr = "\x0a\x00\x00\x01" + o.contents = append(frag1[header.IPv4MinimumSize:totalLen], frag2[header.IPv4MinimumSize:totalLen]...) + + r := stack.Route{ + LocalAddress: o.dstAddr, + RemoteAddress: o.srcAddr, + } + + // Send first segment. 
+ var views1 [1]buffer.View + vv1 := frag1.ToVectorisedView(views1) + ep.HandlePacket(&r, &vv1) + if o.dataCalls != 0 { + t.Fatalf("Bad number of data calls: got %x, want 0", o.dataCalls) + } + + // Send second segment. + var views2 [1]buffer.View + vv2 := frag2.ToVectorisedView(views2) + ep.HandlePacket(&r, &vv2) + + if o.dataCalls != 1 { + t.Fatalf("Bad number of data calls: got %x, want 1", o.dataCalls) + } +} + +func TestIPv6Send(t *testing.T) { + o := testObject{t: t} + proto := ipv6.NewProtocol() + ep, err := proto.NewEndpoint(1, "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", nil, nil, &o) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + // Allocate and initialize the payload view. + payload := buffer.NewView(100) + for i := 0; i < len(payload); i++ { + payload[i] = uint8(i) + } + + // Allocate the header buffer. + hdr := buffer.NewPrependable(int(ep.MaxHeaderLength())) + + // Issue the write. + o.protocol = 123 + o.srcAddr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01" + o.dstAddr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02" + o.contents = payload + + r := stack.Route{ + RemoteAddress: o.dstAddr, + LocalAddress: o.srcAddr, + } + if err := ep.WritePacket(&r, &hdr, payload, 123); err != nil { + t.Fatalf("WritePacket failed: %v", err) + } +} + +func TestIPv6Receive(t *testing.T) { + o := testObject{t: t} + proto := ipv6.NewProtocol() + ep, err := proto.NewEndpoint(1, "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", nil, &o, nil) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + totalLen := header.IPv6MinimumSize + 30 + view := buffer.NewView(totalLen) + ip := header.IPv6(view) + ip.Encode(&header.IPv6Fields{ + PayloadLength: uint16(totalLen - header.IPv6MinimumSize), + NextHeader: 10, + HopLimit: 20, + SrcAddr: "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02", + DstAddr: 
"\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", + }) + + // Make payload be non-zero. + for i := header.IPv6MinimumSize; i < totalLen; i++ { + view[i] = uint8(i) + } + + // Give packet to ipv6 endpoint, dispatcher will validate that it's ok. + o.protocol = 10 + o.srcAddr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02" + o.dstAddr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01" + o.contents = view[header.IPv6MinimumSize:totalLen] + + r := stack.Route{ + LocalAddress: o.dstAddr, + RemoteAddress: o.srcAddr, + } + var views [1]buffer.View + vv := view.ToVectorisedView(views) + ep.HandlePacket(&r, &vv) + + if o.dataCalls != 1 { + t.Fatalf("Bad number of data calls: got %x, want 1", o.dataCalls) + } +} + +func TestIPv6ReceiveControl(t *testing.T) { + newUint16 := func(v uint16) *uint16 { return &v } + + const mtu = 0xffff + cases := []struct { + name string + expectedCount int + fragmentOffset *uint16 + typ header.ICMPv6Type + code uint8 + expectedTyp stack.ControlType + expectedExtra uint32 + trunc int + }{ + {"PacketTooBig", 1, nil, header.ICMPv6PacketTooBig, 0, stack.ControlPacketTooBig, mtu, 0}, + {"Truncated (10 bytes missing)", 0, nil, header.ICMPv6PacketTooBig, 0, stack.ControlPacketTooBig, mtu, 10}, + {"Truncated (missing IPv6 header)", 0, nil, header.ICMPv6PacketTooBig, 0, stack.ControlPacketTooBig, mtu, header.IPv6MinimumSize + 8}, + {"Truncated PacketTooBig (missing 'extra info')", 0, nil, header.ICMPv6PacketTooBig, 0, stack.ControlPacketTooBig, mtu, 4 + header.IPv6MinimumSize + 8}, + {"Truncated (missing ICMP header)", 0, nil, header.ICMPv6PacketTooBig, 0, stack.ControlPacketTooBig, mtu, header.ICMPv6PacketTooBigMinimumSize + header.IPv6MinimumSize + 8}, + {"Port unreachable", 1, nil, header.ICMPv6DstUnreachable, header.ICMPv6PortUnreachable, stack.ControlPortUnreachable, 0, 0}, + {"Truncated DstUnreachable (missing 'extra info')", 0, nil, header.ICMPv6DstUnreachable, header.ICMPv6PortUnreachable, 
stack.ControlPortUnreachable, 0, 4 + header.IPv6MinimumSize + 8}, + {"Fragmented, zero offset", 1, newUint16(0), header.ICMPv6DstUnreachable, header.ICMPv6PortUnreachable, stack.ControlPortUnreachable, 0, 0}, + {"Non-zero fragment offset", 0, newUint16(100), header.ICMPv6DstUnreachable, header.ICMPv6PortUnreachable, stack.ControlPortUnreachable, 0, 0}, + {"Zero-length packet", 0, nil, header.ICMPv6DstUnreachable, header.ICMPv6PortUnreachable, stack.ControlPortUnreachable, 0, 2*header.IPv6MinimumSize + header.ICMPv6DstUnreachableMinimumSize + 8}, + } + r := stack.Route{ + LocalAddress: "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", + RemoteAddress: "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa", + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + var views [1]buffer.View + o := testObject{t: t} + proto := ipv6.NewProtocol() + ep, err := proto.NewEndpoint(1, "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", nil, &o, nil) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + defer ep.Close() + + dataOffset := header.IPv6MinimumSize*2 + header.ICMPv6MinimumSize + 4 + if c.fragmentOffset != nil { + dataOffset += header.IPv6FragmentHeaderSize + } + view := buffer.NewView(dataOffset + 8) + + // Create the outer IPv6 header. + ip := header.IPv6(view) + ip.Encode(&header.IPv6Fields{ + PayloadLength: uint16(len(view) - header.IPv6MinimumSize - c.trunc), + NextHeader: uint8(header.ICMPv6ProtocolNumber), + HopLimit: 20, + SrcAddr: "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa", + DstAddr: "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", + }) + + // Create the ICMP header. + icmp := header.ICMPv6(view[header.IPv6MinimumSize:]) + icmp.SetType(c.typ) + icmp.SetCode(c.code) + copy(view[header.IPv6MinimumSize+header.ICMPv6MinimumSize:], []byte{0xde, 0xad, 0xbe, 0xef}) + + // Create the inner IPv6 header. 
+ ip = header.IPv6(view[header.IPv6MinimumSize+header.ICMPv6MinimumSize+4:]) + ip.Encode(&header.IPv6Fields{ + PayloadLength: 100, + NextHeader: 10, + HopLimit: 20, + SrcAddr: "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", + DstAddr: "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02", + }) + + // Build the fragmentation header if needed. + if c.fragmentOffset != nil { + ip.SetNextHeader(header.IPv6FragmentHeader) + frag := header.IPv6Fragment(view[2*header.IPv6MinimumSize+header.ICMPv6MinimumSize+4:]) + frag.Encode(&header.IPv6FragmentFields{ + NextHeader: 10, + FragmentOffset: *c.fragmentOffset, + M: true, + Identification: 0x12345678, + }) + } + + // Make payload be non-zero. + for i := dataOffset; i < len(view); i++ { + view[i] = uint8(i) + } + + // Give packet to IPv6 endpoint, dispatcher will validate that + // it's ok. + o.protocol = 10 + o.srcAddr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02" + o.dstAddr = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01" + o.contents = view[dataOffset:] + o.typ = c.expectedTyp + o.extra = c.expectedExtra + + vv := view.ToVectorisedView(views) + vv.CapLength(len(view) - c.trunc) + ep.HandlePacket(&r, &vv) + if want := c.expectedCount; o.controlCalls != want { + t.Fatalf("Bad number of control calls for %q case: got %v, want %v", c.name, o.controlCalls, want) + } + }) + } +} diff --git a/pkg/tcpip/network/ipv4/BUILD b/pkg/tcpip/network/ipv4/BUILD new file mode 100644 index 000000000..9df113df1 --- /dev/null +++ b/pkg/tcpip/network/ipv4/BUILD @@ -0,0 +1,38 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "ipv4", + srcs = [ + "icmp.go", + "ipv4.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4", + visibility = [ + "//visibility:public", + ], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + 
"//pkg/tcpip/network/fragmentation", + "//pkg/tcpip/network/hash", + "//pkg/tcpip/stack", + "//pkg/waiter", + ], +) + +go_test( + name = "ipv4_test", + size = "small", + srcs = ["icmp_test.go"], + deps = [ + ":ipv4", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/link/channel", + "//pkg/tcpip/link/sniffer", + "//pkg/tcpip/stack", + ], +) diff --git a/pkg/tcpip/network/ipv4/icmp.go b/pkg/tcpip/network/ipv4/icmp.go new file mode 100644 index 000000000..ffd761350 --- /dev/null +++ b/pkg/tcpip/network/ipv4/icmp.go @@ -0,0 +1,282 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "context" + "encoding/binary" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// PingProtocolName is a pseudo transport protocol used to handle ping replies. +// Use it when constructing a stack that intends to use ipv4.Ping. +const PingProtocolName = "icmpv4ping" + +// pingProtocolNumber is a fake transport protocol used to +// deliver incoming ICMP echo replies. The ICMP identifier +// number is used as a port number for multiplexing. +const pingProtocolNumber tcpip.TransportProtocolNumber = 256 + 11 + +// handleControl handles the case when an ICMP packet contains the headers of +// the original packet that caused the ICMP one to be sent. This information is +// used to find out which transport endpoint must be notified about the ICMP +// packet. +func (e *endpoint) handleControl(typ stack.ControlType, extra uint32, vv *buffer.VectorisedView) { + h := header.IPv4(vv.First()) + + // We don't use IsValid() here because ICMP only requires that the IP + // header plus 8 bytes of the transport header be included. 
So it's + // likely that it is truncated, which would cause IsValid to return + // false. + // + // Drop packet if it doesn't have the basic IPv4 header or if the + // original source address doesn't match the endpoint's address. + if len(h) < header.IPv4MinimumSize || h.SourceAddress() != e.id.LocalAddress { + return + } + + hlen := int(h.HeaderLength()) + if vv.Size() < hlen || h.FragmentOffset() != 0 { + // We won't be able to handle this if it doesn't contain the + // full IPv4 header, or if it's a fragment not at offset 0 + // (because it won't have the transport header). + return + } + + // Skip the ip header, then deliver control message. + vv.TrimFront(hlen) + p := h.TransportProtocol() + e.dispatcher.DeliverTransportControlPacket(e.id.LocalAddress, h.DestinationAddress(), ProtocolNumber, p, typ, extra, vv) +} + +func (e *endpoint) handleICMP(r *stack.Route, vv *buffer.VectorisedView) { + v := vv.First() + if len(v) < header.ICMPv4MinimumSize { + return + } + h := header.ICMPv4(v) + + switch h.Type() { + case header.ICMPv4Echo: + if len(v) < header.ICMPv4EchoMinimumSize { + return + } + vv.TrimFront(header.ICMPv4MinimumSize) + req := echoRequest{r: r.Clone(), v: vv.ToView()} + select { + case e.echoRequests <- req: + default: + req.r.Release() + } + + case header.ICMPv4EchoReply: + e.dispatcher.DeliverTransportPacket(r, pingProtocolNumber, vv) + + case header.ICMPv4DstUnreachable: + if len(v) < header.ICMPv4DstUnreachableMinimumSize { + return + } + vv.TrimFront(header.ICMPv4DstUnreachableMinimumSize) + switch h.Code() { + case header.ICMPv4PortUnreachable: + e.handleControl(stack.ControlPortUnreachable, 0, vv) + + case header.ICMPv4FragmentationNeeded: + mtu := uint32(binary.BigEndian.Uint16(v[header.ICMPv4DstUnreachableMinimumSize-2:])) + e.handleControl(stack.ControlPacketTooBig, calculateMTU(mtu), vv) + } + } + // TODO: Handle other ICMP types. 
+} + +type echoRequest struct { + r stack.Route + v buffer.View +} + +func (e *endpoint) echoReplier() { + for req := range e.echoRequests { + sendICMPv4(&req.r, header.ICMPv4EchoReply, 0, req.v) + req.r.Release() + } +} + +func sendICMPv4(r *stack.Route, typ header.ICMPv4Type, code byte, data buffer.View) *tcpip.Error { + hdr := buffer.NewPrependable(header.ICMPv4MinimumSize + int(r.MaxHeaderLength())) + + icmpv4 := header.ICMPv4(hdr.Prepend(header.ICMPv4MinimumSize)) + icmpv4.SetType(typ) + icmpv4.SetCode(code) + icmpv4.SetChecksum(^header.Checksum(icmpv4, header.Checksum(data, 0))) + + return r.WritePacket(&hdr, data, header.ICMPv4ProtocolNumber) +} + +// A Pinger can send echo requests to an address. +type Pinger struct { + Stack *stack.Stack + NICID tcpip.NICID + Addr tcpip.Address + LocalAddr tcpip.Address // optional + Wait time.Duration // if zero, defaults to 1 second + Count uint16 // if zero, defaults to MaxUint16 +} + +// Ping sends echo requests to an ICMPv4 endpoint. +// Responses are streamed to the channel ch. 
+func (p *Pinger) Ping(ctx context.Context, ch chan<- PingReply) *tcpip.Error { + count := p.Count + if count == 0 { + count = 1<<16 - 1 + } + wait := p.Wait + if wait == 0 { + wait = 1 * time.Second + } + + r, err := p.Stack.FindRoute(p.NICID, p.LocalAddr, p.Addr, ProtocolNumber) + if err != nil { + return err + } + + netProtos := []tcpip.NetworkProtocolNumber{ProtocolNumber} + ep := &pingEndpoint{ + stack: p.Stack, + pktCh: make(chan buffer.View, 1), + } + id := stack.TransportEndpointID{ + LocalAddress: r.LocalAddress, + RemoteAddress: p.Addr, + } + + _, err = p.Stack.PickEphemeralPort(func(port uint16) (bool, *tcpip.Error) { + id.LocalPort = port + err := p.Stack.RegisterTransportEndpoint(p.NICID, netProtos, pingProtocolNumber, id, ep) + switch err { + case nil: + return true, nil + case tcpip.ErrPortInUse: + return false, nil + default: + return false, err + } + }) + if err != nil { + return err + } + defer p.Stack.UnregisterTransportEndpoint(p.NICID, netProtos, pingProtocolNumber, id) + + v := buffer.NewView(4) + binary.BigEndian.PutUint16(v[0:], id.LocalPort) + + start := time.Now() + + done := make(chan struct{}) + go func(count int) { + loop: + for ; count > 0; count-- { + select { + case v := <-ep.pktCh: + seq := binary.BigEndian.Uint16(v[header.ICMPv4MinimumSize+2:]) + ch <- PingReply{ + Duration: time.Since(start) - time.Duration(seq)*wait, + SeqNumber: seq, + } + case <-ctx.Done(): + break loop + } + } + close(done) + }(int(count)) + defer func() { <-done }() + + t := time.NewTicker(wait) + defer t.Stop() + for seq := uint16(0); seq < count; seq++ { + select { + case <-t.C: + case <-ctx.Done(): + return nil + } + binary.BigEndian.PutUint16(v[2:], seq) + sent := time.Now() + if err := sendICMPv4(&r, header.ICMPv4Echo, 0, v); err != nil { + ch <- PingReply{ + Error: err, + Duration: time.Since(sent), + SeqNumber: seq, + } + } + } + return nil +} + +// PingReply summarizes an ICMP echo reply. 
+type PingReply struct { + Error *tcpip.Error // reports any errors sending a ping request + Duration time.Duration + SeqNumber uint16 +} + +type pingProtocol struct{} + +func (*pingProtocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) { + return nil, tcpip.ErrNotSupported // endpoints are created directly +} + +func (*pingProtocol) Number() tcpip.TransportProtocolNumber { return pingProtocolNumber } + +func (*pingProtocol) MinimumPacketSize() int { return header.ICMPv4EchoMinimumSize } + +func (*pingProtocol) ParsePorts(v buffer.View) (src, dst uint16, err *tcpip.Error) { + ident := binary.BigEndian.Uint16(v[4:]) + return 0, ident, nil +} + +func (*pingProtocol) HandleUnknownDestinationPacket(*stack.Route, stack.TransportEndpointID, *buffer.VectorisedView) bool { + return true +} + +// SetOption implements TransportProtocol.SetOption. +func (p *pingProtocol) SetOption(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +// Option implements TransportProtocol.Option. +func (p *pingProtocol) Option(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +func init() { + stack.RegisterTransportProtocolFactory(PingProtocolName, func() stack.TransportProtocol { + return &pingProtocol{} + }) +} + +type pingEndpoint struct { + stack *stack.Stack + pktCh chan buffer.View +} + +func (e *pingEndpoint) Close() { + close(e.pktCh) +} + +func (e *pingEndpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv *buffer.VectorisedView) { + select { + case e.pktCh <- vv.ToView(): + default: + } +} + +// HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket. 
+func (e *pingEndpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, vv *buffer.VectorisedView) { +} diff --git a/pkg/tcpip/network/ipv4/icmp_test.go b/pkg/tcpip/network/ipv4/icmp_test.go new file mode 100644 index 000000000..378fba74b --- /dev/null +++ b/pkg/tcpip/network/ipv4/icmp_test.go @@ -0,0 +1,124 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "context" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/channel" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sniffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +const stackAddr = "\x0a\x00\x00\x01" + +type testContext struct { + t *testing.T + linkEP *channel.Endpoint + s *stack.Stack +} + +func newTestContext(t *testing.T) *testContext { + s := stack.New([]string{ipv4.ProtocolName}, []string{ipv4.PingProtocolName}) + + const defaultMTU = 65536 + id, linkEP := channel.New(256, defaultMTU, "") + if testing.Verbose() { + id = sniffer.New(id) + } + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, ipv4.ProtocolNumber, stackAddr); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{{ + Destination: "\x00\x00\x00\x00", + Mask: "\x00\x00\x00\x00", + Gateway: "", + NIC: 1, + }}) + + return &testContext{ + t: t, + s: s, + linkEP: linkEP, + } +} + +func (c *testContext) cleanup() { + close(c.linkEP.C) +} + +func (c *testContext) loopback() { + go func() { + for pkt := range c.linkEP.C { + v := make(buffer.View, len(pkt.Header)+len(pkt.Payload)) + copy(v, pkt.Header) + copy(v[len(pkt.Header):], pkt.Payload) + vv := 
v.ToVectorisedView([1]buffer.View{}) + c.linkEP.Inject(pkt.Proto, &vv) + } + }() +} + +func TestEcho(t *testing.T) { + c := newTestContext(t) + defer c.cleanup() + c.loopback() + + ch := make(chan ipv4.PingReply, 1) + p := ipv4.Pinger{ + Stack: c.s, + NICID: 1, + Addr: stackAddr, + Wait: 10 * time.Millisecond, + Count: 1, // one ping only + } + if err := p.Ping(context.Background(), ch); err != nil { + t.Fatalf("icmp.Ping failed: %v", err) + } + + ping := <-ch + if ping.Error != nil { + t.Errorf("bad ping response: %v", ping.Error) + } +} + +func TestEchoSequence(t *testing.T) { + c := newTestContext(t) + defer c.cleanup() + c.loopback() + + const numPings = 3 + ch := make(chan ipv4.PingReply, numPings) + p := ipv4.Pinger{ + Stack: c.s, + NICID: 1, + Addr: stackAddr, + Wait: 10 * time.Millisecond, + Count: numPings, + } + if err := p.Ping(context.Background(), ch); err != nil { + t.Fatalf("icmp.Ping failed: %v", err) + } + + for i := uint16(0); i < numPings; i++ { + ping := <-ch + if ping.Error != nil { + t.Errorf("i=%d bad ping response: %v", i, ping.Error) + } + if ping.SeqNumber != i { + t.Errorf("SeqNumber=%d, want %d", ping.SeqNumber, i) + } + } +} diff --git a/pkg/tcpip/network/ipv4/ipv4.go b/pkg/tcpip/network/ipv4/ipv4.go new file mode 100644 index 000000000..4cc2a2fd4 --- /dev/null +++ b/pkg/tcpip/network/ipv4/ipv4.go @@ -0,0 +1,233 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 contains the implementation of the ipv4 network protocol. To use +// it in the networking stack, this package must be added to the project, and +// activated on the stack by passing ipv4.ProtocolName (or "ipv4") as one of the +// network protocols when calling stack.New(). Then endpoints can be created +// by passing ipv4.ProtocolNumber as the network protocol number when calling +// Stack.NewEndpoint(). 
+package ipv4 + +import ( + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/fragmentation" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/hash" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +const ( + // ProtocolName is the string representation of the ipv4 protocol name. + ProtocolName = "ipv4" + + // ProtocolNumber is the ipv4 protocol number. + ProtocolNumber = header.IPv4ProtocolNumber + + // maxTotalSize is maximum size that can be encoded in the 16-bit + // TotalLength field of the ipv4 header. + maxTotalSize = 0xffff + + // buckets is the number of identifier buckets. + buckets = 2048 +) + +type address [header.IPv4AddressSize]byte + +type endpoint struct { + nicid tcpip.NICID + id stack.NetworkEndpointID + address address + linkEP stack.LinkEndpoint + dispatcher stack.TransportDispatcher + echoRequests chan echoRequest + fragmentation *fragmentation.Fragmentation +} + +func newEndpoint(nicid tcpip.NICID, addr tcpip.Address, dispatcher stack.TransportDispatcher, linkEP stack.LinkEndpoint) *endpoint { + e := &endpoint{ + nicid: nicid, + linkEP: linkEP, + dispatcher: dispatcher, + echoRequests: make(chan echoRequest, 10), + fragmentation: fragmentation.NewFragmentation(fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, fragmentation.DefaultReassembleTimeout), + } + copy(e.address[:], addr) + e.id = stack.NetworkEndpointID{tcpip.Address(e.address[:])} + + go e.echoReplier() + + return e +} + +// MTU implements stack.NetworkEndpoint.MTU. It returns the link-layer MTU minus +// the network layer max header length. +func (e *endpoint) MTU() uint32 { + return calculateMTU(e.linkEP.MTU()) +} + +// Capabilities implements stack.NetworkEndpoint.Capabilities. 
+func (e *endpoint) Capabilities() stack.LinkEndpointCapabilities { + return e.linkEP.Capabilities() +} + +// NICID returns the ID of the NIC this endpoint belongs to. +func (e *endpoint) NICID() tcpip.NICID { + return e.nicid +} + +// ID returns the ipv4 endpoint ID. +func (e *endpoint) ID() *stack.NetworkEndpointID { + return &e.id +} + +// MaxHeaderLength returns the maximum length needed by ipv4 headers (and +// underlying protocols). +func (e *endpoint) MaxHeaderLength() uint16 { + return e.linkEP.MaxHeaderLength() + header.IPv4MinimumSize +} + +// WritePacket writes a packet to the given destination address and protocol. +func (e *endpoint) WritePacket(r *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.TransportProtocolNumber) *tcpip.Error { + ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize)) + length := uint16(hdr.UsedLength() + len(payload)) + id := uint32(0) + if length > header.IPv4MaximumHeaderSize+8 { + // Packets of 68 bytes or less are required by RFC 791 to not be + // fragmented, so we only assign ids to larger packets. + id = atomic.AddUint32(&ids[hashRoute(r, protocol)%buckets], 1) + } + ip.Encode(&header.IPv4Fields{ + IHL: header.IPv4MinimumSize, + TotalLength: length, + ID: uint16(id), + TTL: 65, + Protocol: uint8(protocol), + SrcAddr: tcpip.Address(e.address[:]), + DstAddr: r.RemoteAddress, + }) + ip.SetChecksum(^ip.CalculateChecksum()) + + return e.linkEP.WritePacket(r, hdr, payload, ProtocolNumber) +} + +// HandlePacket is called by the link layer when new ipv4 packets arrive for +// this endpoint. 
+func (e *endpoint) HandlePacket(r *stack.Route, vv *buffer.VectorisedView) { + h := header.IPv4(vv.First()) + if !h.IsValid(vv.Size()) { + return + } + + hlen := int(h.HeaderLength()) + tlen := int(h.TotalLength()) + vv.TrimFront(hlen) + vv.CapLength(tlen - hlen) + + more := (h.Flags() & header.IPv4FlagMoreFragments) != 0 + if more || h.FragmentOffset() != 0 { + // The packet is a fragment, let's try to reassemble it. + last := h.FragmentOffset() + uint16(vv.Size()) - 1 + tt, ready := e.fragmentation.Process(hash.IPv4FragmentHash(h), h.FragmentOffset(), last, more, vv) + if !ready { + return + } + vv = &tt + } + p := h.TransportProtocol() + if p == header.ICMPv4ProtocolNumber { + e.handleICMP(r, vv) + return + } + e.dispatcher.DeliverTransportPacket(r, p, vv) +} + +// Close cleans up resources associated with the endpoint. +func (e *endpoint) Close() { + close(e.echoRequests) +} + +type protocol struct{} + +// NewProtocol creates a new protocol ipv4 protocol descriptor. This is exported +// only for tests that short-circuit the stack. Regular use of the protocol is +// done via the stack, which gets a protocol descriptor from the init() function +// below. +func NewProtocol() stack.NetworkProtocol { + return &protocol{} +} + +// Number returns the ipv4 protocol number. +func (p *protocol) Number() tcpip.NetworkProtocolNumber { + return ProtocolNumber +} + +// MinimumPacketSize returns the minimum valid ipv4 packet size. +func (p *protocol) MinimumPacketSize() int { + return header.IPv4MinimumSize +} + +// ParseAddresses implements NetworkProtocol.ParseAddresses. +func (*protocol) ParseAddresses(v buffer.View) (src, dst tcpip.Address) { + h := header.IPv4(v) + return h.SourceAddress(), h.DestinationAddress() +} + +// NewEndpoint creates a new ipv4 endpoint. 
+func (p *protocol) NewEndpoint(nicid tcpip.NICID, addr tcpip.Address, linkAddrCache stack.LinkAddressCache, dispatcher stack.TransportDispatcher, linkEP stack.LinkEndpoint) (stack.NetworkEndpoint, *tcpip.Error) { + return newEndpoint(nicid, addr, dispatcher, linkEP), nil +} + +// SetOption implements NetworkProtocol.SetOption. +func (p *protocol) SetOption(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +// Option implements NetworkProtocol.Option. +func (p *protocol) Option(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +// calculateMTU calculates the network-layer payload MTU based on the link-layer +// payload mtu. +func calculateMTU(mtu uint32) uint32 { + if mtu > maxTotalSize { + mtu = maxTotalSize + } + return mtu - header.IPv4MinimumSize +} + +// hashRoute calculates a hash value for the given route. It uses the source & +// destination address, the transport protocol number, and a random initial +// value (generated once on initialization) to generate the hash. +func hashRoute(r *stack.Route, protocol tcpip.TransportProtocolNumber) uint32 { + t := r.LocalAddress + a := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 + t = r.RemoteAddress + b := uint32(t[0]) | uint32(t[1])<<8 | uint32(t[2])<<16 | uint32(t[3])<<24 + return hash.Hash3Words(a, b, uint32(protocol), hashIV) +} + +var ( + ids []uint32 + hashIV uint32 +) + +func init() { + ids = make([]uint32, buckets) + + // Randomly initialize hashIV and the ids. 
+ r := hash.RandN32(1 + buckets) + for i := range ids { + ids[i] = r[i] + } + hashIV = r[buckets] + + stack.RegisterNetworkProtocolFactory(ProtocolName, func() stack.NetworkProtocol { + return &protocol{} + }) +} diff --git a/pkg/tcpip/network/ipv6/BUILD b/pkg/tcpip/network/ipv6/BUILD new file mode 100644 index 000000000..db7da0af3 --- /dev/null +++ b/pkg/tcpip/network/ipv6/BUILD @@ -0,0 +1,21 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ipv6", + srcs = [ + "icmp.go", + "ipv6.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv6", + visibility = [ + "//visibility:public", + ], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/stack", + ], +) diff --git a/pkg/tcpip/network/ipv6/icmp.go b/pkg/tcpip/network/ipv6/icmp.go new file mode 100644 index 000000000..0fc6dcce2 --- /dev/null +++ b/pkg/tcpip/network/ipv6/icmp.go @@ -0,0 +1,80 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +// handleControl handles the case when an ICMP packet contains the headers of +// the original packet that caused the ICMP one to be sent. This information is +// used to find out which transport endpoint must be notified about the ICMP +// packet. +func (e *endpoint) handleControl(typ stack.ControlType, extra uint32, vv *buffer.VectorisedView) { + h := header.IPv6(vv.First()) + + // We don't use IsValid() here because ICMP only requires that up to + // 1280 bytes of the original packet be included. So it's likely that it + // is truncated, which would cause IsValid to return false. 
+ // + // Drop packet if it doesn't have the basic IPv6 header or if the + // original source address doesn't match the endpoint's address. + if len(h) < header.IPv6MinimumSize || h.SourceAddress() != e.id.LocalAddress { + return + } + + // Skip the IP header, then handle the fragmentation header if there + // is one. + vv.TrimFront(header.IPv6MinimumSize) + p := h.TransportProtocol() + if p == header.IPv6FragmentHeader { + f := header.IPv6Fragment(vv.First()) + if !f.IsValid() || f.FragmentOffset() != 0 { + // We can't handle fragments that aren't at offset 0 + // because they don't have the transport headers. + return + } + + // Skip fragmentation header and find out the actual protocol + // number. + vv.TrimFront(header.IPv6FragmentHeaderSize) + p = f.TransportProtocol() + } + + // Deliver the control packet to the transport endpoint. + e.dispatcher.DeliverTransportControlPacket(e.id.LocalAddress, h.DestinationAddress(), ProtocolNumber, p, typ, extra, vv) +} + +func (e *endpoint) handleICMP(r *stack.Route, vv *buffer.VectorisedView) { + v := vv.First() + if len(v) < header.ICMPv6MinimumSize { + return + } + h := header.ICMPv6(v) + + switch h.Type() { + case header.ICMPv6PacketTooBig: + if len(v) < header.ICMPv6PacketTooBigMinimumSize { + return + } + vv.TrimFront(header.ICMPv6PacketTooBigMinimumSize) + mtu := binary.BigEndian.Uint32(v[header.ICMPv6MinimumSize:]) + e.handleControl(stack.ControlPacketTooBig, calculateMTU(mtu), vv) + + case header.ICMPv6DstUnreachable: + if len(v) < header.ICMPv6DstUnreachableMinimumSize { + return + } + vv.TrimFront(header.ICMPv6DstUnreachableMinimumSize) + switch h.Code() { + case header.ICMPv6PortUnreachable: + e.handleControl(stack.ControlPortUnreachable, 0, vv) + } + } +} diff --git a/pkg/tcpip/network/ipv6/ipv6.go b/pkg/tcpip/network/ipv6/ipv6.go new file mode 100644 index 000000000..15654cbbd --- /dev/null +++ b/pkg/tcpip/network/ipv6/ipv6.go @@ -0,0 +1,172 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 contains the implementation of the ipv6 network protocol. To use +// it in the networking stack, this package must be added to the project, and +// activated on the stack by passing ipv6.ProtocolName (or "ipv6") as one of the +// network protocols when calling stack.New(). Then endpoints can be created +// by passing ipv6.ProtocolNumber as the network protocol number when calling +// Stack.NewEndpoint(). +package ipv6 + +import ( + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +const ( + // ProtocolName is the string representation of the ipv6 protocol name. + ProtocolName = "ipv6" + + // ProtocolNumber is the ipv6 protocol number. + ProtocolNumber = header.IPv6ProtocolNumber + + // maxTotalSize is maximum size that can be encoded in the 16-bit + // PayloadLength field of the ipv6 header. + maxPayloadSize = 0xffff +) + +type address [header.IPv6AddressSize]byte + +type endpoint struct { + nicid tcpip.NICID + id stack.NetworkEndpointID + address address + linkEP stack.LinkEndpoint + dispatcher stack.TransportDispatcher +} + +func newEndpoint(nicid tcpip.NICID, addr tcpip.Address, dispatcher stack.TransportDispatcher, linkEP stack.LinkEndpoint) *endpoint { + e := &endpoint{nicid: nicid, linkEP: linkEP, dispatcher: dispatcher} + copy(e.address[:], addr) + e.id = stack.NetworkEndpointID{tcpip.Address(e.address[:])} + return e +} + +// MTU implements stack.NetworkEndpoint.MTU. It returns the link-layer MTU minus +// the network layer max header length. +func (e *endpoint) MTU() uint32 { + return calculateMTU(e.linkEP.MTU()) +} + +// NICID returns the ID of the NIC this endpoint belongs to. +func (e *endpoint) NICID() tcpip.NICID { + return e.nicid +} + +// ID returns the ipv6 endpoint ID. 
+func (e *endpoint) ID() *stack.NetworkEndpointID { + return &e.id +} + +// Capabilities implements stack.NetworkEndpoint.Capabilities. +func (e *endpoint) Capabilities() stack.LinkEndpointCapabilities { + return e.linkEP.Capabilities() +} + +// MaxHeaderLength returns the maximum length needed by ipv6 headers (and +// underlying protocols). +func (e *endpoint) MaxHeaderLength() uint16 { + return e.linkEP.MaxHeaderLength() + header.IPv6MinimumSize +} + +// WritePacket writes a packet to the given destination address and protocol. +func (e *endpoint) WritePacket(r *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.TransportProtocolNumber) *tcpip.Error { + length := uint16(hdr.UsedLength()) + if payload != nil { + length += uint16(len(payload)) + } + ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize)) + ip.Encode(&header.IPv6Fields{ + PayloadLength: length, + NextHeader: uint8(protocol), + HopLimit: 65, + SrcAddr: tcpip.Address(e.address[:]), + DstAddr: r.RemoteAddress, + }) + + return e.linkEP.WritePacket(r, hdr, payload, ProtocolNumber) +} + +// HandlePacket is called by the link layer when new ipv6 packets arrive for +// this endpoint. +func (e *endpoint) HandlePacket(r *stack.Route, vv *buffer.VectorisedView) { + h := header.IPv6(vv.First()) + if !h.IsValid(vv.Size()) { + return + } + + vv.TrimFront(header.IPv6MinimumSize) + vv.CapLength(int(h.PayloadLength())) + + p := h.TransportProtocol() + if p == header.ICMPv6ProtocolNumber { + e.handleICMP(r, vv) + return + } + + e.dispatcher.DeliverTransportPacket(r, p, vv) +} + +// Close cleans up resources associated with the endpoint. +func (*endpoint) Close() {} + +type protocol struct{} + +// NewProtocol creates a new protocol ipv6 protocol descriptor. This is exported +// only for tests that short-circuit the stack. Regular use of the protocol is +// done via the stack, which gets a protocol descriptor from the init() function +// below. 
+func NewProtocol() stack.NetworkProtocol { + return &protocol{} +} + +// Number returns the ipv6 protocol number. +func (p *protocol) Number() tcpip.NetworkProtocolNumber { + return ProtocolNumber +} + +// MinimumPacketSize returns the minimum valid ipv6 packet size. +func (p *protocol) MinimumPacketSize() int { + return header.IPv6MinimumSize +} + +// ParseAddresses implements NetworkProtocol.ParseAddresses. +func (*protocol) ParseAddresses(v buffer.View) (src, dst tcpip.Address) { + h := header.IPv6(v) + return h.SourceAddress(), h.DestinationAddress() +} + +// NewEndpoint creates a new ipv6 endpoint. +func (p *protocol) NewEndpoint(nicid tcpip.NICID, addr tcpip.Address, linkAddrCache stack.LinkAddressCache, dispatcher stack.TransportDispatcher, linkEP stack.LinkEndpoint) (stack.NetworkEndpoint, *tcpip.Error) { + return newEndpoint(nicid, addr, dispatcher, linkEP), nil +} + +// SetOption implements NetworkProtocol.SetOption. +func (p *protocol) SetOption(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +// Option implements NetworkProtocol.Option. +func (p *protocol) Option(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +// calculateMTU calculates the network-layer payload MTU based on the link-layer +// payload mtu. 
+func calculateMTU(mtu uint32) uint32 { + mtu -= header.IPv6MinimumSize + if mtu <= maxPayloadSize { + return mtu + } + return maxPayloadSize +} + +func init() { + stack.RegisterNetworkProtocolFactory(ProtocolName, func() stack.NetworkProtocol { + return &protocol{} + }) +} diff --git a/pkg/tcpip/ports/BUILD b/pkg/tcpip/ports/BUILD new file mode 100644 index 000000000..e0140cea6 --- /dev/null +++ b/pkg/tcpip/ports/BUILD @@ -0,0 +1,20 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "ports", + srcs = ["ports.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/ports", + visibility = ["//:sandbox"], + deps = ["//pkg/tcpip"], +) + +go_test( + name = "ports_test", + srcs = ["ports_test.go"], + embed = [":ports"], + deps = [ + "//pkg/tcpip", + ], +) diff --git a/pkg/tcpip/ports/ports.go b/pkg/tcpip/ports/ports.go new file mode 100644 index 000000000..24f3095d6 --- /dev/null +++ b/pkg/tcpip/ports/ports.go @@ -0,0 +1,148 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ports provides PortManager that manages allocating, reserving and releasing ports. +package ports + +import ( + "math" + "math/rand" + "sync" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +const ( + // firstEphemeral is the first ephemeral port. + firstEphemeral uint16 = 16000 + + anyIPAddress = tcpip.Address("") +) + +type portDescriptor struct { + network tcpip.NetworkProtocolNumber + transport tcpip.TransportProtocolNumber + port uint16 +} + +// PortManager manages allocating, reserving and releasing ports. +type PortManager struct { + mu sync.RWMutex + allocatedPorts map[portDescriptor]bindAddresses +} + +// bindAddresses is a set of IP addresses. +type bindAddresses map[tcpip.Address]struct{} + +// isAvailable checks whether an IP address is available to bind to. 
+func (b bindAddresses) isAvailable(addr tcpip.Address) bool { + if addr == anyIPAddress { + return len(b) == 0 + } + + // If all addresses for this portDescriptor are already bound, no + // address is available. + if _, ok := b[anyIPAddress]; ok { + return false + } + + if _, ok := b[addr]; ok { + return false + } + return true +} + +// NewPortManager creates new PortManager. +func NewPortManager() *PortManager { + return &PortManager{allocatedPorts: make(map[portDescriptor]bindAddresses)} +} + +// PickEphemeralPort randomly chooses a starting point and iterates over all +// possible ephemeral ports, allowing the caller to decide whether a given port +// is suitable for its needs, and stopping when a port is found or an error +// occurs. +func (s *PortManager) PickEphemeralPort(testPort func(p uint16) (bool, *tcpip.Error)) (port uint16, err *tcpip.Error) { + count := uint16(math.MaxUint16 - firstEphemeral + 1) + offset := uint16(rand.Int31n(int32(count))) + + for i := uint16(0); i < count; i++ { + port = firstEphemeral + (offset+i)%count + ok, err := testPort(port) + if err != nil { + return 0, err + } + + if ok { + return port, nil + } + } + + return 0, tcpip.ErrNoPortAvailable +} + +// ReservePort marks a port/IP combination as reserved so that it cannot be +// reserved by another endpoint. If port is zero, ReservePort will search for +// an unreserved ephemeral port and reserve it, returning its value in the +// "port" return value. +func (s *PortManager) ReservePort(network []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16) (reservedPort uint16, err *tcpip.Error) { + s.mu.Lock() + defer s.mu.Unlock() + + // If a port is specified, just try to reserve it for all network + // protocols. + if port != 0 { + if !s.reserveSpecificPort(network, transport, addr, port) { + return 0, tcpip.ErrPortInUse + } + return port, nil + } + + // A port wasn't specified, so try to find one. 
+ return s.PickEphemeralPort(func(p uint16) (bool, *tcpip.Error) { + return s.reserveSpecificPort(network, transport, addr, p), nil + }) +} + +// reserveSpecificPort tries to reserve the given port on all given protocols. +func (s *PortManager) reserveSpecificPort(network []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16) bool { + // Check that the port is available on all network protocols. + desc := portDescriptor{0, transport, port} + for _, n := range network { + desc.network = n + if addrs, ok := s.allocatedPorts[desc]; ok { + if !addrs.isAvailable(addr) { + return false + } + } + } + + // Reserve port on all network protocols. + for _, n := range network { + desc.network = n + m, ok := s.allocatedPorts[desc] + if !ok { + m = make(bindAddresses) + s.allocatedPorts[desc] = m + } + m[addr] = struct{}{} + } + + return true +} + +// ReleasePort releases the reservation on a port/IP combination so that it can +// be reserved by other endpoints. +func (s *PortManager) ReleasePort(network []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16) { + s.mu.Lock() + defer s.mu.Unlock() + + for _, n := range network { + desc := portDescriptor{n, transport, port} + m := s.allocatedPorts[desc] + delete(m, addr) + if len(m) == 0 { + delete(s.allocatedPorts, desc) + } + } +} diff --git a/pkg/tcpip/ports/ports_test.go b/pkg/tcpip/ports/ports_test.go new file mode 100644 index 000000000..9a4c702b2 --- /dev/null +++ b/pkg/tcpip/ports/ports_test.go @@ -0,0 +1,134 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+package ports + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +const ( + fakeTransNumber tcpip.TransportProtocolNumber = 1 + fakeNetworkNumber tcpip.NetworkProtocolNumber = 2 + + fakeIPAddress = tcpip.Address("\x08\x08\x08\x08") + fakeIPAddress1 = tcpip.Address("\x08\x08\x08\x09") +) + +func TestPortReservation(t *testing.T) { + pm := NewPortManager() + net := []tcpip.NetworkProtocolNumber{fakeNetworkNumber} + + for _, test := range []struct { + port uint16 + ip tcpip.Address + want *tcpip.Error + }{ + { + port: 80, + ip: fakeIPAddress, + want: nil, + }, + { + port: 80, + ip: fakeIPAddress1, + want: nil, + }, + { + /* N.B. Order of tests matters! */ + port: 80, + ip: anyIPAddress, + want: tcpip.ErrPortInUse, + }, + { + port: 22, + ip: anyIPAddress, + want: nil, + }, + { + port: 22, + ip: fakeIPAddress, + want: tcpip.ErrPortInUse, + }, + { + port: 0, + ip: fakeIPAddress, + want: nil, + }, + { + port: 0, + ip: fakeIPAddress, + want: nil, + }, + } { + gotPort, err := pm.ReservePort(net, fakeTransNumber, test.ip, test.port) + if err != test.want { + t.Fatalf("ReservePort(.., .., %s, %d) = %v, want %v", test.ip, test.port, err, test.want) + } + if test.port == 0 && (gotPort == 0 || gotPort < firstEphemeral) { + t.Fatalf("ReservePort(.., .., .., 0) = %d, want port number >= %d to be picked", gotPort, firstEphemeral) + } + } + + // Release port 22 from any IP address, then try to reserve fake IP + // address on 22. 
+ pm.ReleasePort(net, fakeTransNumber, anyIPAddress, 22) + + if port, err := pm.ReservePort(net, fakeTransNumber, fakeIPAddress, 22); port != 22 || err != nil { + t.Fatalf("ReservePort(.., .., .., %d) = (port %d, err %v), want (22, nil); failed to reserve port after it should have been released", 22, port, err) + } +} + +func TestPickEphemeralPort(t *testing.T) { + pm := NewPortManager() + customErr := &tcpip.Error{} + for _, test := range []struct { + name string + f func(port uint16) (bool, *tcpip.Error) + wantErr *tcpip.Error + wantPort uint16 + }{ + { + name: "no-port-available", + f: func(port uint16) (bool, *tcpip.Error) { + return false, nil + }, + wantErr: tcpip.ErrNoPortAvailable, + }, + { + name: "port-tester-error", + f: func(port uint16) (bool, *tcpip.Error) { + return false, customErr + }, + wantErr: customErr, + }, + { + name: "only-port-16042-available", + f: func(port uint16) (bool, *tcpip.Error) { + if port == firstEphemeral+42 { + return true, nil + } + return false, nil + }, + wantPort: firstEphemeral + 42, + }, + { + name: "only-port-under-16000-available", + f: func(port uint16) (bool, *tcpip.Error) { + if port < firstEphemeral { + return true, nil + } + return false, nil + }, + wantErr: tcpip.ErrNoPortAvailable, + }, + } { + t.Run(test.name, func(t *testing.T) { + if port, err := pm.PickEphemeralPort(test.f); port != test.wantPort || err != test.wantErr { + t.Errorf("PickEphemeralPort(..) 
= (port %d, err %v); want (port %d, err %v)", port, err, test.wantPort, test.wantErr) + } + }) + } +} diff --git a/pkg/tcpip/sample/tun_tcp_connect/BUILD b/pkg/tcpip/sample/tun_tcp_connect/BUILD new file mode 100644 index 000000000..870ee0433 --- /dev/null +++ b/pkg/tcpip/sample/tun_tcp_connect/BUILD @@ -0,0 +1,20 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_binary") + +go_binary( + name = "tun_tcp_connect", + srcs = ["main.go"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/link/fdbased", + "//pkg/tcpip/link/rawfile", + "//pkg/tcpip/link/sniffer", + "//pkg/tcpip/link/tun", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/stack", + "//pkg/tcpip/transport/tcp", + "//pkg/waiter", + ], +) diff --git a/pkg/tcpip/sample/tun_tcp_connect/main.go b/pkg/tcpip/sample/tun_tcp_connect/main.go new file mode 100644 index 000000000..332929c85 --- /dev/null +++ b/pkg/tcpip/sample/tun_tcp_connect/main.go @@ -0,0 +1,208 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This sample creates a stack with TCP and IPv4 protocols on top of a TUN +// device, and connects to a peer. Similar to "nc <address> <port>". While the +// sample is running, attempts to connect to its IPv4 address will result in +// a RST segment. 
+// +// As an example of how to run it, a TUN device can be created and enabled on +// a linux host as follows (this only needs to be done once per boot): +// +// [sudo] ip tuntap add user <username> mode tun <device-name> +// [sudo] ip link set <device-name> up +// [sudo] ip addr add <ipv4-address>/<mask-length> dev <device-name> +// +// A concrete example: +// +// $ sudo ip tuntap add user wedsonaf mode tun tun0 +// $ sudo ip link set tun0 up +// $ sudo ip addr add 192.168.1.1/24 dev tun0 +// +// Then one can run tun_tcp_connect as such: +// +// $ ./tun/tun_tcp_connect tun0 192.168.1.2 0 192.168.1.1 1234 +// +// This will attempt to connect to the linux host's stack. One can run nc in +// listen mode to accept a connect from tun_tcp_connect and exchange data. +package main + +import ( + "bufio" + "fmt" + "log" + "math/rand" + "net" + "os" + "strconv" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/fdbased" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/rawfile" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sniffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/tun" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// writer reads from standard input and writes to the endpoint until standard +// input is closed. It signals that it's done by closing the provided channel. 
+func writer(ch chan struct{}, ep tcpip.Endpoint) { + defer func() { + ep.Shutdown(tcpip.ShutdownWrite) + close(ch) + }() + + r := bufio.NewReader(os.Stdin) + for { + v := buffer.NewView(1024) + n, err := r.Read(v) + if err != nil { + return + } + + v.CapLength(n) + for len(v) > 0 { + n, err := ep.Write(tcpip.SlicePayload(v), tcpip.WriteOptions{}) + if err != nil { + fmt.Println("Write failed:", err) + return + } + + v.TrimFront(int(n)) + } + } +} + +func main() { + if len(os.Args) != 6 { + log.Fatal("Usage: ", os.Args[0], " <tun-device> <local-ipv4-address> <local-port> <remote-ipv4-address> <remote-port>") + } + + tunName := os.Args[1] + addrName := os.Args[2] + portName := os.Args[3] + remoteAddrName := os.Args[4] + remotePortName := os.Args[5] + + rand.Seed(time.Now().UnixNano()) + + addr := tcpip.Address(net.ParseIP(addrName).To4()) + remote := tcpip.FullAddress{ + NIC: 1, + Addr: tcpip.Address(net.ParseIP(remoteAddrName).To4()), + } + + var localPort uint16 + if v, err := strconv.Atoi(portName); err != nil { + log.Fatalf("Unable to convert port %v: %v", portName, err) + } else { + localPort = uint16(v) + } + + if v, err := strconv.Atoi(remotePortName); err != nil { + log.Fatalf("Unable to convert port %v: %v", remotePortName, err) + } else { + remote.Port = uint16(v) + } + + // Create the stack with ipv4 and tcp protocols, then add a tun-based + // NIC and ipv4 address. + s := stack.New([]string{ipv4.ProtocolName}, []string{tcp.ProtocolName}) + + mtu, err := rawfile.GetMTU(tunName) + if err != nil { + log.Fatal(err) + } + + fd, err := tun.Open(tunName) + if err != nil { + log.Fatal(err) + } + + linkID := fdbased.New(&fdbased.Options{FD: fd, MTU: mtu}) + if err := s.CreateNIC(1, sniffer.New(linkID)); err != nil { + log.Fatal(err) + } + + if err := s.AddAddress(1, ipv4.ProtocolNumber, addr); err != nil { + log.Fatal(err) + } + + // Add default route. 
+ s.SetRouteTable([]tcpip.Route{ + { + Destination: "\x00\x00\x00\x00", + Mask: "\x00\x00\x00\x00", + Gateway: "", + NIC: 1, + }, + }) + + // Create TCP endpoint. + var wq waiter.Queue + ep, e := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq) + if err != nil { + log.Fatal(e) + } + + // Bind if a port is specified. + if localPort != 0 { + if err := ep.Bind(tcpip.FullAddress{0, "", localPort}, nil); err != nil { + log.Fatal("Bind failed: ", err) + } + } + + // Issue connect request and wait for it to complete. + waitEntry, notifyCh := waiter.NewChannelEntry(nil) + wq.EventRegister(&waitEntry, waiter.EventOut) + terr := ep.Connect(remote) + if terr == tcpip.ErrConnectStarted { + fmt.Println("Connect is pending...") + <-notifyCh + terr = ep.GetSockOpt(tcpip.ErrorOption{}) + } + wq.EventUnregister(&waitEntry) + + if terr != nil { + log.Fatal("Unable to connect: ", terr) + } + + fmt.Println("Connected") + + // Start the writer in its own goroutine. + writerCompletedCh := make(chan struct{}) + go writer(writerCompletedCh, ep) // S/R-FIXME + + // Read data and write to standard output until the peer closes the + // connection from its side. + wq.EventRegister(&waitEntry, waiter.EventIn) + for { + v, err := ep.Read(nil) + if err != nil { + if err == tcpip.ErrClosedForReceive { + break + } + + if err == tcpip.ErrWouldBlock { + <-notifyCh + continue + } + + log.Fatal("Read() failed:", err) + } + + os.Stdout.Write(v) + } + wq.EventUnregister(&waitEntry) + + // The reader has completed. Now wait for the writer as well. 
+ <-writerCompletedCh + + ep.Close() +} diff --git a/pkg/tcpip/sample/tun_tcp_echo/BUILD b/pkg/tcpip/sample/tun_tcp_echo/BUILD new file mode 100644 index 000000000..c51528a12 --- /dev/null +++ b/pkg/tcpip/sample/tun_tcp_echo/BUILD @@ -0,0 +1,20 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_binary") + +go_binary( + name = "tun_tcp_echo", + srcs = ["main.go"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/link/fdbased", + "//pkg/tcpip/link/rawfile", + "//pkg/tcpip/link/tun", + "//pkg/tcpip/network/arp", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/network/ipv6", + "//pkg/tcpip/stack", + "//pkg/tcpip/transport/tcp", + "//pkg/waiter", + ], +) diff --git a/pkg/tcpip/sample/tun_tcp_echo/main.go b/pkg/tcpip/sample/tun_tcp_echo/main.go new file mode 100644 index 000000000..10cd701af --- /dev/null +++ b/pkg/tcpip/sample/tun_tcp_echo/main.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This sample creates a stack with TCP and IPv4 protocols on top of a TUN +// device, and listens on a port. Data received by the server in the accepted +// connections is echoed back to the clients. 
+package main + +import ( + "flag" + "log" + "math/rand" + "net" + "os" + "strconv" + "strings" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/fdbased" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/rawfile" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/tun" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/arp" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv6" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +var tap = flag.Bool("tap", false, "use tap istead of tun") +var mac = flag.String("mac", "aa:00:01:01:01:01", "mac address to use in tap device") + +func echo(wq *waiter.Queue, ep tcpip.Endpoint) { + defer ep.Close() + + // Create wait queue entry that notifies a channel. + waitEntry, notifyCh := waiter.NewChannelEntry(nil) + + wq.EventRegister(&waitEntry, waiter.EventIn) + defer wq.EventUnregister(&waitEntry) + + for { + v, err := ep.Read(nil) + if err != nil { + if err == tcpip.ErrWouldBlock { + <-notifyCh + continue + } + + return + } + + ep.Write(tcpip.SlicePayload(v), tcpip.WriteOptions{}) + } +} + +func main() { + flag.Parse() + if len(flag.Args()) != 3 { + log.Fatal("Usage: ", os.Args[0], " <tun-device> <local-address> <local-port>") + } + + tunName := flag.Arg(0) + addrName := flag.Arg(1) + portName := flag.Arg(2) + + rand.Seed(time.Now().UnixNano()) + + // Parse the mac address. + maddr, err := net.ParseMAC(*mac) + if err != nil { + log.Fatalf("Bad MAC address: %v", *mac) + } + + // Parse the IP address. Support both ipv4 and ipv6. 
+ parsedAddr := net.ParseIP(addrName) + if parsedAddr == nil { + log.Fatalf("Bad IP address: %v", addrName) + } + + var addr tcpip.Address + var proto tcpip.NetworkProtocolNumber + if parsedAddr.To4() != nil { + addr = tcpip.Address(parsedAddr.To4()) + proto = ipv4.ProtocolNumber + } else if parsedAddr.To16() != nil { + addr = tcpip.Address(parsedAddr.To16()) + proto = ipv6.ProtocolNumber + } else { + log.Fatalf("Unknown IP type: %v", addrName) + } + + localPort, err := strconv.Atoi(portName) + if err != nil { + log.Fatalf("Unable to convert port %v: %v", portName, err) + } + + // Create the stack with ip and tcp protocols, then add a tun-based + // NIC and address. + s := stack.New([]string{ipv4.ProtocolName, ipv6.ProtocolName, arp.ProtocolName}, []string{tcp.ProtocolName}) + + mtu, err := rawfile.GetMTU(tunName) + if err != nil { + log.Fatal(err) + } + + var fd int + if *tap { + fd, err = tun.OpenTAP(tunName) + } else { + fd, err = tun.Open(tunName) + } + if err != nil { + log.Fatal(err) + } + + linkID := fdbased.New(&fdbased.Options{ + FD: fd, + MTU: mtu, + EthernetHeader: *tap, + Address: tcpip.LinkAddress(maddr), + }) + if err := s.CreateNIC(1, linkID); err != nil { + log.Fatal(err) + } + + if err := s.AddAddress(1, proto, addr); err != nil { + log.Fatal(err) + } + + if err := s.AddAddress(1, arp.ProtocolNumber, arp.ProtocolAddress); err != nil { + log.Fatal(err) + } + + // Add default route. + s.SetRouteTable([]tcpip.Route{ + { + Destination: tcpip.Address(strings.Repeat("\x00", len(addr))), + Mask: tcpip.Address(strings.Repeat("\x00", len(addr))), + Gateway: "", + NIC: 1, + }, + }) + + // Create TCP endpoint, bind it, then start listening. 
+ var wq waiter.Queue + ep, e := s.NewEndpoint(tcp.ProtocolNumber, proto, &wq) + if err != nil { + log.Fatal(e) + } + + defer ep.Close() + + if err := ep.Bind(tcpip.FullAddress{0, "", uint16(localPort)}, nil); err != nil { + log.Fatal("Bind failed: ", err) + } + + if err := ep.Listen(10); err != nil { + log.Fatal("Listen failed: ", err) + } + + // Wait for connections to appear. + waitEntry, notifyCh := waiter.NewChannelEntry(nil) + wq.EventRegister(&waitEntry, waiter.EventIn) + defer wq.EventUnregister(&waitEntry) + + for { + n, wq, err := ep.Accept() + if err != nil { + if err == tcpip.ErrWouldBlock { + <-notifyCh + continue + } + + log.Fatal("Accept() failed:", err) + } + + go echo(wq, n) // S/R-FIXME + } +} diff --git a/pkg/tcpip/seqnum/BUILD b/pkg/tcpip/seqnum/BUILD new file mode 100644 index 000000000..0c717ec8d --- /dev/null +++ b/pkg/tcpip/seqnum/BUILD @@ -0,0 +1,26 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "seqnum_state", + srcs = [ + "seqnum.go", + ], + out = "seqnum_state.go", + package = "seqnum", +) + +go_library( + name = "seqnum", + srcs = [ + "seqnum.go", + "seqnum_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum", + visibility = [ + "//visibility:public", + ], + deps = ["//pkg/state"], +) diff --git a/pkg/tcpip/seqnum/seqnum.go b/pkg/tcpip/seqnum/seqnum.go new file mode 100644 index 000000000..f689be984 --- /dev/null +++ b/pkg/tcpip/seqnum/seqnum.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package seqnum defines the types and methods for TCP sequence numbers such +// that they fit in 32-bit words and work properly when overflows occur. +package seqnum + +// Value represents the value of a sequence number. 
type Value uint32

// Size represents the size (length) of a sequence number window.
type Size uint32

// LessThan checks if v is before w, i.e., v < w, under wraparound (mod 2^32)
// sequence-number arithmetic.
func (v Value) LessThan(w Value) bool {
	return int32(v-w) < 0
}

// LessThanEq returns true if v == w or v is before w, i.e., v < w.
func (v Value) LessThanEq(w Value) bool {
	return v == w || v.LessThan(w)
}

// InRange checks if v is in the range [a,b), i.e., a <= v < b.
func (v Value) InRange(a, b Value) bool {
	// A single unsigned comparison covers the wraparound case too.
	return v-a < b-a
}

// InWindow checks if v is in the window that starts at 'first' and spans 'size'
// sequence numbers.
func (v Value) InWindow(first Value, size Size) bool {
	return v.InRange(first, first.Add(size))
}

// Overlap checks if the window [a,a+b) overlaps with the window [x, x+y).
func Overlap(a Value, b Size, x Value, y Size) bool {
	// Two windows overlap iff each one starts before the other ends.
	return a.LessThan(x.Add(y)) && x.LessThan(a.Add(b))
}

// Add calculates the sequence number following the [v, v+s) window.
func (v Value) Add(s Size) Value {
	return v + Value(s)
}

// Size calculates the size of the window defined by [v, w).
func (v Value) Size(w Value) Size {
	return Size(w - v)
}

// UpdateForward updates v such that it becomes v + s.
+func (v *Value) UpdateForward(s Size) { + *v += Value(s) +} diff --git a/pkg/tcpip/stack/BUILD b/pkg/tcpip/stack/BUILD new file mode 100644 index 000000000..079ade2c8 --- /dev/null +++ b/pkg/tcpip/stack/BUILD @@ -0,0 +1,70 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "stack_state", + srcs = [ + "registration.go", + "stack.go", + ], + out = "stack_state.go", + package = "stack", +) + +go_library( + name = "stack", + srcs = [ + "linkaddrcache.go", + "nic.go", + "registration.go", + "route.go", + "stack.go", + "stack_global_state.go", + "stack_state.go", + "transport_demuxer.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/stack", + visibility = [ + "//visibility:public", + ], + deps = [ + "//pkg/ilist", + "//pkg/sleep", + "//pkg/state", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/ports", + "//pkg/tcpip/seqnum", + "//pkg/waiter", + ], +) + +go_test( + name = "stack_x_test", + size = "small", + srcs = [ + "stack_test.go", + "transport_test.go", + ], + deps = [ + ":stack", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/link/channel", + "//pkg/waiter", + ], +) + +go_test( + name = "stack_test", + size = "small", + srcs = ["linkaddrcache_test.go"], + embed = [":stack"], + deps = [ + "//pkg/sleep", + "//pkg/tcpip", + ], +) diff --git a/pkg/tcpip/stack/linkaddrcache.go b/pkg/tcpip/stack/linkaddrcache.go new file mode 100644 index 000000000..789f97882 --- /dev/null +++ b/pkg/tcpip/stack/linkaddrcache.go @@ -0,0 +1,313 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package stack + +import ( + "fmt" + "sync" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +const linkAddrCacheSize = 512 // max cache entries + +// linkAddrCache is a fixed-sized cache mapping IP addresses to link addresses. +// +// The entries are stored in a ring buffer, oldest entry replaced first. +// +// This struct is safe for concurrent use. +type linkAddrCache struct { + // ageLimit is how long a cache entry is valid for. + ageLimit time.Duration + + // resolutionTimeout is the amount of time to wait for a link request to + // resolve an address. + resolutionTimeout time.Duration + + // resolutionAttempts is the number of times an address is attempted to be + // resolved before failing. + resolutionAttempts int + + mu sync.Mutex + cache map[tcpip.FullAddress]*linkAddrEntry + next int // array index of next available entry + entries [linkAddrCacheSize]linkAddrEntry +} + +// entryState controls the state of a single entry in the cache. +type entryState int + +const ( + // incomplete means that there is an outstanding request to resolve the + // address. This is the initial state. + incomplete entryState = iota + // ready means that the address has been resolved and can be used. + ready + // failed means that address resolution timed out and the address + // could not be resolved. + failed + // expired means that the cache entry has expired and the address must be + // resolved again. + expired +) + +// String implements Stringer. +func (s entryState) String() string { + switch s { + case incomplete: + return "incomplete" + case ready: + return "ready" + case failed: + return "failed" + case expired: + return "expired" + default: + return fmt.Sprintf("invalid entryState: %d", s) + } +} + +// A linkAddrEntry is an entry in the linkAddrCache. +// This struct is thread-compatible. 
type linkAddrEntry struct {
	// addr is the cache key; linkAddr is the resolved value (empty while
	// resolution is in progress).
	addr       tcpip.FullAddress
	linkAddr   tcpip.LinkAddress
	// expiration is the deadline after which the entry is treated as
	// expired; s is the current state-machine state.
	expiration time.Time
	s          entryState

	// wakers is a set of waiters for address resolution result. Anytime
	// state transitions out of 'incomplete' these waiters are notified.
	wakers map[*sleep.Waker]struct{}

	// cancel, when signaled, stops the background resolution goroutine
	// for this entry (buffered so the sender never blocks).
	cancel chan struct{}
}

// state returns the entry's current state, forcing a transition to expired
// first if the expiration deadline has passed.
func (e *linkAddrEntry) state() entryState {
	if e.s != expired && time.Now().After(e.expiration) {
		// Force the transition to ensure waiters are notified.
		e.changeState(expired)
	}
	return e.s
}

// changeState moves the entry to ns, enforcing the legal state machine
// (incomplete -> anything; ready/failed -> expired only; expired is terminal)
// and waking all waiters once resolution leaves 'incomplete'.
func (e *linkAddrEntry) changeState(ns entryState) {
	if e.s == ns {
		return
	}

	// Validate state transition.
	switch e.s {
	case incomplete:
		// All transitions are valid.
	case ready, failed:
		if ns != expired {
			panic(fmt.Sprintf("invalid state transition from %v to %v", e.s, ns))
		}
	case expired:
		// Terminal state.
		panic(fmt.Sprintf("invalid state transition from %v to %v", e.s, ns))
	default:
		panic(fmt.Sprintf("invalid state: %v", e.s))
	}

	// Notify whoever is waiting on address resolution when transitioning
	// out of 'incomplete'.
	if e.s == incomplete {
		for w := range e.wakers {
			w.Assert()
		}
		e.wakers = nil
	}
	e.s = ns
}

// addWaker registers w to be notified when resolution completes.
func (e *linkAddrEntry) addWaker(w *sleep.Waker) {
	e.wakers[w] = struct{}{}
}

// removeWaker unregisters w; safe to call if w was never added.
func (e *linkAddrEntry) removeWaker(w *sleep.Waker) {
	delete(e.wakers, w)
}

// add adds a k -> v mapping to the cache.
func (c *linkAddrCache) add(k tcpip.FullAddress, v tcpip.LinkAddress) {
	c.mu.Lock()
	defer c.mu.Unlock()

	entry := c.cache[k]
	if entry != nil {
		s := entry.state()
		if s != expired && entry.linkAddr == v {
			// Disregard repeated calls.
			return
		}
		// Check if entry is waiting for address resolution.
		if s == incomplete {
			entry.linkAddr = v
		} else {
			// Otherwise create a new entry to replace it.
			entry = c.makeAndAddEntry(k, v)
		}
	} else {
		entry = c.makeAndAddEntry(k, v)
	}

	entry.changeState(ready)
}

// makeAndAddEntry is a helper function to create and add a new
// entry to the cache map and evict older entry as needed.
// It must be called with c.mu held.
func (c *linkAddrCache) makeAndAddEntry(k tcpip.FullAddress, v tcpip.LinkAddress) *linkAddrEntry {
	// Take over the next entry.
	entry := &c.entries[c.next]
	if c.cache[entry.addr] == entry {
		delete(c.cache, entry.addr)
	}

	// Mark the soon-to-be-replaced entry as expired, just in case there is
	// someone waiting for address resolution on it.
	entry.changeState(expired)
	if entry.cancel != nil {
		entry.cancel <- struct{}{}
	}

	*entry = linkAddrEntry{
		addr:       k,
		linkAddr:   v,
		expiration: time.Now().Add(c.ageLimit),
		wakers:     make(map[*sleep.Waker]struct{}),
		cancel:     make(chan struct{}, 1),
	}

	c.cache[k] = entry
	c.next++
	if c.next == len(c.entries) {
		c.next = 0
	}
	return entry
}

// get reports any known link address for k. If the address is unknown and a
// resolver is supplied, background resolution is kicked off and
// tcpip.ErrWouldBlock is returned; waker (if any) is notified when resolution
// finishes.
func (c *linkAddrCache) get(k tcpip.FullAddress, linkRes LinkAddressResolver, localAddr tcpip.Address, linkEP LinkEndpoint, waker *sleep.Waker) (tcpip.LinkAddress, *tcpip.Error) {
	if linkRes != nil {
		if addr, ok := linkRes.ResolveStaticAddress(k.Addr); ok {
			return addr, nil
		}
	}

	c.mu.Lock()
	entry := c.cache[k]
	if entry == nil || entry.state() == expired {
		c.mu.Unlock()
		if linkRes == nil {
			return "", tcpip.ErrNoLinkAddress
		}
		c.startAddressResolution(k, linkRes, localAddr, linkEP, waker)
		return "", tcpip.ErrWouldBlock
	}
	defer c.mu.Unlock()

	switch s := entry.state(); s {
	case expired:
		// It's possible that entry expired between state() call above and here
		// in that case it's safe to consider it ready.
		fallthrough
	case ready:
		return entry.linkAddr, nil
	case failed:
		return "", tcpip.ErrNoLinkAddress
	case incomplete:
		// Address resolution is still in progress.
		entry.addWaker(waker)
		return "", tcpip.ErrWouldBlock
	default:
		panic(fmt.Sprintf("invalid cache entry state: %d", s))
	}
}

// removeWaker removes a waker previously added through get().
func (c *linkAddrCache) removeWaker(k tcpip.FullAddress, waker *sleep.Waker) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if entry := c.cache[k]; entry != nil {
		entry.removeWaker(waker)
	}
}

// startAddressResolution creates an 'incomplete' cache entry for k and spawns
// a goroutine that repeatedly issues link address requests until the entry
// resolves, fails, or is canceled by eviction.
func (c *linkAddrCache) startAddressResolution(k tcpip.FullAddress, linkRes LinkAddressResolver, localAddr tcpip.Address, linkEP LinkEndpoint, waker *sleep.Waker) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Look up again with lock held to ensure entry wasn't added by someone else.
	if e := c.cache[k]; e != nil && e.state() != expired {
		return
	}

	// Add 'incomplete' entry in the cache to mark that resolution is in progress.
	e := c.makeAndAddEntry(k, "")
	e.addWaker(waker)

	go func() { // S/R-FIXME
		for i := 0; ; i++ {
			// Send link request, then wait for the timeout limit and check
			// whether the request succeeded.
			linkRes.LinkAddressRequest(k.Addr, localAddr, linkEP)
			// Re-read cancel under the lock: eviction replaces the
			// entry (and its channel) while we sleep.
			c.mu.Lock()
			cancel := e.cancel
			c.mu.Unlock()

			select {
			case <-time.After(c.resolutionTimeout):
				if stop := c.checkLinkRequest(k, i); stop {
					return
				}
			case <-cancel:
				return
			}
		}
	}()
}

// checkLinkRequest checks whether previous attempt to resolve address has succeeded
// and mark the entry accordingly, e.g. ready, failed, etc. Return true if request
// can stop, false if another request should be sent.
func (c *linkAddrCache) checkLinkRequest(k tcpip.FullAddress, attempt int) bool {
	c.mu.Lock()
	defer c.mu.Unlock()

	entry, ok := c.cache[k]
	if !ok {
		// Entry was evicted from the cache.
		return true
	}

	switch s := entry.state(); s {
	case ready, failed, expired:
		// Entry was made ready by resolver or failed. Either way we're done.
		return true
	case incomplete:
		if attempt+1 >= c.resolutionAttempts {
			// Max number of retries reached, mark entry as failed.
			entry.changeState(failed)
			return true
		}
		// No response yet, need to send another ARP request.
		return false
	default:
		panic(fmt.Sprintf("invalid cache entry state: %d", s))
	}
}

// newLinkAddrCache creates a linkAddrCache with the given entry lifetime and
// resolution retry policy.
func newLinkAddrCache(ageLimit, resolutionTimeout time.Duration, resolutionAttempts int) *linkAddrCache {
	return &linkAddrCache{
		ageLimit:           ageLimit,
		resolutionTimeout:  resolutionTimeout,
		resolutionAttempts: resolutionAttempts,
		cache:              make(map[tcpip.FullAddress]*linkAddrEntry, linkAddrCacheSize),
	}
}
diff --git a/pkg/tcpip/stack/linkaddrcache_test.go b/pkg/tcpip/stack/linkaddrcache_test.go
new file mode 100644
index 000000000..e9897b2bd
--- /dev/null
+++ b/pkg/tcpip/stack/linkaddrcache_test.go
@@ -0,0 +1,256 @@
// Copyright 2016 The Netstack Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+ +package stack + +import ( + "fmt" + "sync" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" +) + +type testaddr struct { + addr tcpip.FullAddress + linkAddr tcpip.LinkAddress +} + +var testaddrs []testaddr + +type testLinkAddressResolver struct { + cache *linkAddrCache + delay time.Duration +} + +func (r *testLinkAddressResolver) LinkAddressRequest(addr, _ tcpip.Address, _ LinkEndpoint) *tcpip.Error { + go func() { + if r.delay > 0 { + time.Sleep(r.delay) + } + r.fakeRequest(addr) + }() + return nil +} + +func (r *testLinkAddressResolver) fakeRequest(addr tcpip.Address) { + for _, ta := range testaddrs { + if ta.addr.Addr == addr { + r.cache.add(ta.addr, ta.linkAddr) + break + } + } +} + +func (*testLinkAddressResolver) ResolveStaticAddress(addr tcpip.Address) (tcpip.LinkAddress, bool) { + if addr == "broadcast" { + return "mac_broadcast", true + } + return "", false +} + +func (*testLinkAddressResolver) LinkAddressProtocol() tcpip.NetworkProtocolNumber { + return 1 +} + +func getBlocking(c *linkAddrCache, addr tcpip.FullAddress, linkRes LinkAddressResolver) (tcpip.LinkAddress, *tcpip.Error) { + w := sleep.Waker{} + s := sleep.Sleeper{} + s.AddWaker(&w, 123) + defer s.Done() + + for { + if got, err := c.get(addr, linkRes, "", nil, &w); err != tcpip.ErrWouldBlock { + return got, err + } + s.Fetch(true) + } +} + +func init() { + for i := 0; i < 4*linkAddrCacheSize; i++ { + addr := fmt.Sprintf("Addr%06d", i) + testaddrs = append(testaddrs, testaddr{ + addr: tcpip.FullAddress{NIC: 1, Addr: tcpip.Address(addr)}, + linkAddr: tcpip.LinkAddress("Link" + addr), + }) + } +} + +func TestCacheOverflow(t *testing.T) { + c := newLinkAddrCache(1<<63-1, 1*time.Second, 3) + for i := len(testaddrs) - 1; i >= 0; i-- { + e := testaddrs[i] + c.add(e.addr, e.linkAddr) + got, err := c.get(e.addr, nil, "", nil, nil) + if err != nil { + t.Errorf("insert %d, c.get(%q)=%q, got error: %v", i, string(e.addr.Addr), got, err) + } 
+ if got != e.linkAddr { + t.Errorf("insert %d, c.get(%q)=%q, want %q", i, string(e.addr.Addr), got, e.linkAddr) + } + } + // Expect to find at least half of the most recent entries. + for i := 0; i < linkAddrCacheSize/2; i++ { + e := testaddrs[i] + got, err := c.get(e.addr, nil, "", nil, nil) + if err != nil { + t.Errorf("check %d, c.get(%q)=%q, got error: %v", i, string(e.addr.Addr), got, err) + } + if got != e.linkAddr { + t.Errorf("check %d, c.get(%q)=%q, want %q", i, string(e.addr.Addr), got, e.linkAddr) + } + } + // The earliest entries should no longer be in the cache. + for i := len(testaddrs) - 1; i >= len(testaddrs)-linkAddrCacheSize; i-- { + e := testaddrs[i] + if _, err := c.get(e.addr, nil, "", nil, nil); err != tcpip.ErrNoLinkAddress { + t.Errorf("check %d, c.get(%q), got error: %v, want: error ErrNoLinkAddress", i, string(e.addr.Addr), err) + } + } +} + +func TestCacheConcurrent(t *testing.T) { + c := newLinkAddrCache(1<<63-1, 1*time.Second, 3) + + var wg sync.WaitGroup + for r := 0; r < 16; r++ { + wg.Add(1) + go func() { + for _, e := range testaddrs { + c.add(e.addr, e.linkAddr) + c.get(e.addr, nil, "", nil, nil) // make work for gotsan + } + wg.Done() + }() + } + wg.Wait() + + // All goroutines add in the same order and add more values than + // can fit in the cache, so our eviction strategy requires that + // the last entry be present and the first be missing. 
+ e := testaddrs[len(testaddrs)-1] + got, err := c.get(e.addr, nil, "", nil, nil) + if err != nil { + t.Errorf("c.get(%q)=%q, got error: %v", string(e.addr.Addr), got, err) + } + if got != e.linkAddr { + t.Errorf("c.get(%q)=%q, want %q", string(e.addr.Addr), got, e.linkAddr) + } + + e = testaddrs[0] + if _, err := c.get(e.addr, nil, "", nil, nil); err != tcpip.ErrNoLinkAddress { + t.Errorf("c.get(%q), got error: %v, want: error ErrNoLinkAddress", string(e.addr.Addr), err) + } +} + +func TestCacheAgeLimit(t *testing.T) { + c := newLinkAddrCache(1*time.Millisecond, 1*time.Second, 3) + e := testaddrs[0] + c.add(e.addr, e.linkAddr) + time.Sleep(50 * time.Millisecond) + if _, err := c.get(e.addr, nil, "", nil, nil); err != tcpip.ErrNoLinkAddress { + t.Errorf("c.get(%q), got error: %v, want: error ErrNoLinkAddress", string(e.addr.Addr), err) + } +} + +func TestCacheReplace(t *testing.T) { + c := newLinkAddrCache(1<<63-1, 1*time.Second, 3) + e := testaddrs[0] + l2 := e.linkAddr + "2" + c.add(e.addr, e.linkAddr) + got, err := c.get(e.addr, nil, "", nil, nil) + if err != nil { + t.Errorf("c.get(%q)=%q, got error: %v", string(e.addr.Addr), got, err) + } + if got != e.linkAddr { + t.Errorf("c.get(%q)=%q, want %q", string(e.addr.Addr), got, e.linkAddr) + } + + c.add(e.addr, l2) + got, err = c.get(e.addr, nil, "", nil, nil) + if err != nil { + t.Errorf("c.get(%q)=%q, got error: %v", string(e.addr.Addr), got, err) + } + if got != l2 { + t.Errorf("c.get(%q)=%q, want %q", string(e.addr.Addr), got, l2) + } +} + +func TestCacheResolution(t *testing.T) { + c := newLinkAddrCache(1<<63-1, 250*time.Millisecond, 1) + linkRes := &testLinkAddressResolver{cache: c} + for i, ta := range testaddrs { + got, err := getBlocking(c, ta.addr, linkRes) + if err != nil { + t.Errorf("check %d, c.get(%q)=%q, got error: %v", i, string(ta.addr.Addr), got, err) + } + if got != ta.linkAddr { + t.Errorf("check %d, c.get(%q)=%q, want %q", i, string(ta.addr.Addr), got, ta.linkAddr) + } + } + + // Check that 
after resolved, address stays in the cache and never returns WouldBlock. + for i := 0; i < 10; i++ { + e := testaddrs[len(testaddrs)-1] + got, err := c.get(e.addr, linkRes, "", nil, nil) + if err != nil { + t.Errorf("c.get(%q)=%q, got error: %v", string(e.addr.Addr), got, err) + } + if got != e.linkAddr { + t.Errorf("c.get(%q)=%q, want %q", string(e.addr.Addr), got, e.linkAddr) + } + } +} + +func TestCacheResolutionFailed(t *testing.T) { + c := newLinkAddrCache(1<<63-1, 10*time.Millisecond, 5) + linkRes := &testLinkAddressResolver{cache: c} + + // First, sanity check that resolution is working... + e := testaddrs[0] + got, err := getBlocking(c, e.addr, linkRes) + if err != nil { + t.Errorf("c.get(%q)=%q, got error: %v", string(e.addr.Addr), got, err) + } + if got != e.linkAddr { + t.Errorf("c.get(%q)=%q, want %q", string(e.addr.Addr), got, e.linkAddr) + } + + e.addr.Addr += "2" + if _, err := getBlocking(c, e.addr, linkRes); err != tcpip.ErrNoLinkAddress { + t.Errorf("c.get(%q), got error: %v, want: error ErrNoLinkAddress", string(e.addr.Addr), err) + } +} + +func TestCacheResolutionTimeout(t *testing.T) { + resolverDelay := 50 * time.Millisecond + expiration := resolverDelay / 2 + c := newLinkAddrCache(expiration, 1*time.Millisecond, 3) + linkRes := &testLinkAddressResolver{cache: c, delay: resolverDelay} + + e := testaddrs[0] + if _, err := getBlocking(c, e.addr, linkRes); err != tcpip.ErrNoLinkAddress { + t.Errorf("c.get(%q), got error: %v, want: error ErrNoLinkAddress", string(e.addr.Addr), err) + } +} + +// TestStaticResolution checks that static link addresses are resolved immediately and don't +// send resolution requests. 
+func TestStaticResolution(t *testing.T) { + c := newLinkAddrCache(1<<63-1, time.Millisecond, 1) + linkRes := &testLinkAddressResolver{cache: c, delay: time.Minute} + + addr := tcpip.Address("broadcast") + want := tcpip.LinkAddress("mac_broadcast") + got, err := c.get(tcpip.FullAddress{Addr: addr}, linkRes, "", nil, nil) + if err != nil { + t.Errorf("c.get(%q)=%q, got error: %v", string(addr), string(got), err) + } + if got != want { + t.Errorf("c.get(%q)=%q, want %q", string(addr), string(got), string(want)) + } +} diff --git a/pkg/tcpip/stack/nic.go b/pkg/tcpip/stack/nic.go new file mode 100644 index 000000000..8ff4310d5 --- /dev/null +++ b/pkg/tcpip/stack/nic.go @@ -0,0 +1,453 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stack + +import ( + "strings" + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/ilist" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" +) + +// NIC represents a "network interface card" to which the networking stack is +// attached. +type NIC struct { + stack *Stack + id tcpip.NICID + name string + linkEP LinkEndpoint + + demux *transportDemuxer + + mu sync.RWMutex + spoofing bool + promiscuous bool + primary map[tcpip.NetworkProtocolNumber]*ilist.List + endpoints map[NetworkEndpointID]*referencedNetworkEndpoint + subnets []tcpip.Subnet +} + +func newNIC(stack *Stack, id tcpip.NICID, name string, ep LinkEndpoint) *NIC { + return &NIC{ + stack: stack, + id: id, + name: name, + linkEP: ep, + demux: newTransportDemuxer(stack), + primary: make(map[tcpip.NetworkProtocolNumber]*ilist.List), + endpoints: make(map[NetworkEndpointID]*referencedNetworkEndpoint), + } +} + +// attachLinkEndpoint attaches the NIC to the endpoint, which will enable it +// to start delivering packets. 
+func (n *NIC) attachLinkEndpoint() { + n.linkEP.Attach(n) +} + +// setPromiscuousMode enables or disables promiscuous mode. +func (n *NIC) setPromiscuousMode(enable bool) { + n.mu.Lock() + n.promiscuous = enable + n.mu.Unlock() +} + +// setSpoofing enables or disables address spoofing. +func (n *NIC) setSpoofing(enable bool) { + n.mu.Lock() + n.spoofing = enable + n.mu.Unlock() +} + +// primaryEndpoint returns the primary endpoint of n for the given network +// protocol. +func (n *NIC) primaryEndpoint(protocol tcpip.NetworkProtocolNumber) *referencedNetworkEndpoint { + n.mu.RLock() + defer n.mu.RUnlock() + + list := n.primary[protocol] + if list == nil { + return nil + } + + for e := list.Front(); e != nil; e = e.Next() { + r := e.(*referencedNetworkEndpoint) + if r.tryIncRef() { + return r + } + } + + return nil +} + +// findEndpoint finds the endpoint, if any, with the given address. +func (n *NIC) findEndpoint(protocol tcpip.NetworkProtocolNumber, address tcpip.Address) *referencedNetworkEndpoint { + id := NetworkEndpointID{address} + + n.mu.RLock() + ref := n.endpoints[id] + if ref != nil && !ref.tryIncRef() { + ref = nil + } + spoofing := n.spoofing + n.mu.RUnlock() + + if ref != nil || !spoofing { + return ref + } + + // Try again with the lock in exclusive mode. If we still can't get the + // endpoint, create a new "temporary" endpoint. It will only exist while + // there's a route through it. + n.mu.Lock() + ref = n.endpoints[id] + if ref == nil || !ref.tryIncRef() { + ref, _ = n.addAddressLocked(protocol, address, true) + if ref != nil { + ref.holdsInsertRef = false + } + } + n.mu.Unlock() + return ref +} + +func (n *NIC) addAddressLocked(protocol tcpip.NetworkProtocolNumber, addr tcpip.Address, replace bool) (*referencedNetworkEndpoint, *tcpip.Error) { + netProto, ok := n.stack.networkProtocols[protocol] + if !ok { + return nil, tcpip.ErrUnknownProtocol + } + + // Create the new network endpoint. 
+ ep, err := netProto.NewEndpoint(n.id, addr, n.stack, n, n.linkEP) + if err != nil { + return nil, err + } + + id := *ep.ID() + if ref, ok := n.endpoints[id]; ok { + if !replace { + return nil, tcpip.ErrDuplicateAddress + } + + n.removeEndpointLocked(ref) + } + + ref := &referencedNetworkEndpoint{ + refs: 1, + ep: ep, + nic: n, + protocol: protocol, + holdsInsertRef: true, + } + + // Set up cache if link address resolution exists for this protocol. + if n.linkEP.Capabilities()&CapabilityResolutionRequired != 0 { + if linkRes := n.stack.linkAddrResolvers[protocol]; linkRes != nil { + ref.linkCache = n.stack + } + } + + n.endpoints[id] = ref + + l, ok := n.primary[protocol] + if !ok { + l = &ilist.List{} + n.primary[protocol] = l + } + + l.PushBack(ref) + + return ref, nil +} + +// AddAddress adds a new address to n, so that it starts accepting packets +// targeted at the given address (and network protocol). +func (n *NIC) AddAddress(protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) *tcpip.Error { + // Add the endpoint. + n.mu.Lock() + _, err := n.addAddressLocked(protocol, addr, false) + n.mu.Unlock() + + return err +} + +// Addresses returns the addresses associated with this NIC. +func (n *NIC) Addresses() []tcpip.ProtocolAddress { + n.mu.RLock() + defer n.mu.RUnlock() + addrs := make([]tcpip.ProtocolAddress, 0, len(n.endpoints)) + for nid, ep := range n.endpoints { + addrs = append(addrs, tcpip.ProtocolAddress{ + Protocol: ep.protocol, + Address: nid.LocalAddress, + }) + } + return addrs +} + +// AddSubnet adds a new subnet to n, so that it starts accepting packets +// targeted at the given address and network protocol. +func (n *NIC) AddSubnet(protocol tcpip.NetworkProtocolNumber, subnet tcpip.Subnet) { + n.mu.Lock() + n.subnets = append(n.subnets, subnet) + n.mu.Unlock() +} + +// Subnets returns the Subnets associated with this NIC. 
+func (n *NIC) Subnets() []tcpip.Subnet { + n.mu.RLock() + defer n.mu.RUnlock() + sns := make([]tcpip.Subnet, 0, len(n.subnets)+len(n.endpoints)) + for nid := range n.endpoints { + sn, err := tcpip.NewSubnet(nid.LocalAddress, tcpip.AddressMask(strings.Repeat("\xff", len(nid.LocalAddress)))) + if err != nil { + // This should never happen as the mask has been carefully crafted to + // match the address. + panic("Invalid endpoint subnet: " + err.Error()) + } + sns = append(sns, sn) + } + return append(sns, n.subnets...) +} + +func (n *NIC) removeEndpointLocked(r *referencedNetworkEndpoint) { + id := *r.ep.ID() + + // Nothing to do if the reference has already been replaced with a + // different one. + if n.endpoints[id] != r { + return + } + + if r.holdsInsertRef { + panic("Reference count dropped to zero before being removed") + } + + delete(n.endpoints, id) + n.primary[r.protocol].Remove(r) + r.ep.Close() +} + +func (n *NIC) removeEndpoint(r *referencedNetworkEndpoint) { + n.mu.Lock() + n.removeEndpointLocked(r) + n.mu.Unlock() +} + +// RemoveAddress removes an address from n. +func (n *NIC) RemoveAddress(addr tcpip.Address) *tcpip.Error { + n.mu.Lock() + r := n.endpoints[NetworkEndpointID{addr}] + if r == nil || !r.holdsInsertRef { + n.mu.Unlock() + return tcpip.ErrBadLocalAddress + } + + r.holdsInsertRef = false + n.mu.Unlock() + + r.decRef() + + return nil +} + +// DeliverNetworkPacket finds the appropriate network protocol endpoint and +// hands the packet over for further processing. This function is called when +// the NIC receives a packet from the physical interface. +// Note that the ownership of the slice backing vv is retained by the caller. +// This rule applies only to the slice itself, not to the items of the slice; +// the ownership of the items is not retained by the caller. 
+func (n *NIC) DeliverNetworkPacket(linkEP LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) { + netProto, ok := n.stack.networkProtocols[protocol] + if !ok { + atomic.AddUint64(&n.stack.stats.UnknownProtocolRcvdPackets, 1) + return + } + + if len(vv.First()) < netProto.MinimumPacketSize() { + atomic.AddUint64(&n.stack.stats.MalformedRcvdPackets, 1) + return + } + + src, dst := netProto.ParseAddresses(vv.First()) + id := NetworkEndpointID{dst} + + n.mu.RLock() + ref := n.endpoints[id] + if ref != nil && !ref.tryIncRef() { + ref = nil + } + promiscuous := n.promiscuous + subnets := n.subnets + n.mu.RUnlock() + + if ref == nil { + // Check if the packet is for a subnet this NIC cares about. + if !promiscuous { + for _, sn := range subnets { + if sn.Contains(dst) { + promiscuous = true + break + } + } + } + if promiscuous { + // Try again with the lock in exclusive mode. If we still can't + // get the endpoint, create a new "temporary" one. It will only + // exist while there's a route through it. + n.mu.Lock() + ref = n.endpoints[id] + if ref == nil || !ref.tryIncRef() { + ref, _ = n.addAddressLocked(protocol, dst, true) + if ref != nil { + ref.holdsInsertRef = false + } + } + n.mu.Unlock() + } + } + + if ref == nil { + atomic.AddUint64(&n.stack.stats.UnknownNetworkEndpointRcvdPackets, 1) + return + } + + r := makeRoute(protocol, dst, src, ref) + r.LocalLinkAddress = linkEP.LinkAddress() + r.RemoteLinkAddress = remoteLinkAddr + ref.ep.HandlePacket(&r, vv) + ref.decRef() +} + +// DeliverTransportPacket delivers the packets to the appropriate transport +// protocol endpoint. 
+func (n *NIC) DeliverTransportPacket(r *Route, protocol tcpip.TransportProtocolNumber, vv *buffer.VectorisedView) { + state, ok := n.stack.transportProtocols[protocol] + if !ok { + atomic.AddUint64(&n.stack.stats.UnknownProtocolRcvdPackets, 1) + return + } + + transProto := state.proto + if len(vv.First()) < transProto.MinimumPacketSize() { + atomic.AddUint64(&n.stack.stats.MalformedRcvdPackets, 1) + return + } + + srcPort, dstPort, err := transProto.ParsePorts(vv.First()) + if err != nil { + atomic.AddUint64(&n.stack.stats.MalformedRcvdPackets, 1) + return + } + + id := TransportEndpointID{dstPort, r.LocalAddress, srcPort, r.RemoteAddress} + if n.demux.deliverPacket(r, protocol, vv, id) { + return + } + if n.stack.demux.deliverPacket(r, protocol, vv, id) { + return + } + + // Try to deliver to per-stack default handler. + if state.defaultHandler != nil { + if state.defaultHandler(r, id, vv) { + return + } + } + + // We could not find an appropriate destination for this packet, so + // deliver it to the global handler. + if !transProto.HandleUnknownDestinationPacket(r, id, vv) { + atomic.AddUint64(&n.stack.stats.MalformedRcvdPackets, 1) + } +} + +// DeliverTransportControlPacket delivers control packets to the appropriate +// transport protocol endpoint. +func (n *NIC) DeliverTransportControlPacket(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, typ ControlType, extra uint32, vv *buffer.VectorisedView) { + state, ok := n.stack.transportProtocols[trans] + if !ok { + return + } + + transProto := state.proto + + // ICMPv4 only guarantees that 8 bytes of the transport protocol will + // be present in the payload. We know that the ports are within the + // first 8 bytes for all known transport protocols. 
+ if len(vv.First()) < 8 {
+ return
+ }
+
+ srcPort, dstPort, err := transProto.ParsePorts(vv.First())
+ if err != nil {
+ return
+ }
+
+ id := TransportEndpointID{srcPort, local, dstPort, remote}
+ if n.demux.deliverControlPacket(net, trans, typ, extra, vv, id) {
+ return
+ }
+ if n.stack.demux.deliverControlPacket(net, trans, typ, extra, vv, id) {
+ return
+ }
+}
+
+// ID returns the identifier of n.
+func (n *NIC) ID() tcpip.NICID {
+ return n.id
+}
+
+type referencedNetworkEndpoint struct {
+ ilist.Entry
+ refs int32
+ ep NetworkEndpoint
+ nic *NIC
+ protocol tcpip.NetworkProtocolNumber
+
+ // linkCache is set if link address resolution is enabled for this
+ // protocol. Set to nil otherwise.
+ linkCache LinkAddressCache
+
+ // holdsInsertRef is protected by the NIC's mutex. It indicates whether
+ // the reference count is biased by 1 due to the insertion of the
+ // endpoint. It is reset to false when RemoveAddress is called on the
+ // NIC.
+ holdsInsertRef bool
+}
+
+// decRef decrements the ref count and cleans up the endpoint once it reaches
+// zero.
+func (r *referencedNetworkEndpoint) decRef() {
+ if atomic.AddInt32(&r.refs, -1) == 0 {
+ r.nic.removeEndpoint(r)
+ }
+}
+
+// incRef increments the ref count. It must only be called when the caller is
+// known to be holding a reference to the endpoint, otherwise tryIncRef should
+// be used.
+func (r *referencedNetworkEndpoint) incRef() {
+ atomic.AddInt32(&r.refs, 1)
+}
+
+// tryIncRef attempts to increment the ref count from n to n+1, but only if n is
+// not zero. That is, it will increment the count if the endpoint is still
+// alive, and do nothing if it has already been cleaned up. 
+func (r *referencedNetworkEndpoint) tryIncRef() bool { + for { + v := atomic.LoadInt32(&r.refs) + if v == 0 { + return false + } + + if atomic.CompareAndSwapInt32(&r.refs, v, v+1) { + return true + } + } +} diff --git a/pkg/tcpip/stack/registration.go b/pkg/tcpip/stack/registration.go new file mode 100644 index 000000000..e7e6381ac --- /dev/null +++ b/pkg/tcpip/stack/registration.go @@ -0,0 +1,322 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stack + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// NetworkEndpointID is the identifier of a network layer protocol endpoint. +// Currently the local address is sufficient because all supported protocols +// (i.e., IPv4 and IPv6) have different sizes for their addresses. +type NetworkEndpointID struct { + LocalAddress tcpip.Address +} + +// TransportEndpointID is the identifier of a transport layer protocol endpoint. +type TransportEndpointID struct { + // LocalPort is the local port associated with the endpoint. + LocalPort uint16 + + // LocalAddress is the local [network layer] address associated with + // the endpoint. + LocalAddress tcpip.Address + + // RemotePort is the remote port associated with the endpoint. + RemotePort uint16 + + // RemoteAddress it the remote [network layer] address associated with + // the endpoint. + RemoteAddress tcpip.Address +} + +// ControlType is the type of network control message. +type ControlType int + +// The following are the allowed values for ControlType values. 
+const ( + ControlPacketTooBig ControlType = iota + ControlPortUnreachable + ControlUnknown +) + +// TransportEndpoint is the interface that needs to be implemented by transport +// protocol (e.g., tcp, udp) endpoints that can handle packets. +type TransportEndpoint interface { + // HandlePacket is called by the stack when new packets arrive to + // this transport endpoint. + HandlePacket(r *Route, id TransportEndpointID, vv *buffer.VectorisedView) + + // HandleControlPacket is called by the stack when new control (e.g., + // ICMP) packets arrive to this transport endpoint. + HandleControlPacket(id TransportEndpointID, typ ControlType, extra uint32, vv *buffer.VectorisedView) +} + +// TransportProtocol is the interface that needs to be implemented by transport +// protocols (e.g., tcp, udp) that want to be part of the networking stack. +type TransportProtocol interface { + // Number returns the transport protocol number. + Number() tcpip.TransportProtocolNumber + + // NewEndpoint creates a new endpoint of the transport protocol. + NewEndpoint(stack *Stack, netProto tcpip.NetworkProtocolNumber, waitQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) + + // MinimumPacketSize returns the minimum valid packet size of this + // transport protocol. The stack automatically drops any packets smaller + // than this targeted at this protocol. + MinimumPacketSize() int + + // ParsePorts returns the source and destination ports stored in a + // packet of this protocol. + ParsePorts(v buffer.View) (src, dst uint16, err *tcpip.Error) + + // HandleUnknownDestinationPacket handles packets targeted at this + // protocol but that don't match any existing endpoint. For example, + // it is targeted at a port that have no listeners. + // + // The return value indicates whether the packet was well-formed (for + // stats purposes only). 
+ HandleUnknownDestinationPacket(r *Route, id TransportEndpointID, vv *buffer.VectorisedView) bool + + // SetOption allows enabling/disabling protocol specific features. + // SetOption returns an error if the option is not supported or the + // provided option value is invalid. + SetOption(option interface{}) *tcpip.Error + + // Option allows retrieving protocol specific option values. + // Option returns an error if the option is not supported or the + // provided option value is invalid. + Option(option interface{}) *tcpip.Error +} + +// TransportDispatcher contains the methods used by the network stack to deliver +// packets to the appropriate transport endpoint after it has been handled by +// the network layer. +type TransportDispatcher interface { + // DeliverTransportPacket delivers packets to the appropriate + // transport protocol endpoint. + DeliverTransportPacket(r *Route, protocol tcpip.TransportProtocolNumber, vv *buffer.VectorisedView) + + // DeliverTransportControlPacket delivers control packets to the + // appropriate transport protocol endpoint. + DeliverTransportControlPacket(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, typ ControlType, extra uint32, vv *buffer.VectorisedView) +} + +// NetworkEndpoint is the interface that needs to be implemented by endpoints +// of network layer protocols (e.g., ipv4, ipv6). +type NetworkEndpoint interface { + // MTU is the maximum transmission unit for this endpoint. This is + // generally calculated as the MTU of the underlying data link endpoint + // minus the network endpoint max header length. + MTU() uint32 + + // Capabilities returns the set of capabilities supported by the + // underlying link-layer endpoint. + Capabilities() LinkEndpointCapabilities + + // MaxHeaderLength returns the maximum size the network (and lower + // level layers combined) headers can have. 
Higher levels use this
+ // information to reserve space in the front of the packets they're
+ // building.
+ MaxHeaderLength() uint16
+
+ // WritePacket writes a packet to the given destination address and
+ // protocol.
+ WritePacket(r *Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.TransportProtocolNumber) *tcpip.Error
+
+ // ID returns the network protocol endpoint ID.
+ ID() *NetworkEndpointID
+
+ // NICID returns the id of the NIC this endpoint belongs to.
+ NICID() tcpip.NICID
+
+ // HandlePacket is called by the link layer when new packets arrive to
+ // this network endpoint.
+ HandlePacket(r *Route, vv *buffer.VectorisedView)
+
+ // Close is called when the endpoint is removed from a stack.
+ Close()
+}
+
+// NetworkProtocol is the interface that needs to be implemented by network
+// protocols (e.g., ipv4, ipv6) that want to be part of the networking stack.
+type NetworkProtocol interface {
+ // Number returns the network protocol number.
+ Number() tcpip.NetworkProtocolNumber
+
+ // MinimumPacketSize returns the minimum valid packet size of this
+ // network protocol. The stack automatically drops any packets smaller
+ // than this targeted at this protocol.
+ MinimumPacketSize() int
+
+ // ParseAddresses returns the source and destination addresses stored in a
+ // packet of this protocol.
+ ParseAddresses(v buffer.View) (src, dst tcpip.Address)
+
+ // NewEndpoint creates a new endpoint of this protocol.
+ NewEndpoint(nicid tcpip.NICID, addr tcpip.Address, linkAddrCache LinkAddressCache, dispatcher TransportDispatcher, sender LinkEndpoint) (NetworkEndpoint, *tcpip.Error)
+
+ // SetOption allows enabling/disabling protocol specific features.
+ // SetOption returns an error if the option is not supported or the
+ // provided option value is invalid.
+ SetOption(option interface{}) *tcpip.Error
+
+ // Option allows retrieving protocol specific option values. 
+ // Option returns an error if the option is not supported or the + // provided option value is invalid. + Option(option interface{}) *tcpip.Error +} + +// NetworkDispatcher contains the methods used by the network stack to deliver +// packets to the appropriate network endpoint after it has been handled by +// the data link layer. +type NetworkDispatcher interface { + // DeliverNetworkPacket finds the appropriate network protocol + // endpoint and hands the packet over for further processing. + DeliverNetworkPacket(linkEP LinkEndpoint, remoteLinkAddr tcpip.LinkAddress, protocol tcpip.NetworkProtocolNumber, vv *buffer.VectorisedView) +} + +// LinkEndpointCapabilities is the type associated with the capabilities +// supported by a link-layer endpoint. It is a set of bitfields. +type LinkEndpointCapabilities uint + +// The following are the supported link endpoint capabilities. +const ( + CapabilityChecksumOffload LinkEndpointCapabilities = 1 << iota + CapabilityResolutionRequired +) + +// LinkEndpoint is the interface implemented by data link layer protocols (e.g., +// ethernet, loopback, raw) and used by network layer protocols to send packets +// out through the implementer's data link endpoint. +type LinkEndpoint interface { + // MTU is the maximum transmission unit for this endpoint. This is + // usually dictated by the backing physical network; when such a + // physical network doesn't exist, the limit is generally 64k, which + // includes the maximum size of an IP packet. + MTU() uint32 + + // Capabilities returns the set of capabilities supported by the + // endpoint. + Capabilities() LinkEndpointCapabilities + + // MaxHeaderLength returns the maximum size the data link (and + // lower level layers combined) headers can have. Higher levels use this + // information to reserve space in the front of the packets they're + // building. + MaxHeaderLength() uint16 + + // LinkAddress returns the link address (typically a MAC) of the + // link endpoint. 
+	LinkAddress() tcpip.LinkAddress
+
+	// WritePacket writes a packet with the given protocol through the given
+	// route.
+	WritePacket(r *Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.NetworkProtocolNumber) *tcpip.Error
+
+	// Attach attaches the data link layer endpoint to the network-layer
+	// dispatcher of the stack.
+	Attach(dispatcher NetworkDispatcher)
+}
+
+// A LinkAddressResolver is an extension to a NetworkProtocol that
+// can resolve link addresses.
+type LinkAddressResolver interface {
+	// LinkAddressRequest sends a request for the LinkAddress of addr.
+	// The request is sent on linkEP with localAddr as the source.
+	//
+	// A valid response will cause the discovery protocol's network
+	// endpoint to call AddLinkAddress.
+	LinkAddressRequest(addr, localAddr tcpip.Address, linkEP LinkEndpoint) *tcpip.Error
+
+	// ResolveStaticAddress attempts to resolve address without sending
+	// requests. It either resolves the name immediately or returns the
+	// empty LinkAddress.
+	//
+	// It can be used to resolve broadcast addresses for example.
+	ResolveStaticAddress(addr tcpip.Address) (tcpip.LinkAddress, bool)
+
+	// LinkAddressProtocol returns the network protocol of the
+	// addresses this resolver can resolve.
+	LinkAddressProtocol() tcpip.NetworkProtocolNumber
+}
+
+// A LinkAddressCache caches link addresses.
+type LinkAddressCache interface {
+	// CheckLocalAddress determines if the given local address exists, and
+	// if it does, returns the id of the NIC it's bound to. Returns 0 if
+	// the address does not exist.
+	CheckLocalAddress(nicid tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) tcpip.NICID
+
+	// AddLinkAddress adds a link address to the cache.
+	AddLinkAddress(nicid tcpip.NICID, addr tcpip.Address, linkAddr tcpip.LinkAddress)
+
+	// GetLinkAddress looks up the cache to translate address to link address (e.g. IP -> MAC).
+	// If the LinkEndpoint requests address resolution and there is a LinkAddressResolver
+	// registered with the network protocol, the cache attempts to resolve the address
+	// and returns ErrWouldBlock. Waker is notified when address resolution is
+	// complete (success or not).
+	GetLinkAddress(nicid tcpip.NICID, addr, localAddr tcpip.Address, protocol tcpip.NetworkProtocolNumber, w *sleep.Waker) (tcpip.LinkAddress, *tcpip.Error)
+
+	// RemoveWaker removes a waker that has been added in GetLinkAddress().
+	RemoveWaker(nicid tcpip.NICID, addr tcpip.Address, waker *sleep.Waker)
+}
+
+// TransportProtocolFactory functions are used by the stack to instantiate
+// transport protocols.
+type TransportProtocolFactory func() TransportProtocol
+
+// NetworkProtocolFactory provides methods to be used by the stack to
+// instantiate network protocols.
+type NetworkProtocolFactory func() NetworkProtocol
+
+var (
+	transportProtocols = make(map[string]TransportProtocolFactory)
+	networkProtocols = make(map[string]NetworkProtocolFactory)
+
+	linkEPMu sync.RWMutex
+	nextLinkEndpointID tcpip.LinkEndpointID = 1
+	linkEndpoints = make(map[tcpip.LinkEndpointID]LinkEndpoint)
+)
+
+// RegisterTransportProtocolFactory registers a new transport protocol factory
+// with the stack so that it becomes available to users of the stack. This
+// function is intended to be called by init() functions of the protocols.
+func RegisterTransportProtocolFactory(name string, p TransportProtocolFactory) {
+	transportProtocols[name] = p
+}
+
+// RegisterNetworkProtocolFactory registers a new network protocol factory with
+// the stack so that it becomes available to users of the stack. This function
+// is intended to be called by init() functions of the protocols.
+func RegisterNetworkProtocolFactory(name string, p NetworkProtocolFactory) {
+	networkProtocols[name] = p
+}
+
+// RegisterLinkEndpoint registers a link-layer protocol endpoint and returns an
+// ID that can be used to refer to it.
+func RegisterLinkEndpoint(linkEP LinkEndpoint) tcpip.LinkEndpointID { + linkEPMu.Lock() + defer linkEPMu.Unlock() + + v := nextLinkEndpointID + nextLinkEndpointID++ + + linkEndpoints[v] = linkEP + + return v +} + +// FindLinkEndpoint finds the link endpoint associated with the given ID. +func FindLinkEndpoint(id tcpip.LinkEndpointID) LinkEndpoint { + linkEPMu.RLock() + defer linkEPMu.RUnlock() + + return linkEndpoints[id] +} diff --git a/pkg/tcpip/stack/route.go b/pkg/tcpip/stack/route.go new file mode 100644 index 000000000..12f5efba5 --- /dev/null +++ b/pkg/tcpip/stack/route.go @@ -0,0 +1,133 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stack + +import ( + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" +) + +// Route represents a route through the networking stack to a given destination. +type Route struct { + // RemoteAddress is the final destination of the route. + RemoteAddress tcpip.Address + + // RemoteLinkAddress is the link-layer (MAC) address of the + // final destination of the route. + RemoteLinkAddress tcpip.LinkAddress + + // LocalAddress is the local address where the route starts. + LocalAddress tcpip.Address + + // LocalLinkAddress is the link-layer (MAC) address of the + // where the route starts. + LocalLinkAddress tcpip.LinkAddress + + // NextHop is the next node in the path to the destination. + NextHop tcpip.Address + + // NetProto is the network-layer protocol. + NetProto tcpip.NetworkProtocolNumber + + // ref a reference to the network endpoint through which the route + // starts. + ref *referencedNetworkEndpoint +} + +// makeRoute initializes a new route. It takes ownership of the provided +// reference to a network endpoint. 
+func makeRoute(netProto tcpip.NetworkProtocolNumber, localAddr, remoteAddr tcpip.Address, ref *referencedNetworkEndpoint) Route { + return Route{ + NetProto: netProto, + LocalAddress: localAddr, + RemoteAddress: remoteAddr, + ref: ref, + } +} + +// NICID returns the id of the NIC from which this route originates. +func (r *Route) NICID() tcpip.NICID { + return r.ref.ep.NICID() +} + +// MaxHeaderLength forwards the call to the network endpoint's implementation. +func (r *Route) MaxHeaderLength() uint16 { + return r.ref.ep.MaxHeaderLength() +} + +// PseudoHeaderChecksum forwards the call to the network endpoint's +// implementation. +func (r *Route) PseudoHeaderChecksum(protocol tcpip.TransportProtocolNumber) uint16 { + return header.PseudoHeaderChecksum(protocol, r.LocalAddress, r.RemoteAddress) +} + +// Capabilities returns the link-layer capabilities of the route. +func (r *Route) Capabilities() LinkEndpointCapabilities { + return r.ref.ep.Capabilities() +} + +// Resolve attempts to resolve the link address if necessary. Returns ErrWouldBlock in +// case address resolution requires blocking, e.g. wait for ARP reply. Waker is +// notified when address resolution is complete (success or not). +func (r *Route) Resolve(waker *sleep.Waker) *tcpip.Error { + if !r.IsResolutionRequired() { + // Nothing to do if there is no cache (which does the resolution on cache miss) or + // link address is already known. + return nil + } + + nextAddr := r.NextHop + if nextAddr == "" { + nextAddr = r.RemoteAddress + } + linkAddr, err := r.ref.linkCache.GetLinkAddress(r.ref.nic.ID(), nextAddr, r.LocalAddress, r.NetProto, waker) + if err != nil { + return err + } + r.RemoteLinkAddress = linkAddr + return nil +} + +// RemoveWaker removes a waker that has been added in Resolve(). 
+func (r *Route) RemoveWaker(waker *sleep.Waker) {
+	nextAddr := r.NextHop
+	if nextAddr == "" {
+		nextAddr = r.RemoteAddress
+	}
+	r.ref.linkCache.RemoveWaker(r.ref.nic.ID(), nextAddr, waker)
+}
+
+// IsResolutionRequired returns true if Resolve() must be called to resolve
+// the link address before this route can be written to.
+func (r *Route) IsResolutionRequired() bool {
+	return r.ref.linkCache != nil && r.RemoteLinkAddress == ""
+}
+
+// WritePacket writes the packet through the given route.
+func (r *Route) WritePacket(hdr *buffer.Prependable, payload buffer.View, protocol tcpip.TransportProtocolNumber) *tcpip.Error {
+	return r.ref.ep.WritePacket(r, hdr, payload, protocol)
+}
+
+// MTU returns the MTU of the underlying network endpoint.
+func (r *Route) MTU() uint32 {
+	return r.ref.ep.MTU()
+}
+
+// Release frees all resources associated with the route.
+func (r *Route) Release() {
+	if r.ref != nil {
+		r.ref.decRef()
+		r.ref = nil
+	}
+}
+
+// Clone clones a route such that the original one can be released and the new
+// one will remain valid.
+func (r *Route) Clone() Route {
+	r.ref.incRef()
+	return *r
+}
diff --git a/pkg/tcpip/stack/stack.go b/pkg/tcpip/stack/stack.go
new file mode 100644
index 000000000..558ecdb72
--- /dev/null
+++ b/pkg/tcpip/stack/stack.go
@@ -0,0 +1,811 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package stack provides the glue between networking protocols and the
+// consumers of the networking stack.
+//
+// For consumers, the only function of interest is New(), everything else is
+// provided by the tcpip/public package.
+//
+// For protocol implementers, RegisterTransportProtocolFactory() and
+// RegisterNetworkProtocolFactory() are used to register protocol factories with
+// the stack, which will then be used to instantiate protocol objects when
+// consumers interact with the stack.
+package stack + +import ( + "sync" + "sync/atomic" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/ports" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + // ageLimit is set to the same cache stale time used in Linux. + ageLimit = 1 * time.Minute + // resolutionTimeout is set to the same ARP timeout used in Linux. + resolutionTimeout = 1 * time.Second + // resolutionAttempts is set to the same ARP retries used in Linux. + resolutionAttempts = 3 +) + +type transportProtocolState struct { + proto TransportProtocol + defaultHandler func(*Route, TransportEndpointID, *buffer.VectorisedView) bool +} + +// TCPProbeFunc is the expected function type for a TCP probe function to be +// passed to stack.AddTCPProbe. +type TCPProbeFunc func(s TCPEndpointState) + +// TCPEndpointID is the unique 4 tuple that identifies a given endpoint. +type TCPEndpointID struct { + // LocalPort is the local port associated with the endpoint. + LocalPort uint16 + + // LocalAddress is the local [network layer] address associated with + // the endpoint. + LocalAddress tcpip.Address + + // RemotePort is the remote port associated with the endpoint. + RemotePort uint16 + + // RemoteAddress it the remote [network layer] address associated with + // the endpoint. + RemoteAddress tcpip.Address +} + +// TCPFastRecoveryState holds a copy of the internal fast recovery state of a +// TCP endpoint. +type TCPFastRecoveryState struct { + // Active if true indicates the endpoint is in fast recovery. + Active bool + + // First is the first unacknowledged sequence number being recovered. + First seqnum.Value + + // Last is the 'recover' sequence number that indicates the point at + // which we should exit recovery barring any timeouts etc. 
+ Last seqnum.Value + + // MaxCwnd is the maximum value we are permitted to grow the congestion + // window during recovery. This is set at the time we enter recovery. + MaxCwnd int +} + +// TCPReceiverState holds a copy of the internal state of the receiver for +// a given TCP endpoint. +type TCPReceiverState struct { + // RcvNxt is the TCP variable RCV.NXT. + RcvNxt seqnum.Value + + // RcvAcc is the TCP variable RCV.ACC. + RcvAcc seqnum.Value + + // RcvWndScale is the window scaling to use for inbound segments. + RcvWndScale uint8 + + // PendingBufUsed is the number of bytes pending in the receive + // queue. + PendingBufUsed seqnum.Size + + // PendingBufSize is the size of the socket receive buffer. + PendingBufSize seqnum.Size +} + +// TCPSenderState holds a copy of the internal state of the sender for +// a given TCP Endpoint. +type TCPSenderState struct { + // LastSendTime is the time at which we sent the last segment. + LastSendTime time.Time + + // DupAckCount is the number of Duplicate ACK's received. + DupAckCount int + + // SndCwnd is the size of the sending congestion window in packets. + SndCwnd int + + // Ssthresh is the slow start threshold in packets. + Ssthresh int + + // SndCAAckCount is the number of packets consumed in congestion + // avoidance mode. + SndCAAckCount int + + // Outstanding is the number of packets in flight. + Outstanding int + + // SndWnd is the send window size in bytes. + SndWnd seqnum.Size + + // SndUna is the next unacknowledged sequence number. + SndUna seqnum.Value + + // SndNxt is the sequence number of the next segment to be sent. + SndNxt seqnum.Value + + // RTTMeasureSeqNum is the sequence number being used for the latest RTT + // measurement. + RTTMeasureSeqNum seqnum.Value + + // RTTMeasureTime is the time when the RTTMeasureSeqNum was sent. + RTTMeasureTime time.Time + + // Closed indicates that the caller has closed the endpoint for sending. 
+	Closed bool
+
+	// SRTT is the smoothed round-trip time as defined in section 2 of
+	// RFC 6298.
+	SRTT time.Duration
+
+	// RTO is the retransmit timeout as defined in section 2 of RFC 6298.
+	RTO time.Duration
+
+	// RTTVar is the round-trip time variation as defined in section 2 of
+	// RFC 6298.
+	RTTVar time.Duration
+
+	// SRTTInited if true indicates that a valid RTT measurement has been
+	// completed.
+	SRTTInited bool
+
+	// MaxPayloadSize is the maximum size of the payload of a given segment.
+	// It is initialized on demand.
+	MaxPayloadSize int
+
+	// SndWndScale is the number of bits to shift left when reading the send
+	// window size from a segment.
+	SndWndScale uint8
+
+	// MaxSentAck is the highest acknowledgment number sent till now.
+	MaxSentAck seqnum.Value
+
+	// FastRecovery holds the fast recovery state for the endpoint.
+	FastRecovery TCPFastRecoveryState
+}
+
+// TCPSACKInfo holds TCP SACK related information for a given TCP endpoint.
+type TCPSACKInfo struct {
+	// Blocks is the list of SACK blocks currently received by the
+	// TCP endpoint.
+	Blocks []header.SACKBlock
+}
+
+// TCPEndpointState is a copy of the internal state of a TCP endpoint.
+type TCPEndpointState struct {
+	// ID is a copy of the TransportEndpointID for the endpoint.
+	ID TCPEndpointID
+
+	// SegTime denotes the absolute time when this segment was received.
+	SegTime time.Time
+
+	// RcvBufSize is the size of the receive socket buffer for the endpoint.
+	RcvBufSize int
+
+	// RcvBufUsed is the amount of bytes actually held in the receive socket
+	// buffer for the endpoint.
+	RcvBufUsed int
+
+	// RcvClosed if true, indicates the endpoint has been closed for reading.
+	RcvClosed bool
+
+	// SendTSOk is used to indicate when the TS Option has been negotiated.
+	// When sendTSOk is true every non-RST segment should carry a TS as per
+	// RFC7323#section-1.1.
+ SendTSOk bool + + // RecentTS is the timestamp that should be sent in the TSEcr field of + // the timestamp for future segments sent by the endpoint. This field is + // updated if required when a new segment is received by this endpoint. + RecentTS uint32 + + // TSOffset is a randomized offset added to the value of the TSVal field + // in the timestamp option. + TSOffset uint32 + + // SACKPermitted is set to true if the peer sends the TCPSACKPermitted + // option in the SYN/SYN-ACK. + SACKPermitted bool + + // SACK holds TCP SACK related information for this endpoint. + SACK TCPSACKInfo + + // SndBufSize is the size of the socket send buffer. + SndBufSize int + + // SndBufUsed is the number of bytes held in the socket send buffer. + SndBufUsed int + + // SndClosed indicates that the endpoint has been closed for sends. + SndClosed bool + + // SndBufInQueue is the number of bytes in the send queue. + SndBufInQueue seqnum.Size + + // PacketTooBigCount is used to notify the main protocol routine how + // many times a "packet too big" control packet is received. + PacketTooBigCount int + + // SndMTU is the smallest MTU seen in the control packets received. + SndMTU int + + // Receiver holds variables related to the TCP receiver for the endpoint. + Receiver TCPReceiverState + + // Sender holds state related to the TCP Sender for the endpoint. + Sender TCPSenderState +} + +// Stack is a networking stack, with all supported protocols, NICs, and route +// table. 
+type Stack struct {
+	transportProtocols map[tcpip.TransportProtocolNumber]*transportProtocolState
+	networkProtocols map[tcpip.NetworkProtocolNumber]NetworkProtocol
+	linkAddrResolvers map[tcpip.NetworkProtocolNumber]LinkAddressResolver
+
+	demux *transportDemuxer
+
+	stats tcpip.Stats
+
+	linkAddrCache *linkAddrCache
+
+	mu sync.RWMutex
+	nics map[tcpip.NICID]*NIC
+
+	// route is the route table passed in by the user via SetRouteTable(),
+	// it is used by FindRoute() to build a route for a specific
+	// destination.
+	routeTable []tcpip.Route
+
+	*ports.PortManager
+
+	// If not nil, then any new endpoints will have this probe function
+	// invoked every time they receive a TCP segment.
+	tcpProbeFunc TCPProbeFunc
+}
+
+// New allocates a new networking stack with only the requested networking and
+// transport protocols configured with default options.
+//
+// Protocol options can be changed by calling the
+// SetNetworkProtocolOption/SetTransportProtocolOption methods provided by the
+// stack. Please refer to individual protocol implementations as to what options
+// are supported.
+func New(network []string, transport []string) *Stack {
+	s := &Stack{
+		transportProtocols: make(map[tcpip.TransportProtocolNumber]*transportProtocolState),
+		networkProtocols: make(map[tcpip.NetworkProtocolNumber]NetworkProtocol),
+		linkAddrResolvers: make(map[tcpip.NetworkProtocolNumber]LinkAddressResolver),
+		nics: make(map[tcpip.NICID]*NIC),
+		linkAddrCache: newLinkAddrCache(ageLimit, resolutionTimeout, resolutionAttempts),
+		PortManager: ports.NewPortManager(),
+	}
+
+	// Add specified network protocols.
+	for _, name := range network {
+		netProtoFactory, ok := networkProtocols[name]
+		if !ok {
+			continue
+		}
+		netProto := netProtoFactory()
+		s.networkProtocols[netProto.Number()] = netProto
+		if r, ok := netProto.(LinkAddressResolver); ok {
+			s.linkAddrResolvers[r.LinkAddressProtocol()] = r
+		}
+	}
+
+	// Add specified transport protocols.
+ for _, name := range transport { + transProtoFactory, ok := transportProtocols[name] + if !ok { + continue + } + transProto := transProtoFactory() + s.transportProtocols[transProto.Number()] = &transportProtocolState{ + proto: transProto, + } + } + + // Create the global transport demuxer. + s.demux = newTransportDemuxer(s) + + return s +} + +// SetNetworkProtocolOption allows configuring individual protocol level +// options. This method returns an error if the protocol is not supported or +// option is not supported by the protocol implementation or the provided value +// is incorrect. +func (s *Stack) SetNetworkProtocolOption(network tcpip.NetworkProtocolNumber, option interface{}) *tcpip.Error { + netProto, ok := s.networkProtocols[network] + if !ok { + return tcpip.ErrUnknownProtocol + } + return netProto.SetOption(option) +} + +// NetworkProtocolOption allows retrieving individual protocol level option +// values. This method returns an error if the protocol is not supported or +// option is not supported by the protocol implementation. +// e.g. +// var v ipv4.MyOption +// err := s.NetworkProtocolOption(tcpip.IPv4ProtocolNumber, &v) +// if err != nil { +// ... +// } +func (s *Stack) NetworkProtocolOption(network tcpip.NetworkProtocolNumber, option interface{}) *tcpip.Error { + netProto, ok := s.networkProtocols[network] + if !ok { + return tcpip.ErrUnknownProtocol + } + return netProto.Option(option) +} + +// SetTransportProtocolOption allows configuring individual protocol level +// options. This method returns an error if the protocol is not supported or +// option is not supported by the protocol implementation or the provided value +// is incorrect. 
+func (s *Stack) SetTransportProtocolOption(transport tcpip.TransportProtocolNumber, option interface{}) *tcpip.Error { + transProtoState, ok := s.transportProtocols[transport] + if !ok { + return tcpip.ErrUnknownProtocol + } + return transProtoState.proto.SetOption(option) +} + +// TransportProtocolOption allows retrieving individual protocol level option +// values. This method returns an error if the protocol is not supported or +// option is not supported by the protocol implementation. +// var v tcp.SACKEnabled +// if err := s.TransportProtocolOption(tcpip.TCPProtocolNumber, &v); err != nil { +// ... +// } +func (s *Stack) TransportProtocolOption(transport tcpip.TransportProtocolNumber, option interface{}) *tcpip.Error { + transProtoState, ok := s.transportProtocols[transport] + if !ok { + return tcpip.ErrUnknownProtocol + } + return transProtoState.proto.Option(option) +} + +// SetTransportProtocolHandler sets the per-stack default handler for the given +// protocol. +// +// It must be called only during initialization of the stack. Changing it as the +// stack is operating is not supported. +func (s *Stack) SetTransportProtocolHandler(p tcpip.TransportProtocolNumber, h func(*Route, TransportEndpointID, *buffer.VectorisedView) bool) { + state := s.transportProtocols[p] + if state != nil { + state.defaultHandler = h + } +} + +// Stats returns a snapshot of the current stats. +// +// NOTE: The underlying stats are updated using atomic instructions as a result +// the snapshot returned does not represent the value of all the stats at any +// single given point of time. +// TODO: Make stats available in sentry for debugging/diag. 
+func (s *Stack) Stats() tcpip.Stats { + return tcpip.Stats{ + UnknownProtocolRcvdPackets: atomic.LoadUint64(&s.stats.UnknownProtocolRcvdPackets), + UnknownNetworkEndpointRcvdPackets: atomic.LoadUint64(&s.stats.UnknownNetworkEndpointRcvdPackets), + MalformedRcvdPackets: atomic.LoadUint64(&s.stats.MalformedRcvdPackets), + DroppedPackets: atomic.LoadUint64(&s.stats.DroppedPackets), + } +} + +// MutableStats returns a mutable copy of the current stats. +// +// This is not generally exported via the public interface, but is available +// internally. +func (s *Stack) MutableStats() *tcpip.Stats { + return &s.stats +} + +// SetRouteTable assigns the route table to be used by this stack. It +// specifies which NIC to use for given destination address ranges. +func (s *Stack) SetRouteTable(table []tcpip.Route) { + s.mu.Lock() + defer s.mu.Unlock() + + s.routeTable = table +} + +// NewEndpoint creates a new transport layer endpoint of the given protocol. +func (s *Stack) NewEndpoint(transport tcpip.TransportProtocolNumber, network tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) { + t, ok := s.transportProtocols[transport] + if !ok { + return nil, tcpip.ErrUnknownProtocol + } + + return t.proto.NewEndpoint(s, network, waiterQueue) +} + +// createNIC creates a NIC with the provided id and link-layer endpoint, and +// optionally enable it. +func (s *Stack) createNIC(id tcpip.NICID, name string, linkEP tcpip.LinkEndpointID, enabled bool) *tcpip.Error { + ep := FindLinkEndpoint(linkEP) + if ep == nil { + return tcpip.ErrBadLinkEndpoint + } + + s.mu.Lock() + defer s.mu.Unlock() + + // Make sure id is unique. + if _, ok := s.nics[id]; ok { + return tcpip.ErrDuplicateNICID + } + + n := newNIC(s, id, name, ep) + + s.nics[id] = n + if enabled { + n.attachLinkEndpoint() + } + + return nil +} + +// CreateNIC creates a NIC with the provided id and link-layer endpoint. 
+func (s *Stack) CreateNIC(id tcpip.NICID, linkEP tcpip.LinkEndpointID) *tcpip.Error {
+	return s.createNIC(id, "", linkEP, true)
+}
+
+// CreateNamedNIC creates a NIC with the provided id and link-layer endpoint,
+// and a human-readable name.
+func (s *Stack) CreateNamedNIC(id tcpip.NICID, name string, linkEP tcpip.LinkEndpointID) *tcpip.Error {
+	return s.createNIC(id, name, linkEP, true)
+}
+
+// CreateDisabledNIC creates a NIC with the provided id and link-layer endpoint,
+// but leaves it disabled. Stack.EnableNIC must be called before the link-layer
+// endpoint starts delivering packets to it.
+func (s *Stack) CreateDisabledNIC(id tcpip.NICID, linkEP tcpip.LinkEndpointID) *tcpip.Error {
+	return s.createNIC(id, "", linkEP, false)
+}
+
+// EnableNIC enables the given NIC so that the link-layer endpoint can start
+// delivering packets to it.
+func (s *Stack) EnableNIC(id tcpip.NICID) *tcpip.Error {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	nic := s.nics[id]
+	if nic == nil {
+		return tcpip.ErrUnknownNICID
+	}
+
+	nic.attachLinkEndpoint()
+
+	return nil
+}
+
+// NICSubnets returns a map of NICIDs to their associated subnets.
+func (s *Stack) NICSubnets() map[tcpip.NICID][]tcpip.Subnet {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	nics := map[tcpip.NICID][]tcpip.Subnet{}
+
+	for id, nic := range s.nics {
+		nics[id] = append(nics[id], nic.Subnets()...)
+	}
+	return nics
+}
+
+// NICInfo captures the name and addresses assigned to a NIC.
+type NICInfo struct {
+	Name string
+	LinkAddress tcpip.LinkAddress
+	ProtocolAddresses []tcpip.ProtocolAddress
+}
+
+// NICInfo returns a map of NICIDs to their associated information.
+func (s *Stack) NICInfo() map[tcpip.NICID]NICInfo { + s.mu.RLock() + defer s.mu.RUnlock() + + nics := make(map[tcpip.NICID]NICInfo) + for id, nic := range s.nics { + nics[id] = NICInfo{ + Name: nic.name, + LinkAddress: nic.linkEP.LinkAddress(), + ProtocolAddresses: nic.Addresses(), + } + } + return nics +} + +// AddAddress adds a new network-layer address to the specified NIC. +func (s *Stack) AddAddress(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) *tcpip.Error { + s.mu.RLock() + defer s.mu.RUnlock() + + nic := s.nics[id] + if nic == nil { + return tcpip.ErrUnknownNICID + } + + return nic.AddAddress(protocol, addr) +} + +// AddSubnet adds a subnet range to the specified NIC. +func (s *Stack) AddSubnet(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber, subnet tcpip.Subnet) *tcpip.Error { + s.mu.RLock() + defer s.mu.RUnlock() + + nic := s.nics[id] + if nic == nil { + return tcpip.ErrUnknownNICID + } + + nic.AddSubnet(protocol, subnet) + return nil +} + +// RemoveAddress removes an existing network-layer address from the specified +// NIC. +func (s *Stack) RemoveAddress(id tcpip.NICID, addr tcpip.Address) *tcpip.Error { + s.mu.RLock() + defer s.mu.RUnlock() + + nic := s.nics[id] + if nic == nil { + return tcpip.ErrUnknownNICID + } + + return nic.RemoveAddress(addr) +} + +// FindRoute creates a route to the given destination address, leaving through +// the given nic and local address (if provided). 
+func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) (Route, *tcpip.Error) { + s.mu.RLock() + defer s.mu.RUnlock() + + for i := range s.routeTable { + if (id != 0 && id != s.routeTable[i].NIC) || (len(remoteAddr) != 0 && !s.routeTable[i].Match(remoteAddr)) { + continue + } + + nic := s.nics[s.routeTable[i].NIC] + if nic == nil { + continue + } + + var ref *referencedNetworkEndpoint + if len(localAddr) != 0 { + ref = nic.findEndpoint(netProto, localAddr) + } else { + ref = nic.primaryEndpoint(netProto) + } + if ref == nil { + continue + } + + if len(remoteAddr) == 0 { + // If no remote address was provided, then the route + // provided will refer to the link local address. + remoteAddr = ref.ep.ID().LocalAddress + } + + r := makeRoute(netProto, ref.ep.ID().LocalAddress, remoteAddr, ref) + r.NextHop = s.routeTable[i].Gateway + return r, nil + } + + return Route{}, tcpip.ErrNoRoute +} + +// CheckNetworkProtocol checks if a given network protocol is enabled in the +// stack. +func (s *Stack) CheckNetworkProtocol(protocol tcpip.NetworkProtocolNumber) bool { + _, ok := s.networkProtocols[protocol] + return ok +} + +// CheckLocalAddress determines if the given local address exists, and if it +// does, returns the id of the NIC it's bound to. Returns 0 if the address +// does not exist. +func (s *Stack) CheckLocalAddress(nicid tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) tcpip.NICID { + s.mu.RLock() + defer s.mu.RUnlock() + + // If a NIC is specified, we try to find the address there only. + if nicid != 0 { + nic := s.nics[nicid] + if nic == nil { + return 0 + } + + ref := nic.findEndpoint(protocol, addr) + if ref == nil { + return 0 + } + + ref.decRef() + + return nic.id + } + + // Go through all the NICs. 
+ for _, nic := range s.nics { + ref := nic.findEndpoint(protocol, addr) + if ref != nil { + ref.decRef() + return nic.id + } + } + + return 0 +} + +// SetPromiscuousMode enables or disables promiscuous mode in the given NIC. +func (s *Stack) SetPromiscuousMode(nicID tcpip.NICID, enable bool) *tcpip.Error { + s.mu.RLock() + defer s.mu.RUnlock() + + nic := s.nics[nicID] + if nic == nil { + return tcpip.ErrUnknownNICID + } + + nic.setPromiscuousMode(enable) + + return nil +} + +// SetSpoofing enables or disables address spoofing in the given NIC, allowing +// endpoints to bind to any address in the NIC. +func (s *Stack) SetSpoofing(nicID tcpip.NICID, enable bool) *tcpip.Error { + s.mu.RLock() + defer s.mu.RUnlock() + + nic := s.nics[nicID] + if nic == nil { + return tcpip.ErrUnknownNICID + } + + nic.setSpoofing(enable) + + return nil +} + +// AddLinkAddress adds a link address to the stack link cache. +func (s *Stack) AddLinkAddress(nicid tcpip.NICID, addr tcpip.Address, linkAddr tcpip.LinkAddress) { + fullAddr := tcpip.FullAddress{NIC: nicid, Addr: addr} + s.linkAddrCache.add(fullAddr, linkAddr) + // TODO: provide a way for a + // transport endpoint to receive a signal that AddLinkAddress + // for a particular address has been called. +} + +// GetLinkAddress implements LinkAddressCache.GetLinkAddress. +func (s *Stack) GetLinkAddress(nicid tcpip.NICID, addr, localAddr tcpip.Address, protocol tcpip.NetworkProtocolNumber, waker *sleep.Waker) (tcpip.LinkAddress, *tcpip.Error) { + s.mu.RLock() + nic := s.nics[nicid] + if nic == nil { + s.mu.RUnlock() + return "", tcpip.ErrUnknownNICID + } + s.mu.RUnlock() + + fullAddr := tcpip.FullAddress{NIC: nicid, Addr: addr} + linkRes := s.linkAddrResolvers[protocol] + return s.linkAddrCache.get(fullAddr, linkRes, localAddr, nic.linkEP, waker) +} + +// RemoveWaker implements LinkAddressCache.RemoveWaker. 
+func (s *Stack) RemoveWaker(nicid tcpip.NICID, addr tcpip.Address, waker *sleep.Waker) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	// GetLinkAddress only registers a waker with the stack-wide link
+	// address cache when the NIC exists, so removal must also happen when
+	// the NIC is found. (The previous condition, nic == nil, was inverted
+	// and never removed a registered waker.)
+	if nic := s.nics[nicid]; nic != nil {
+		fullAddr := tcpip.FullAddress{NIC: nicid, Addr: addr}
+		s.linkAddrCache.removeWaker(fullAddr, waker)
+	}
+}
+
+// RegisterTransportEndpoint registers the given endpoint with the stack
+// transport dispatcher. Received packets that match the provided id will be
+// delivered to the given endpoint; specifying a nic is optional, but
+// nic-specific IDs have precedence over global ones.
+func (s *Stack) RegisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint) *tcpip.Error {
+	if nicID == 0 {
+		return s.demux.registerEndpoint(netProtos, protocol, id, ep)
+	}
+
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	nic := s.nics[nicID]
+	if nic == nil {
+		return tcpip.ErrUnknownNICID
+	}
+
+	return nic.demux.registerEndpoint(netProtos, protocol, id, ep)
+}
+
+// UnregisterTransportEndpoint removes the endpoint with the given id from the
+// stack transport dispatcher.
+func (s *Stack) UnregisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID) {
+	if nicID == 0 {
+		s.demux.unregisterEndpoint(netProtos, protocol, id)
+		return
+	}
+
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	nic := s.nics[nicID]
+	if nic != nil {
+		nic.demux.unregisterEndpoint(netProtos, protocol, id)
+	}
+}
+
+// NetworkProtocolInstance returns the protocol instance in the stack for the
+// specified network protocol. This method is public for protocol implementers
+// and tests to use.
+func (s *Stack) NetworkProtocolInstance(num tcpip.NetworkProtocolNumber) NetworkProtocol {
+	// Returns nil (not an error) when the protocol was not registered.
+	if p, ok := s.networkProtocols[num]; ok {
+		return p
+	}
+	return nil
+}
+
+// TransportProtocolInstance returns the protocol instance in the stack for the
+// specified transport protocol. This method is public for protocol implementers
+// and tests to use.
+func (s *Stack) TransportProtocolInstance(num tcpip.TransportProtocolNumber) TransportProtocol {
+	if pState, ok := s.transportProtocols[num]; ok {
+		return pState.proto
+	}
+	return nil
+}
+
+// AddTCPProbe installs a probe function that will be invoked on every segment
+// received by a given TCP endpoint. The probe function is passed a copy of the
+// TCP endpoint state.
+//
+// NOTE: TCPProbe is added only to endpoints created after this call. Endpoints
+// created prior to this call will not call the probe function.
+//
+// Further, installing two different probes back to back can result in some
+// endpoints calling the first one and some the second one. There is no
+// guarantee provided on which probe will be invoked. Ideally this should only
+// be called once per stack.
+func (s *Stack) AddTCPProbe(probe TCPProbeFunc) {
+	// The probe field is guarded by the full (write) lock, in both the
+	// setter here and the getter below.
+	s.mu.Lock()
+	s.tcpProbeFunc = probe
+	s.mu.Unlock()
+}
+
+// GetTCPProbe returns the TCPProbeFunc if installed with AddTCPProbe, nil
+// otherwise.
+func (s *Stack) GetTCPProbe() TCPProbeFunc {
+	s.mu.Lock()
+	p := s.tcpProbeFunc
+	s.mu.Unlock()
+	return p
+}
+
+// RemoveTCPProbe removes an installed TCP probe.
+//
+// NOTE: This only ensures that endpoints created after this call do not
+// have a probe attached. Endpoints already created will continue to invoke
+// TCP probe.
+func (s *Stack) RemoveTCPProbe() { + s.mu.Lock() + s.tcpProbeFunc = nil + s.mu.Unlock() +} diff --git a/pkg/tcpip/stack/stack_global_state.go b/pkg/tcpip/stack/stack_global_state.go new file mode 100644 index 000000000..030ae98d1 --- /dev/null +++ b/pkg/tcpip/stack/stack_global_state.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stack + +// StackFromEnv is the global stack created in restore run. +// FIXME: remove this variable once tcpip S/R is fully supported. +var StackFromEnv *Stack diff --git a/pkg/tcpip/stack/stack_test.go b/pkg/tcpip/stack/stack_test.go new file mode 100644 index 000000000..b416065d7 --- /dev/null +++ b/pkg/tcpip/stack/stack_test.go @@ -0,0 +1,760 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stack_test contains tests for the stack. It is in its own package so +// that the tests can also validate that all definitions needed to implement +// transport and network protocols are properly exported by the stack package. +package stack_test + +import ( + "math" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/channel" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" +) + +const ( + fakeNetNumber tcpip.NetworkProtocolNumber = math.MaxUint32 + fakeNetHeaderLen = 12 + + // fakeControlProtocol is used for control packets that represent + // destination port unreachable. + fakeControlProtocol tcpip.TransportProtocolNumber = 2 + + // defaultMTU is the MTU, in bytes, used throughout the tests, except + // where another value is explicitly used. It is chosen to match the MTU + // of loopback interfaces on linux systems. 
+ defaultMTU = 65536 +) + +// fakeNetworkEndpoint is a network-layer protocol endpoint. It counts sent and +// received packets; the counts of all endpoints are aggregated in the protocol +// descriptor. +// +// Headers of this protocol are fakeNetHeaderLen bytes, but we currently only +// use the first three: destination address, source address, and transport +// protocol. They're all one byte fields to simplify parsing. +type fakeNetworkEndpoint struct { + nicid tcpip.NICID + id stack.NetworkEndpointID + proto *fakeNetworkProtocol + dispatcher stack.TransportDispatcher + linkEP stack.LinkEndpoint +} + +func (f *fakeNetworkEndpoint) MTU() uint32 { + return f.linkEP.MTU() - uint32(f.MaxHeaderLength()) +} + +func (f *fakeNetworkEndpoint) NICID() tcpip.NICID { + return f.nicid +} + +func (f *fakeNetworkEndpoint) ID() *stack.NetworkEndpointID { + return &f.id +} + +func (f *fakeNetworkEndpoint) HandlePacket(r *stack.Route, vv *buffer.VectorisedView) { + // Increment the received packet count in the protocol descriptor. + f.proto.packetCount[int(f.id.LocalAddress[0])%len(f.proto.packetCount)]++ + + // Consume the network header. + b := vv.First() + vv.TrimFront(fakeNetHeaderLen) + + // Handle control packets. + if b[2] == uint8(fakeControlProtocol) { + nb := vv.First() + if len(nb) < fakeNetHeaderLen { + return + } + + vv.TrimFront(fakeNetHeaderLen) + f.dispatcher.DeliverTransportControlPacket(tcpip.Address(nb[1:2]), tcpip.Address(nb[0:1]), fakeNetNumber, tcpip.TransportProtocolNumber(nb[2]), stack.ControlPortUnreachable, 0, vv) + return + } + + // Dispatch the packet to the transport protocol. 
+ f.dispatcher.DeliverTransportPacket(r, tcpip.TransportProtocolNumber(b[2]), vv) +} + +func (f *fakeNetworkEndpoint) MaxHeaderLength() uint16 { + return f.linkEP.MaxHeaderLength() + fakeNetHeaderLen +} + +func (f *fakeNetworkEndpoint) PseudoHeaderChecksum(protocol tcpip.TransportProtocolNumber, dstAddr tcpip.Address) uint16 { + return 0 +} + +func (f *fakeNetworkEndpoint) Capabilities() stack.LinkEndpointCapabilities { + return f.linkEP.Capabilities() +} + +func (f *fakeNetworkEndpoint) WritePacket(r *stack.Route, hdr *buffer.Prependable, payload buffer.View, protocol tcpip.TransportProtocolNumber) *tcpip.Error { + // Increment the sent packet count in the protocol descriptor. + f.proto.sendPacketCount[int(r.RemoteAddress[0])%len(f.proto.sendPacketCount)]++ + + // Add the protocol's header to the packet and send it to the link + // endpoint. + b := hdr.Prepend(fakeNetHeaderLen) + b[0] = r.RemoteAddress[0] + b[1] = f.id.LocalAddress[0] + b[2] = byte(protocol) + return f.linkEP.WritePacket(r, hdr, payload, fakeNetNumber) +} + +func (*fakeNetworkEndpoint) Close() {} + +type fakeNetGoodOption bool + +type fakeNetBadOption bool + +type fakeNetInvalidValueOption int + +type fakeNetOptions struct { + good bool +} + +// fakeNetworkProtocol is a network-layer protocol descriptor. It aggregates the +// number of packets sent and received via endpoints of this protocol. The index +// where packets are added is given by the packet's destination address MOD 10. 
+type fakeNetworkProtocol struct { + packetCount [10]int + sendPacketCount [10]int + opts fakeNetOptions +} + +func (f *fakeNetworkProtocol) Number() tcpip.NetworkProtocolNumber { + return fakeNetNumber +} + +func (f *fakeNetworkProtocol) MinimumPacketSize() int { + return fakeNetHeaderLen +} + +func (*fakeNetworkProtocol) ParseAddresses(v buffer.View) (src, dst tcpip.Address) { + return tcpip.Address(v[1:2]), tcpip.Address(v[0:1]) +} + +func (f *fakeNetworkProtocol) NewEndpoint(nicid tcpip.NICID, addr tcpip.Address, linkAddrCache stack.LinkAddressCache, dispatcher stack.TransportDispatcher, linkEP stack.LinkEndpoint) (stack.NetworkEndpoint, *tcpip.Error) { + return &fakeNetworkEndpoint{ + nicid: nicid, + id: stack.NetworkEndpointID{addr}, + proto: f, + dispatcher: dispatcher, + linkEP: linkEP, + }, nil +} + +func (f *fakeNetworkProtocol) SetOption(option interface{}) *tcpip.Error { + switch v := option.(type) { + case fakeNetGoodOption: + f.opts.good = bool(v) + return nil + case fakeNetInvalidValueOption: + return tcpip.ErrInvalidOptionValue + default: + return tcpip.ErrUnknownProtocolOption + } +} + +func (f *fakeNetworkProtocol) Option(option interface{}) *tcpip.Error { + switch v := option.(type) { + case *fakeNetGoodOption: + *v = fakeNetGoodOption(f.opts.good) + return nil + default: + return tcpip.ErrUnknownProtocolOption + } +} + +func TestNetworkReceive(t *testing.T) { + // Create a stack with the fake network protocol, one nic, and two + // addresses attached to it: 1 & 2. 
+ id, linkEP := channel.New(10, defaultMTU, "") + s := stack.New([]string{"fakeNet"}, nil) + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, "\x02"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol) + var views [1]buffer.View + // Allocate the buffer containing the packet that will be injected into + // the stack. + buf := buffer.NewView(30) + + // Make sure packet with wrong address is not delivered. + buf[0] = 3 + vv := buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 0 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 0) + } + if fakeNet.packetCount[2] != 0 { + t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 0) + } + + // Make sure packet is delivered to first endpoint. + buf[0] = 1 + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } + if fakeNet.packetCount[2] != 0 { + t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 0) + } + + // Make sure packet is delivered to second endpoint. + buf[0] = 2 + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } + if fakeNet.packetCount[2] != 1 { + t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 1) + } + + // Make sure packet is not delivered if protocol number is wrong. 
+ vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber-1, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } + if fakeNet.packetCount[2] != 1 { + t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 1) + } + + // Make sure packet that is too small is dropped. + buf.CapLength(2) + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } + if fakeNet.packetCount[2] != 1 { + t.Errorf("packetCount[2] = %d, want %d", fakeNet.packetCount[2], 1) + } +} + +func sendTo(t *testing.T, s *stack.Stack, addr tcpip.Address) { + r, err := s.FindRoute(0, "", addr, fakeNetNumber) + if err != nil { + t.Fatalf("FindRoute failed: %v", err) + } + defer r.Release() + + hdr := buffer.NewPrependable(int(r.MaxHeaderLength())) + err = r.WritePacket(&hdr, nil, fakeTransNumber) + if err != nil { + t.Errorf("WritePacket failed: %v", err) + return + } +} + +func TestNetworkSend(t *testing.T) { + // Create a stack with the fake network protocol, one nic, and one + // address: 1. The route table sends all packets through the only + // existing nic. + id, linkEP := channel.New(10, defaultMTU, "") + s := stack.New([]string{"fakeNet"}, nil) + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("NewNIC failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{{"\x00", "\x00", "\x00", 1}}) + + if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + // Make sure that the link-layer endpoint received the outbound packet. + sendTo(t, s, "\x03") + if c := linkEP.Drain(); c != 1 { + t.Errorf("packetCount = %d, want %d", c, 1) + } +} + +func TestNetworkSendMultiRoute(t *testing.T) { + // Create a stack with the fake network protocol, two nics, and two + // addresses per nic, the first nic has odd address, the second one has + // even addresses. 
+ s := stack.New([]string{"fakeNet"}, nil) + + id1, linkEP1 := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(1, id1); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, "\x03"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + id2, linkEP2 := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(2, id2); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(2, fakeNetNumber, "\x02"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + if err := s.AddAddress(2, fakeNetNumber, "\x04"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + // Set a route table that sends all packets with odd destination + // addresses through the first NIC, and all even destination address + // through the second one. + s.SetRouteTable([]tcpip.Route{ + {"\x01", "\x01", "\x00", 1}, + {"\x00", "\x01", "\x00", 2}, + }) + + // Send a packet to an odd destination. + sendTo(t, s, "\x05") + + if c := linkEP1.Drain(); c != 1 { + t.Errorf("packetCount = %d, want %d", c, 1) + } + + // Send a packet to an even destination. 
+ sendTo(t, s, "\x06") + + if c := linkEP2.Drain(); c != 1 { + t.Errorf("packetCount = %d, want %d", c, 1) + } +} + +func testRoute(t *testing.T, s *stack.Stack, nic tcpip.NICID, srcAddr, dstAddr, expectedSrcAddr tcpip.Address) { + r, err := s.FindRoute(nic, srcAddr, dstAddr, fakeNetNumber) + if err != nil { + t.Fatalf("FindRoute failed: %v", err) + } + + defer r.Release() + + if r.LocalAddress != expectedSrcAddr { + t.Fatalf("Bad source address: expected %v, got %v", expectedSrcAddr, r.LocalAddress) + } + + if r.RemoteAddress != dstAddr { + t.Fatalf("Bad destination address: expected %v, got %v", dstAddr, r.RemoteAddress) + } +} + +func testNoRoute(t *testing.T, s *stack.Stack, nic tcpip.NICID, srcAddr, dstAddr tcpip.Address) { + _, err := s.FindRoute(nic, srcAddr, dstAddr, fakeNetNumber) + if err != tcpip.ErrNoRoute { + t.Fatalf("FindRoute returned unexpected error, expected tcpip.ErrNoRoute, got %v", err) + } +} + +func TestRoutes(t *testing.T) { + // Create a stack with the fake network protocol, two nics, and two + // addresses per nic, the first nic has odd address, the second one has + // even addresses. 
+ s := stack.New([]string{"fakeNet"}, nil) + + id1, _ := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(1, id1); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, "\x03"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + id2, _ := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(2, id2); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(2, fakeNetNumber, "\x02"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + if err := s.AddAddress(2, fakeNetNumber, "\x04"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + // Set a route table that sends all packets with odd destination + // addresses through the first NIC, and all even destination address + // through the second one. + s.SetRouteTable([]tcpip.Route{ + {"\x01", "\x01", "\x00", 1}, + {"\x00", "\x01", "\x00", 2}, + }) + + // Test routes to odd address. + testRoute(t, s, 0, "", "\x05", "\x01") + testRoute(t, s, 0, "\x01", "\x05", "\x01") + testRoute(t, s, 1, "\x01", "\x05", "\x01") + testRoute(t, s, 0, "\x03", "\x05", "\x03") + testRoute(t, s, 1, "\x03", "\x05", "\x03") + + // Test routes to even address. + testRoute(t, s, 0, "", "\x06", "\x02") + testRoute(t, s, 0, "\x02", "\x06", "\x02") + testRoute(t, s, 2, "\x02", "\x06", "\x02") + testRoute(t, s, 0, "\x04", "\x06", "\x04") + testRoute(t, s, 2, "\x04", "\x06", "\x04") + + // Try to send to odd numbered address from even numbered ones, then + // vice-versa. 
+ testNoRoute(t, s, 0, "\x02", "\x05") + testNoRoute(t, s, 2, "\x02", "\x05") + testNoRoute(t, s, 0, "\x04", "\x05") + testNoRoute(t, s, 2, "\x04", "\x05") + + testNoRoute(t, s, 0, "\x01", "\x06") + testNoRoute(t, s, 1, "\x01", "\x06") + testNoRoute(t, s, 0, "\x03", "\x06") + testNoRoute(t, s, 1, "\x03", "\x06") +} + +func TestAddressRemoval(t *testing.T) { + s := stack.New([]string{"fakeNet"}, nil) + + id, linkEP := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + var views [1]buffer.View + buf := buffer.NewView(30) + + fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol) + + // Write a packet, and check that it gets delivered. + fakeNet.packetCount[1] = 0 + buf[0] = 1 + vv := buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } + + // Remove the address, then check that packet doesn't get delivered + // anymore. + if err := s.RemoveAddress(1, "\x01"); err != nil { + t.Fatalf("RemoveAddress failed: %v", err) + } + + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } + + // Check that removing the same address fails. 
+ if err := s.RemoveAddress(1, "\x01"); err != tcpip.ErrBadLocalAddress { + t.Fatalf("RemoveAddress failed: %v", err) + } +} + +func TestDelayedRemovalDueToRoute(t *testing.T) { + s := stack.New([]string{"fakeNet"}, nil) + + id, linkEP := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{ + {"\x00", "\x00", "\x00", 1}, + }) + + fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol) + + var views [1]buffer.View + buf := buffer.NewView(30) + + // Write a packet, and check that it gets delivered. + fakeNet.packetCount[1] = 0 + buf[0] = 1 + vv := buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } + + // Get a route, check that packet is still deliverable. + r, err := s.FindRoute(0, "", "\x02", fakeNetNumber) + if err != nil { + t.Fatalf("FindRoute failed: %v", err) + } + + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 2 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 2) + } + + // Remove the address, then check that packet is still deliverable + // because the route is keeping the address alive. + if err := s.RemoveAddress(1, "\x01"); err != nil { + t.Fatalf("RemoveAddress failed: %v", err) + } + + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 3 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 3) + } + + // Check that removing the same address fails. + if err := s.RemoveAddress(1, "\x01"); err != tcpip.ErrBadLocalAddress { + t.Fatalf("RemoveAddress failed: %v", err) + } + + // Release the route, then check that packet is not deliverable anymore. 
+ r.Release() + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 3 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 3) + } +} + +func TestPromiscuousMode(t *testing.T) { + s := stack.New([]string{"fakeNet"}, nil) + + id, linkEP := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{ + {"\x00", "\x00", "\x00", 1}, + }) + + fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol) + + var views [1]buffer.View + buf := buffer.NewView(30) + + // Write a packet, and check that it doesn't get delivered as we don't + // have a matching endpoint. + fakeNet.packetCount[1] = 0 + buf[0] = 1 + vv := buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 0 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 0) + } + + // Set promiscuous mode, then check that packet is delivered. + if err := s.SetPromiscuousMode(1, true); err != nil { + t.Fatalf("SetPromiscuousMode failed: %v", err) + } + + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } + + // Check that we can't get a route as there is no local address. + _, err := s.FindRoute(0, "", "\x02", fakeNetNumber) + if err != tcpip.ErrNoRoute { + t.Fatalf("FindRoute returned unexpected status: expected %v, got %v", tcpip.ErrNoRoute, err) + } + + // Set promiscuous mode to false, then check that packet can't be + // delivered anymore. 
+ if err := s.SetPromiscuousMode(1, false); err != nil { + t.Fatalf("SetPromiscuousMode failed: %v", err) + } + + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } +} + +func TestAddressSpoofing(t *testing.T) { + srcAddr := tcpip.Address("\x01") + dstAddr := tcpip.Address("\x02") + + s := stack.New([]string{"fakeNet"}, nil) + + id, _ := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, dstAddr); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{ + {"\x00", "\x00", "\x00", 1}, + }) + + // With address spoofing disabled, FindRoute does not permit an address + // that was not added to the NIC to be used as the source. + r, err := s.FindRoute(0, srcAddr, dstAddr, fakeNetNumber) + if err == nil { + t.Errorf("FindRoute succeeded with route %+v when it should have failed", r) + } + + // With address spoofing enabled, FindRoute permits any address to be used + // as the source. + if err := s.SetSpoofing(1, true); err != nil { + t.Fatalf("SetSpoofing failed: %v", err) + } + r, err = s.FindRoute(0, srcAddr, dstAddr, fakeNetNumber) + if err != nil { + t.Fatalf("FindRoute failed: %v", err) + } + if r.LocalAddress != srcAddr { + t.Errorf("Route has wrong local address: got %v, wanted %v", r.LocalAddress, srcAddr) + } + if r.RemoteAddress != dstAddr { + t.Errorf("Route has wrong remote address: got %v, wanted %v", r.RemoteAddress, dstAddr) + } +} + +// Set the subnet, then check that packet is delivered. 
+func TestSubnetAcceptsMatchingPacket(t *testing.T) { + s := stack.New([]string{"fakeNet"}, nil) + + id, linkEP := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{ + {"\x00", "\x00", "\x00", 1}, + }) + + fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol) + + var views [1]buffer.View + buf := buffer.NewView(30) + buf[0] = 1 + fakeNet.packetCount[1] = 0 + subnet, err := tcpip.NewSubnet(tcpip.Address("\x00"), tcpip.AddressMask("\xF0")) + if err != nil { + t.Fatalf("NewSubnet failed: %v", err) + } + if err := s.AddSubnet(1, fakeNetNumber, subnet); err != nil { + t.Fatalf("AddSubnet failed: %v", err) + } + + vv := buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 1 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 1) + } +} + +// Set destination outside the subnet, then check it doesn't get delivered. 
+func TestSubnetRejectsNonmatchingPacket(t *testing.T) { + s := stack.New([]string{"fakeNet"}, nil) + + id, linkEP := channel.New(10, defaultMTU, "") + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{ + {"\x00", "\x00", "\x00", 1}, + }) + + fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol) + + var views [1]buffer.View + buf := buffer.NewView(30) + buf[0] = 1 + fakeNet.packetCount[1] = 0 + subnet, err := tcpip.NewSubnet(tcpip.Address("\x10"), tcpip.AddressMask("\xF0")) + if err != nil { + t.Fatalf("NewSubnet failed: %v", err) + } + if err := s.AddSubnet(1, fakeNetNumber, subnet); err != nil { + t.Fatalf("AddSubnet failed: %v", err) + } + vv := buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeNet.packetCount[1] != 0 { + t.Errorf("packetCount[1] = %d, want %d", fakeNet.packetCount[1], 0) + } +} + +func TestNetworkOptions(t *testing.T) { + s := stack.New([]string{"fakeNet"}, []string{}) + + // Try an unsupported network protocol. 
+ if err := s.SetNetworkProtocolOption(tcpip.NetworkProtocolNumber(99999), fakeNetGoodOption(false)); err != tcpip.ErrUnknownProtocol { + t.Fatalf("SetNetworkProtocolOption(fakeNet2, blah, false) = %v, want = tcpip.ErrUnknownProtocol", err) + } + + testCases := []struct { + option interface{} + wantErr *tcpip.Error + verifier func(t *testing.T, p stack.NetworkProtocol) + }{ + {fakeNetGoodOption(true), nil, func(t *testing.T, p stack.NetworkProtocol) { + t.Helper() + fakeNet := p.(*fakeNetworkProtocol) + if fakeNet.opts.good != true { + t.Fatalf("fakeNet.opts.good = false, want = true") + } + var v fakeNetGoodOption + if err := s.NetworkProtocolOption(fakeNetNumber, &v); err != nil { + t.Fatalf("s.NetworkProtocolOption(fakeNetNumber, &v) = %v, want = nil, where v is option %T", v, err) + } + if v != true { + t.Fatalf("s.NetworkProtocolOption(fakeNetNumber, &v) returned v = %v, want = true", v) + } + }}, + {fakeNetBadOption(true), tcpip.ErrUnknownProtocolOption, nil}, + {fakeNetInvalidValueOption(1), tcpip.ErrInvalidOptionValue, nil}, + } + for _, tc := range testCases { + if got := s.SetNetworkProtocolOption(fakeNetNumber, tc.option); got != tc.wantErr { + t.Errorf("s.SetNetworkProtocolOption(fakeNet, %v) = %v, want = %v", tc.option, got, tc.wantErr) + } + if tc.verifier != nil { + tc.verifier(t, s.NetworkProtocolInstance(fakeNetNumber)) + } + } +} + +func init() { + stack.RegisterNetworkProtocolFactory("fakeNet", func() stack.NetworkProtocol { + return &fakeNetworkProtocol{} + }) +} diff --git a/pkg/tcpip/stack/transport_demuxer.go b/pkg/tcpip/stack/transport_demuxer.go new file mode 100644 index 000000000..3c0d7aa31 --- /dev/null +++ b/pkg/tcpip/stack/transport_demuxer.go @@ -0,0 +1,166 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package stack
+
+import (
+	"sync"
+
+	"gvisor.googlesource.com/gvisor/pkg/tcpip"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer"
+)
+
+// protocolIDs is the composite map key identifying one (network, transport)
+// protocol pair in the demuxer.
+type protocolIDs struct {
+	network   tcpip.NetworkProtocolNumber
+	transport tcpip.TransportProtocolNumber
+}
+
+// transportEndpoints manages all endpoints of a given protocol. It has its own
+// mutex so as to reduce interference between protocols.
+type transportEndpoints struct {
+	mu        sync.RWMutex
+	endpoints map[TransportEndpointID]TransportEndpoint
+}
+
+// transportDemuxer demultiplexes packets targeted at a transport endpoint
+// (i.e., after they've been parsed by the network layer). It does two levels
+// of demultiplexing: first based on the network and transport protocols, then
+// based on endpoints IDs.
+type transportDemuxer struct {
+	protocol map[protocolIDs]*transportEndpoints
+}
+
+func newTransportDemuxer(stack *Stack) *transportDemuxer {
+	d := &transportDemuxer{protocol: make(map[protocolIDs]*transportEndpoints)}
+
+	// Add each network and transport pair to the demuxer. The full cross
+	// product of registered protocols is pre-populated here, so lookups at
+	// packet-delivery time never mutate the map.
+	for netProto := range stack.networkProtocols {
+		for proto := range stack.transportProtocols {
+			d.protocol[protocolIDs{netProto, proto}] = &transportEndpoints{endpoints: make(map[TransportEndpointID]TransportEndpoint)}
+		}
+	}
+
+	return d
+}
+
+// registerEndpoint registers the given endpoint with the dispatcher such that
+// packets that match the endpoint ID are delivered to it.
+func (d *transportDemuxer) registerEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint) *tcpip.Error {
+	for i, n := range netProtos {
+		if err := d.singleRegisterEndpoint(n, protocol, id, ep); err != nil {
+			// Roll back the registrations made so far so no stale
+			// entries remain after a partial failure.
+			d.unregisterEndpoint(netProtos[:i], protocol, id)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *transportDemuxer) singleRegisterEndpoint(netProto tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint) *tcpip.Error {
+	eps, ok := d.protocol[protocolIDs{netProto, protocol}]
+	if !ok {
+		// NOTE(review): an unknown (network, transport) pair is treated
+		// as success and the registration is silently dropped — confirm
+		// callers do not rely on an error here.
+		return nil
+	}
+
+	eps.mu.Lock()
+	defer eps.mu.Unlock()
+
+	// Exact-ID collisions map to a port-in-use error.
+	if _, ok := eps.endpoints[id]; ok {
+		return tcpip.ErrPortInUse
+	}
+
+	eps.endpoints[id] = ep
+
+	return nil
+}
+
+// unregisterEndpoint unregisters the endpoint with the given id such that it
+// won't receive any more packets.
+func (d *transportDemuxer) unregisterEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID) {
+	for _, n := range netProtos {
+		if eps, ok := d.protocol[protocolIDs{n, protocol}]; ok {
+			eps.mu.Lock()
+			delete(eps.endpoints, id)
+			eps.mu.Unlock()
+		}
+	}
+}
+
+// deliverPacket attempts to deliver the given packet. Returns true if it found
+// an endpoint, false otherwise.
+func (d *transportDemuxer) deliverPacket(r *Route, protocol tcpip.TransportProtocolNumber, vv *buffer.VectorisedView, id TransportEndpointID) bool {
+	eps, ok := d.protocol[protocolIDs{r.NetProto, protocol}]
+	if !ok {
+		return false
+	}
+
+	// Endpoint lookup happens under the read lock; HandlePacket is invoked
+	// after the lock is released.
+	eps.mu.RLock()
+	ep := d.findEndpointLocked(eps, vv, id)
+	eps.mu.RUnlock()
+
+	// Fail if we didn't find one.
+	if ep == nil {
+		return false
+	}
+
+	// Deliver the packet.
+	ep.HandlePacket(r, id, vv)
+
+	return true
+}
+
+// deliverControlPacket attempts to deliver the given control packet. Returns
+// true if it found an endpoint, false otherwise.
+func (d *transportDemuxer) deliverControlPacket(net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, typ ControlType, extra uint32, vv *buffer.VectorisedView, id TransportEndpointID) bool {
+	eps, ok := d.protocol[protocolIDs{net, trans}]
+	if !ok {
+		return false
+	}
+
+	// Try to find the endpoint.
+	eps.mu.RLock()
+	ep := d.findEndpointLocked(eps, vv, id)
+	eps.mu.RUnlock()
+
+	// Fail if we didn't find one.
+	if ep == nil {
+		return false
+	}
+
+	// Deliver the packet.
+	ep.HandleControlPacket(id, typ, extra, vv)
+
+	return true
+}
+
+// findEndpointLocked looks up the endpoint for id, trying progressively less
+// specific matches: (1) the exact 4-tuple, (2) any local address with the
+// given remote, (3) the exact local address with any remote (a bound
+// listener), and finally (4) the local port alone (a wildcard listener).
+// Callers must hold eps.mu (at least for reading), per the Locked suffix.
+func (d *transportDemuxer) findEndpointLocked(eps *transportEndpoints, vv *buffer.VectorisedView, id TransportEndpointID) TransportEndpoint {
+	// Try to find a match with the id as provided.
+	if ep := eps.endpoints[id]; ep != nil {
+		return ep
+	}
+
+	// Try to find a match with the id minus the local address.
+	nid := id
+
+	nid.LocalAddress = ""
+	if ep := eps.endpoints[nid]; ep != nil {
+		return ep
+	}
+
+	// Try to find a match with the id minus the remote part.
+	nid.LocalAddress = id.LocalAddress
+	nid.RemoteAddress = ""
+	nid.RemotePort = 0
+	if ep := eps.endpoints[nid]; ep != nil {
+		return ep
+	}
+
+	// Try to find a match with only the local port.
+	nid.LocalAddress = ""
+	if ep := eps.endpoints[nid]; ep != nil {
+		return ep
+	}
+
+	return nil
+}
diff --git a/pkg/tcpip/stack/transport_test.go b/pkg/tcpip/stack/transport_test.go
new file mode 100644
index 000000000..7e072e96e
--- /dev/null
+++ b/pkg/tcpip/stack/transport_test.go
@@ -0,0 +1,420 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package stack_test + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/channel" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + fakeTransNumber tcpip.TransportProtocolNumber = 1 + fakeTransHeaderLen = 3 +) + +// fakeTransportEndpoint is a transport-layer protocol endpoint. It counts +// received packets; the counts of all endpoints are aggregated in the protocol +// descriptor. +// +// Headers of this protocol are fakeTransHeaderLen bytes, but we currently don't +// use it. +type fakeTransportEndpoint struct { + id stack.TransportEndpointID + stack *stack.Stack + netProto tcpip.NetworkProtocolNumber + proto *fakeTransportProtocol + peerAddr tcpip.Address + route stack.Route +} + +func newFakeTransportEndpoint(stack *stack.Stack, proto *fakeTransportProtocol, netProto tcpip.NetworkProtocolNumber) tcpip.Endpoint { + return &fakeTransportEndpoint{stack: stack, netProto: netProto, proto: proto} +} + +func (f *fakeTransportEndpoint) Close() { + f.route.Release() +} + +func (*fakeTransportEndpoint) Readiness(mask waiter.EventMask) waiter.EventMask { + return mask +} + +func (*fakeTransportEndpoint) Read(*tcpip.FullAddress) (buffer.View, *tcpip.Error) { + return buffer.View{}, nil +} + +func (f *fakeTransportEndpoint) Write(p tcpip.Payload, opts tcpip.WriteOptions) (uintptr, *tcpip.Error) { + if len(f.route.RemoteAddress) == 0 { + return 0, tcpip.ErrNoRoute + } + + hdr := buffer.NewPrependable(int(f.route.MaxHeaderLength())) + v, err := p.Get(p.Size()) + if err != nil { + return 0, err + } + if err := f.route.WritePacket(&hdr, v, fakeTransNumber); err != nil { + return 0, err + } + + return uintptr(len(v)), nil +} + +func (f *fakeTransportEndpoint) Peek([][]byte) (uintptr, *tcpip.Error) { + return 0, nil +} + +// SetSockOpt sets a socket option. Currently not supported. 
+func (*fakeTransportEndpoint) SetSockOpt(interface{}) *tcpip.Error { + return tcpip.ErrInvalidEndpointState +} + +// GetSockOpt implements tcpip.Endpoint.GetSockOpt. +func (*fakeTransportEndpoint) GetSockOpt(opt interface{}) *tcpip.Error { + switch opt.(type) { + case tcpip.ErrorOption: + return nil + } + return tcpip.ErrInvalidEndpointState +} + +func (f *fakeTransportEndpoint) Connect(addr tcpip.FullAddress) *tcpip.Error { + f.peerAddr = addr.Addr + + // Find the route. + r, err := f.stack.FindRoute(addr.NIC, "", addr.Addr, fakeNetNumber) + if err != nil { + return tcpip.ErrNoRoute + } + defer r.Release() + + // Try to register so that we can start receiving packets. + f.id.RemoteAddress = addr.Addr + err = f.stack.RegisterTransportEndpoint(0, []tcpip.NetworkProtocolNumber{fakeNetNumber}, fakeTransNumber, f.id, f) + if err != nil { + return err + } + + f.route = r.Clone() + + return nil +} + +func (f *fakeTransportEndpoint) ConnectEndpoint(e tcpip.Endpoint) *tcpip.Error { + return nil +} + +func (*fakeTransportEndpoint) Shutdown(tcpip.ShutdownFlags) *tcpip.Error { + return nil +} + +func (*fakeTransportEndpoint) Reset() { +} + +func (*fakeTransportEndpoint) Listen(int) *tcpip.Error { + return nil +} + +func (*fakeTransportEndpoint) Accept() (tcpip.Endpoint, *waiter.Queue, *tcpip.Error) { + return nil, nil, nil +} + +func (*fakeTransportEndpoint) Bind(_ tcpip.FullAddress, commit func() *tcpip.Error) *tcpip.Error { + return commit() +} + +func (*fakeTransportEndpoint) GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) { + return tcpip.FullAddress{}, nil +} + +func (*fakeTransportEndpoint) GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) { + return tcpip.FullAddress{}, nil +} + +func (f *fakeTransportEndpoint) HandlePacket(*stack.Route, stack.TransportEndpointID, *buffer.VectorisedView) { + // Increment the number of received packets. 
+ f.proto.packetCount++ +} + +func (f *fakeTransportEndpoint) HandleControlPacket(stack.TransportEndpointID, stack.ControlType, uint32, *buffer.VectorisedView) { + // Increment the number of received control packets. + f.proto.controlCount++ +} + +type fakeTransportGoodOption bool + +type fakeTransportBadOption bool + +type fakeTransportInvalidValueOption int + +type fakeTransportProtocolOptions struct { + good bool +} + +// fakeTransportProtocol is a transport-layer protocol descriptor. It +// aggregates the number of packets received via endpoints of this protocol. +type fakeTransportProtocol struct { + packetCount int + controlCount int + opts fakeTransportProtocolOptions +} + +func (*fakeTransportProtocol) Number() tcpip.TransportProtocolNumber { + return fakeTransNumber +} + +func (f *fakeTransportProtocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, _ *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) { + return newFakeTransportEndpoint(stack, f, netProto), nil +} + +func (*fakeTransportProtocol) MinimumPacketSize() int { + return fakeTransHeaderLen +} + +func (*fakeTransportProtocol) ParsePorts(buffer.View) (src, dst uint16, err *tcpip.Error) { + return 0, 0, nil +} + +func (*fakeTransportProtocol) HandleUnknownDestinationPacket(*stack.Route, stack.TransportEndpointID, *buffer.VectorisedView) bool { + return true +} + +func (f *fakeTransportProtocol) SetOption(option interface{}) *tcpip.Error { + switch v := option.(type) { + case fakeTransportGoodOption: + f.opts.good = bool(v) + return nil + case fakeTransportInvalidValueOption: + return tcpip.ErrInvalidOptionValue + default: + return tcpip.ErrUnknownProtocolOption + } +} + +func (f *fakeTransportProtocol) Option(option interface{}) *tcpip.Error { + switch v := option.(type) { + case *fakeTransportGoodOption: + *v = fakeTransportGoodOption(f.opts.good) + return nil + default: + return tcpip.ErrUnknownProtocolOption + } +} + +func TestTransportReceive(t *testing.T) { + id, linkEP := 
channel.New(10, defaultMTU, "") + s := stack.New([]string{"fakeNet"}, []string{"fakeTrans"}) + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{{"\x00", "\x00", "\x00", 1}}) + + if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + // Create endpoint and connect to remote address. + wq := waiter.Queue{} + ep, err := s.NewEndpoint(fakeTransNumber, fakeNetNumber, &wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + if err := ep.Connect(tcpip.FullAddress{0, "\x02", 0}); err != nil { + t.Fatalf("Connect failed: %v", err) + } + + fakeTrans := s.TransportProtocolInstance(fakeTransNumber).(*fakeTransportProtocol) + + var views [1]buffer.View + // Create buffer that will hold the packet. + buf := buffer.NewView(30) + + // Make sure packet with wrong protocol is not delivered. + buf[0] = 1 + buf[2] = 0 + vv := buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeTrans.packetCount != 0 { + t.Errorf("packetCount = %d, want %d", fakeTrans.packetCount, 0) + } + + // Make sure packet from the wrong source is not delivered. + buf[0] = 1 + buf[1] = 3 + buf[2] = byte(fakeTransNumber) + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeTrans.packetCount != 0 { + t.Errorf("packetCount = %d, want %d", fakeTrans.packetCount, 0) + } + + // Make sure packet is delivered. 
+ buf[0] = 1 + buf[1] = 2 + buf[2] = byte(fakeTransNumber) + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeTrans.packetCount != 1 { + t.Errorf("packetCount = %d, want %d", fakeTrans.packetCount, 1) + } +} + +func TestTransportControlReceive(t *testing.T) { + id, linkEP := channel.New(10, defaultMTU, "") + s := stack.New([]string{"fakeNet"}, []string{"fakeTrans"}) + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{{"\x00", "\x00", "\x00", 1}}) + + if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + // Create endpoint and connect to remote address. + wq := waiter.Queue{} + ep, err := s.NewEndpoint(fakeTransNumber, fakeNetNumber, &wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + if err := ep.Connect(tcpip.FullAddress{0, "\x02", 0}); err != nil { + t.Fatalf("Connect failed: %v", err) + } + + fakeTrans := s.TransportProtocolInstance(fakeTransNumber).(*fakeTransportProtocol) + + var views [1]buffer.View + // Create buffer that will hold the control packet. + buf := buffer.NewView(2*fakeNetHeaderLen + 30) + + // Outer packet contains the control protocol number. + buf[0] = 1 + buf[1] = 0xfe + buf[2] = uint8(fakeControlProtocol) + + // Make sure packet with wrong protocol is not delivered. + buf[fakeNetHeaderLen+0] = 0 + buf[fakeNetHeaderLen+1] = 1 + buf[fakeNetHeaderLen+2] = 0 + vv := buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeTrans.controlCount != 0 { + t.Errorf("controlCount = %d, want %d", fakeTrans.controlCount, 0) + } + + // Make sure packet from the wrong source is not delivered. 
+ buf[fakeNetHeaderLen+0] = 3 + buf[fakeNetHeaderLen+1] = 1 + buf[fakeNetHeaderLen+2] = byte(fakeTransNumber) + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeTrans.controlCount != 0 { + t.Errorf("controlCount = %d, want %d", fakeTrans.controlCount, 0) + } + + // Make sure packet is delivered. + buf[fakeNetHeaderLen+0] = 2 + buf[fakeNetHeaderLen+1] = 1 + buf[fakeNetHeaderLen+2] = byte(fakeTransNumber) + vv = buf.ToVectorisedView(views) + linkEP.Inject(fakeNetNumber, &vv) + if fakeTrans.controlCount != 1 { + t.Errorf("controlCount = %d, want %d", fakeTrans.controlCount, 1) + } +} + +func TestTransportSend(t *testing.T) { + id, _ := channel.New(10, defaultMTU, "") + s := stack.New([]string{"fakeNet"}, []string{"fakeTrans"}) + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, fakeNetNumber, "\x01"); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{{"\x00", "\x00", "\x00", 1}}) + + // Create endpoint and bind it. + wq := waiter.Queue{} + ep, err := s.NewEndpoint(fakeTransNumber, fakeNetNumber, &wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + if err := ep.Connect(tcpip.FullAddress{0, "\x02", 0}); err != nil { + t.Fatalf("Connect failed: %v", err) + } + + // Create buffer that will hold the payload. + view := buffer.NewView(30) + _, err = ep.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}) + if err != nil { + t.Fatalf("write failed: %v", err) + } + + fakeNet := s.NetworkProtocolInstance(fakeNetNumber).(*fakeNetworkProtocol) + + if fakeNet.sendPacketCount[2] != 1 { + t.Errorf("sendPacketCount = %d, want %d", fakeNet.sendPacketCount[2], 1) + } +} + +func TestTransportOptions(t *testing.T) { + s := stack.New([]string{"fakeNet"}, []string{"fakeTrans"}) + + // Try an unsupported transport protocol. 
+ if err := s.SetTransportProtocolOption(tcpip.TransportProtocolNumber(99999), fakeTransportGoodOption(false)); err != tcpip.ErrUnknownProtocol { + t.Fatalf("SetTransportProtocolOption(fakeTrans2, blah, false) = %v, want = tcpip.ErrUnknownProtocol", err) + } + + testCases := []struct { + option interface{} + wantErr *tcpip.Error + verifier func(t *testing.T, p stack.TransportProtocol) + }{ + {fakeTransportGoodOption(true), nil, func(t *testing.T, p stack.TransportProtocol) { + t.Helper() + fakeTrans := p.(*fakeTransportProtocol) + if fakeTrans.opts.good != true { + t.Fatalf("fakeTrans.opts.good = false, want = true") + } + var v fakeTransportGoodOption + if err := s.TransportProtocolOption(fakeTransNumber, &v); err != nil { + t.Fatalf("s.TransportProtocolOption(fakeTransNumber, &v) = %v, want = nil, where v is option %T", v, err) + } + if v != true { + t.Fatalf("s.TransportProtocolOption(fakeTransNumber, &v) returned v = %v, want = true", v) + } + + }}, + {fakeTransportBadOption(true), tcpip.ErrUnknownProtocolOption, nil}, + {fakeTransportInvalidValueOption(1), tcpip.ErrInvalidOptionValue, nil}, + } + for _, tc := range testCases { + if got := s.SetTransportProtocolOption(fakeTransNumber, tc.option); got != tc.wantErr { + t.Errorf("s.SetTransportProtocolOption(fakeTrans, %v) = %v, want = %v", tc.option, got, tc.wantErr) + } + if tc.verifier != nil { + tc.verifier(t, s.TransportProtocolInstance(fakeTransNumber)) + } + } +} + +func init() { + stack.RegisterTransportProtocolFactory("fakeTrans", func() stack.TransportProtocol { + return &fakeTransportProtocol{} + }) +} diff --git a/pkg/tcpip/tcpip.go b/pkg/tcpip/tcpip.go new file mode 100644 index 000000000..f3a94f353 --- /dev/null +++ b/pkg/tcpip/tcpip.go @@ -0,0 +1,499 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package tcpip provides the interfaces and related types that users of the +// tcpip stack will use in order to create endpoints used to send and receive +// data over the network stack. +// +// The starting point is the creation and configuration of a stack. A stack can +// be created by calling the New() function of the tcpip/stack/stack package; +// configuring a stack involves creating NICs (via calls to Stack.CreateNIC()), +// adding network addresses (via calls to Stack.AddAddress()), and +// setting a route table (via a call to Stack.SetRouteTable()). +// +// Once a stack is configured, endpoints can be created by calling +// Stack.NewEndpoint(). Such endpoints can be used to send/receive data, connect +// to peers, listen for connections, accept connections, etc., depending on the +// transport protocol selected. +package tcpip + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// Error represents an error in the netstack error space. Using a special type +// ensures that errors outside of this space are not accidentally introduced. +type Error struct { + string +} + +// String implements fmt.Stringer.String. +func (e *Error) String() string { + return e.string +} + +// Errors that can be returned by the network stack. 
+var ( + ErrUnknownProtocol = &Error{"unknown protocol"} + ErrUnknownNICID = &Error{"unknown nic id"} + ErrUnknownProtocolOption = &Error{"unknown option for protocol"} + ErrDuplicateNICID = &Error{"duplicate nic id"} + ErrDuplicateAddress = &Error{"duplicate address"} + ErrNoRoute = &Error{"no route"} + ErrBadLinkEndpoint = &Error{"bad link layer endpoint"} + ErrAlreadyBound = &Error{"endpoint already bound"} + ErrInvalidEndpointState = &Error{"endpoint is in invalid state"} + ErrAlreadyConnecting = &Error{"endpoint is already connecting"} + ErrAlreadyConnected = &Error{"endpoint is already connected"} + ErrNoPortAvailable = &Error{"no ports are available"} + ErrPortInUse = &Error{"port is in use"} + ErrBadLocalAddress = &Error{"bad local address"} + ErrClosedForSend = &Error{"endpoint is closed for send"} + ErrClosedForReceive = &Error{"endpoint is closed for receive"} + ErrWouldBlock = &Error{"operation would block"} + ErrConnectionRefused = &Error{"connection was refused"} + ErrTimeout = &Error{"operation timed out"} + ErrAborted = &Error{"operation aborted"} + ErrConnectStarted = &Error{"connection attempt started"} + ErrDestinationRequired = &Error{"destination address is required"} + ErrNotSupported = &Error{"operation not supported"} + ErrQueueSizeNotSupported = &Error{"queue size querying not supported"} + ErrNotConnected = &Error{"endpoint not connected"} + ErrConnectionReset = &Error{"connection reset by peer"} + ErrConnectionAborted = &Error{"connection aborted"} + ErrNoSuchFile = &Error{"no such file"} + ErrInvalidOptionValue = &Error{"invalid option value specified"} + ErrNoLinkAddress = &Error{"no remote link address"} + ErrBadAddress = &Error{"bad address"} +) + +// Errors related to Subnet +var ( + errSubnetLengthMismatch = errors.New("subnet length of address and mask differ") + errSubnetAddressMasked = errors.New("subnet address has bits set outside the mask") +) + +// Address is a byte slice cast as a string that represents the address of a +// 
network node. Or, in the case of unix endpoints, it may represent a path. +type Address string + +// AddressMask is a bitmask for an address. +type AddressMask string + +// Subnet is a subnet defined by its address and mask. +type Subnet struct { + address Address + mask AddressMask +} + +// NewSubnet creates a new Subnet, checking that the address and mask are the same length. +func NewSubnet(a Address, m AddressMask) (Subnet, error) { + if len(a) != len(m) { + return Subnet{}, errSubnetLengthMismatch + } + for i := 0; i < len(a); i++ { + if a[i]&^m[i] != 0 { + return Subnet{}, errSubnetAddressMasked + } + } + return Subnet{a, m}, nil +} + +// Contains returns true iff the address is of the same length and matches the +// subnet address and mask. +func (s *Subnet) Contains(a Address) bool { + if len(a) != len(s.address) { + return false + } + for i := 0; i < len(a); i++ { + if a[i]&s.mask[i] != s.address[i] { + return false + } + } + return true +} + +// ID returns the subnet ID. +func (s *Subnet) ID() Address { + return s.address +} + +// Bits returns the number of ones (network bits) and zeros (host bits) in the +// subnet mask. +func (s *Subnet) Bits() (ones int, zeros int) { + for _, b := range []byte(s.mask) { + for i := uint(0); i < 8; i++ { + if b&(1<<i) == 0 { + zeros++ + } else { + ones++ + } + } + } + return +} + +// Prefix returns the number of bits before the first host bit. +func (s *Subnet) Prefix() int { + for i, b := range []byte(s.mask) { + for j := 7; j >= 0; j-- { + if b&(1<<uint(j)) == 0 { + return i*8 + 7 - j + } + } + } + return len(s.mask) * 8 +} + +// NICID is a number that uniquely identifies a NIC. +type NICID int32 + +// ShutdownFlags represents flags that can be passed to the Shutdown() method +// of the Endpoint interface. +type ShutdownFlags int + +// Values of the flags that can be passed to the Shutdown() method. They can +// be OR'ed together. 
+const ( + ShutdownRead ShutdownFlags = 1 << iota + ShutdownWrite +) + +// FullAddress represents a full transport node address, as required by the +// Connect() and Bind() methods. +type FullAddress struct { + // NIC is the ID of the NIC this address refers to. + // + // This may not be used by all endpoint types. + NIC NICID + + // Addr is the network address. + Addr Address + + // Port is the transport port. + // + // This may not be used by all endpoint types. + Port uint16 +} + +// Payload provides an interface around data that is being sent to an endpoint. +// This allows the endpoint to request the amount of data it needs based on +// internal buffers without exposing them. 'p.Get(p.Size())' reads all the data. +type Payload interface { + // Get returns a slice containing exactly 'min(size, p.Size())' bytes. + Get(size int) ([]byte, *Error) + + // Size returns the payload size. + Size() int +} + +// SlicePayload implements Payload on top of slices for convenience. +type SlicePayload []byte + +// Get implements Payload. +func (s SlicePayload) Get(size int) ([]byte, *Error) { + if size > s.Size() { + size = s.Size() + } + return s[:size], nil +} + +// Size implements Payload. +func (s SlicePayload) Size() int { + return len(s) +} + +// Endpoint is the interface implemented by transport protocols (e.g., tcp, udp) +// that exposes functionality like read, write, connect, etc. to users of the +// networking stack. +type Endpoint interface { + // Close puts the endpoint in a closed state and frees all resources + // associated with it. + Close() + + // Read reads data from the endpoint and optionally returns the sender. + // This method does not block if there is no data pending. + // It will also either return an error or data, never both. + Read(*FullAddress) (buffer.View, *Error) + + // Write writes data to the endpoint's peer. This method does not block if + // the data cannot be written. 
+ // + // Unlike io.Writer.Write, Endpoint.Write transfers ownership of any bytes + // successfully written to the Endpoint. That is, if a call to + // Write(SlicePayload{data}) returns (n, err), it may retain data[:n], and + // the caller should not use data[:n] after Write returns. + // + // Note that unlike io.Writer.Write, it is not an error for Write to + // perform a partial write. + Write(Payload, WriteOptions) (uintptr, *Error) + + // Peek reads data without consuming it from the endpoint. + // + // This method does not block if there is no data pending. + Peek([][]byte) (uintptr, *Error) + + // Connect connects the endpoint to its peer. Specifying a NIC is + // optional. + // + // There are three classes of return values: + // nil -- the attempt to connect succeeded. + // ErrConnectStarted/ErrAlreadyConnecting -- the connect attempt started + // but hasn't completed yet. In this case, the caller must call Connect + // or GetSockOpt(ErrorOption) when the endpoint becomes writable to + // get the actual result. The first call to Connect after the socket has + // connected returns nil. Calling connect again results in ErrAlreadyConnected. + // Anything else -- the attempt to connect failed. + Connect(address FullAddress) *Error + + // Shutdown closes the read and/or write end of the endpoint connection + // to its peer. + Shutdown(flags ShutdownFlags) *Error + + // Listen puts the endpoint in "listen" mode, which allows it to accept + // new connections. + Listen(backlog int) *Error + + // Accept returns a new endpoint if a peer has established a connection + // to an endpoint previously set to listen mode. This method does not + // block if no new connections are available. + // + // The returned Queue is the wait queue for the newly created endpoint. + Accept() (Endpoint, *waiter.Queue, *Error) + + // Bind binds the endpoint to a specific local address and port. + // Specifying a NIC is optional. 
+ // + // An optional commit function will be executed atomically with respect + // to binding the endpoint. If this returns an error, the bind will not + // occur and the error will be propagated back to the caller. + Bind(address FullAddress, commit func() *Error) *Error + + // GetLocalAddress returns the address to which the endpoint is bound. + GetLocalAddress() (FullAddress, *Error) + + // GetRemoteAddress returns the address to which the endpoint is + // connected. + GetRemoteAddress() (FullAddress, *Error) + + // Readiness returns the current readiness of the endpoint. For example, + // if waiter.EventIn is set, the endpoint is immediately readable. + Readiness(mask waiter.EventMask) waiter.EventMask + + // SetSockOpt sets a socket option. opt should be one of the *Option types. + SetSockOpt(opt interface{}) *Error + + // GetSockOpt gets a socket option. opt should be a pointer to one of the + // *Option types. + GetSockOpt(opt interface{}) *Error +} + +// WriteOptions contains options for Endpoint.Write. +type WriteOptions struct { + // If To is not nil, write to the given address instead of the endpoint's + // peer. + To *FullAddress + + // More has the same semantics as Linux's MSG_MORE. + More bool + + // EndOfRecord has the same semantics as Linux's MSG_EOR. + EndOfRecord bool +} + +// ErrorOption is used in GetSockOpt to specify that the last error reported by +// the endpoint should be cleared and returned. +type ErrorOption struct{} + +// SendBufferSizeOption is used by SetSockOpt/GetSockOpt to specify the send +// buffer size option. +type SendBufferSizeOption int + +// ReceiveBufferSizeOption is used by SetSockOpt/GetSockOpt to specify the +// receive buffer size option. +type ReceiveBufferSizeOption int + +// SendQueueSizeOption is used in GetSockOpt to specify that the number of +// unread bytes in the output buffer should be returned. 
+type SendQueueSizeOption int
+
+// ReceiveQueueSizeOption is used in GetSockOpt to specify that the number of
+// unread bytes in the input buffer should be returned.
+type ReceiveQueueSizeOption int
+
+// V6OnlyOption is used by SetSockOpt/GetSockOpt to specify whether an IPv6
+// socket is to be restricted to sending and receiving IPv6 packets only.
+type V6OnlyOption int
+
+// NoDelayOption is used by SetSockOpt/GetSockOpt to specify if data should be
+// sent out immediately by the transport protocol. For TCP, it determines if the
+// Nagle algorithm is on or off.
+type NoDelayOption int
+
+// ReuseAddressOption is used by SetSockOpt/GetSockOpt to specify whether Bind()
+// should allow reuse of local address.
+type ReuseAddressOption int
+
+// PasscredOption is used by SetSockOpt/GetSockOpt to specify whether
+// SCM_CREDENTIALS socket control messages are enabled.
+//
+// Only supported on Unix sockets.
+type PasscredOption int
+
+// TCPInfoOption is used by GetSockOpt to expose TCP statistics.
+//
+// TODO: Add and populate stat fields.
+type TCPInfoOption struct{}
+
+// Route is a row in the routing table. It specifies through which NIC (and
+// gateway) sets of packets should be routed. A row is considered viable if the
+// masked target address matches the destination address in the row.
+type Route struct {
+	// Destination is the address that must be matched against the masked
+	// target address to check if this row is viable.
+	Destination Address
+
+	// Mask specifies which bits of the Destination and the target address
+	// must match for this row to be viable.
+	Mask Address
+
+	// Gateway is the gateway to be used if this row is viable.
+	Gateway Address
+
+	// NIC is the id of the nic to be used if this row is viable.
+	NIC NICID
+}
+
+// Match determines if r is viable for the given destination address.
+func (r *Route) Match(addr Address) bool {
+	if len(addr) != len(r.Destination) {
+		return false
+	}
+
+	for i := 0; i < len(r.Destination); i++ {
+		if (addr[i] & r.Mask[i]) != r.Destination[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// LinkEndpointID represents a data link layer endpoint.
+type LinkEndpointID uint64
+
+// TransportProtocolNumber is the number of a transport protocol.
+type TransportProtocolNumber uint32
+
+// NetworkProtocolNumber is the number of a network protocol.
+type NetworkProtocolNumber uint32
+
+// Stats holds statistics about the networking stack.
+type Stats struct {
+	// UnknownProtocolRcvdPackets is the number of packets received by the
+	// stack that were for an unknown or unsupported protocol.
+	UnknownProtocolRcvdPackets uint64
+
+	// UnknownNetworkEndpointRcvdPackets is the number of packets received
+	// by the stack that were for a supported network protocol, but whose
+	// destination address didn't have a matching endpoint.
+	UnknownNetworkEndpointRcvdPackets uint64
+
+	// MalformedRcvdPackets is the number of packets received by the stack
+	// that were deemed malformed.
+	MalformedRcvdPackets uint64
+
+	// DroppedPackets is the number of packets dropped due to full queues.
+	DroppedPackets uint64
+}
+
+// String implements the fmt.Stringer interface.
+func (a Address) String() string {
+	switch len(a) {
+	case 4:
+		return fmt.Sprintf("%d.%d.%d.%d", int(a[0]), int(a[1]), int(a[2]), int(a[3]))
+	default:
+		return fmt.Sprintf("%x", []byte(a))
+	}
+}
+
+// To4 converts the IPv4 address to a 4-byte representation.
+// If the address is not an IPv4 address, To4 returns "".
+func (a Address) To4() Address {
+	const (
+		ipv4len = 4
+		ipv6len = 16
+	)
+	if len(a) == ipv4len {
+		return a
+	}
+	if len(a) == ipv6len &&
+		isZeros(a[0:10]) &&
+		a[10] == 0xff &&
+		a[11] == 0xff {
+		return a[12:16]
+	}
+	return ""
+}
+
+// isZeros reports whether a is all zeros.
+func isZeros(a Address) bool { + for i := 0; i < len(a); i++ { + if a[i] != 0 { + return false + } + } + return true +} + +// LinkAddress is a byte slice cast as a string that represents a link address. +// It is typically a 6-byte MAC address. +type LinkAddress string + +// String implements the fmt.Stringer interface. +func (a LinkAddress) String() string { + switch len(a) { + case 6: + return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", a[0], a[1], a[2], a[3], a[4], a[5]) + default: + return fmt.Sprintf("%x", []byte(a)) + } +} + +// ParseMACAddress parses an IEEE 802 address. +// +// It must be in the format aa:bb:cc:dd:ee:ff or aa-bb-cc-dd-ee-ff. +func ParseMACAddress(s string) (LinkAddress, error) { + parts := strings.FieldsFunc(s, func(c rune) bool { + return c == ':' || c == '-' + }) + if len(parts) != 6 { + return "", fmt.Errorf("inconsistent parts: %s", s) + } + addr := make([]byte, 0, len(parts)) + for _, part := range parts { + u, err := strconv.ParseUint(part, 16, 8) + if err != nil { + return "", fmt.Errorf("invalid hex digits: %s", s) + } + addr = append(addr, byte(u)) + } + return LinkAddress(addr), nil +} + +// ProtocolAddress is an address and the network protocol it is associated +// with. +type ProtocolAddress struct { + // Protocol is the protocol of the address. + Protocol NetworkProtocolNumber + + // Address is a network address. + Address Address +} diff --git a/pkg/tcpip/tcpip_test.go b/pkg/tcpip/tcpip_test.go new file mode 100644 index 000000000..fd4d8346f --- /dev/null +++ b/pkg/tcpip/tcpip_test.go @@ -0,0 +1,130 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tcpip + +import ( + "testing" +) + +func TestSubnetContains(t *testing.T) { + tests := []struct { + s Address + m AddressMask + a Address + want bool + }{ + {"\xa0", "\xf0", "\x90", false}, + {"\xa0", "\xf0", "\xa0", true}, + {"\xa0", "\xf0", "\xa5", true}, + {"\xa0", "\xf0", "\xaf", true}, + {"\xa0", "\xf0", "\xb0", false}, + {"\xa0", "\xf0", "", false}, + {"\xa0", "\xf0", "\xa0\x00", false}, + {"\xc2\x80", "\xff\xf0", "\xc2\x80", true}, + {"\xc2\x80", "\xff\xf0", "\xc2\x00", false}, + {"\xc2\x00", "\xff\xf0", "\xc2\x00", true}, + {"\xc2\x00", "\xff\xf0", "\xc2\x80", false}, + } + for _, tt := range tests { + s, err := NewSubnet(tt.s, tt.m) + if err != nil { + t.Errorf("NewSubnet(%v, %v) = %v", tt.s, tt.m, err) + continue + } + if got := s.Contains(tt.a); got != tt.want { + t.Errorf("Subnet(%v).Contains(%v) = %v, want %v", s, tt.a, got, tt.want) + } + } +} + +func TestSubnetBits(t *testing.T) { + tests := []struct { + a AddressMask + want1 int + want0 int + }{ + {"\x00", 0, 8}, + {"\x00\x00", 0, 16}, + {"\x36", 4, 4}, + {"\x5c", 4, 4}, + {"\x5c\x5c", 8, 8}, + {"\x5c\x36", 8, 8}, + {"\x36\x5c", 8, 8}, + {"\x36\x36", 8, 8}, + {"\xff", 8, 0}, + {"\xff\xff", 16, 0}, + } + for _, tt := range tests { + s := &Subnet{mask: tt.a} + got1, got0 := s.Bits() + if got1 != tt.want1 || got0 != tt.want0 { + t.Errorf("Subnet{mask: %x}.Bits() = %d, %d, want %d, %d", tt.a, got1, got0, tt.want1, tt.want0) + } + } +} + +func TestSubnetPrefix(t *testing.T) { + tests := []struct { + a AddressMask + want int + }{ + {"\x00", 0}, + {"\x00\x00", 0}, + {"\x36", 0}, + {"\x86", 1}, + {"\xc5", 2}, + {"\xff\x00", 8}, + {"\xff\x36", 8}, + {"\xff\x8c", 9}, + {"\xff\xc8", 10}, + {"\xff", 8}, + {"\xff\xff", 16}, + } + for _, tt := range tests { + s := &Subnet{mask: tt.a} + got := s.Prefix() + if got != tt.want { + t.Errorf("Subnet{mask: %x}.Bits() = %d want %d", tt.a, got, tt.want) + } + } +} + +func TestSubnetCreation(t *testing.T) { + tests := []struct { + a Address + m AddressMask + 
want error + }{ + {"\xa0", "\xf0", nil}, + {"\xa0\xa0", "\xf0", errSubnetLengthMismatch}, + {"\xaa", "\xf0", errSubnetAddressMasked}, + {"", "", nil}, + } + for _, tt := range tests { + if _, err := NewSubnet(tt.a, tt.m); err != tt.want { + t.Errorf("NewSubnet(%v, %v) = %v, want %v", tt.a, tt.m, err, tt.want) + } + } +} + +func TestRouteMatch(t *testing.T) { + tests := []struct { + d Address + m Address + a Address + want bool + }{ + {"\xc2\x80", "\xff\xf0", "\xc2\x80", true}, + {"\xc2\x80", "\xff\xf0", "\xc2\x00", false}, + {"\xc2\x00", "\xff\xf0", "\xc2\x00", true}, + {"\xc2\x00", "\xff\xf0", "\xc2\x80", false}, + } + for _, tt := range tests { + r := Route{Destination: tt.d, Mask: tt.m} + if got := r.Match(tt.a); got != tt.want { + t.Errorf("Route(%v).Match(%v) = %v, want %v", r, tt.a, got, tt.want) + } + } +} diff --git a/pkg/tcpip/transport/queue/BUILD b/pkg/tcpip/transport/queue/BUILD new file mode 100644 index 000000000..162af574c --- /dev/null +++ b/pkg/tcpip/transport/queue/BUILD @@ -0,0 +1,29 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "queue_state", + srcs = [ + "queue.go", + ], + out = "queue_state.go", + package = "queue", +) + +go_library( + name = "queue", + srcs = [ + "queue.go", + "queue_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/queue", + visibility = ["//:sandbox"], + deps = [ + "//pkg/ilist", + "//pkg/state", + "//pkg/tcpip", + "//pkg/waiter", + ], +) diff --git a/pkg/tcpip/transport/queue/queue.go b/pkg/tcpip/transport/queue/queue.go new file mode 100644 index 000000000..2d2918504 --- /dev/null +++ b/pkg/tcpip/transport/queue/queue.go @@ -0,0 +1,166 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Package queue provides the implementation of buffer queue
+// and interface of queue entry with Length method.
+package queue
+
+import (
+	"sync"
+
+	"gvisor.googlesource.com/gvisor/pkg/ilist"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip"
+	"gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+// Entry implements Linker interface and has both Length and Release methods.
+type Entry interface {
+	ilist.Linker
+	Length() int64
+	Release()
+	Peek() Entry
+}
+
+// Queue is a buffer queue.
+type Queue struct {
+	ReaderQueue *waiter.Queue
+	WriterQueue *waiter.Queue
+
+	mu       sync.Mutex `state:"nosave"`
+	closed   bool
+	used     int64
+	limit    int64
+	dataList ilist.List
+}
+
+// New allocates and initializes a new queue.
+func New(ReaderQueue *waiter.Queue, WriterQueue *waiter.Queue, limit int64) *Queue {
+	return &Queue{ReaderQueue: ReaderQueue, WriterQueue: WriterQueue, limit: limit}
+}
+
+// Close closes q for reading and writing. It is immediately not writable and
+// will become unreadable when no more data is pending.
+//
+// Both the read and write queues must be notified after closing:
+// q.ReaderQueue.Notify(waiter.EventIn)
+// q.WriterQueue.Notify(waiter.EventOut)
+func (q *Queue) Close() {
+	q.mu.Lock()
+	q.closed = true
+	q.mu.Unlock()
+}
+
+// Reset empties the queue and Releases all of the Entries.
+//
+// Both the read and write queues must be notified after resetting:
+// q.ReaderQueue.Notify(waiter.EventIn)
+// q.WriterQueue.Notify(waiter.EventOut)
+func (q *Queue) Reset() {
+	q.mu.Lock()
+	for cur := q.dataList.Front(); cur != nil; cur = cur.Next() {
+		cur.(Entry).Release()
+	}
+	q.dataList.Reset()
+	q.used = 0
+	q.mu.Unlock()
+}
+
+// IsReadable determines if q is currently readable.
+func (q *Queue) IsReadable() bool {
+	q.mu.Lock()
+	defer q.mu.Unlock()
+
+	return q.closed || q.dataList.Front() != nil
+}
+
+// IsWritable determines if q is currently writable.
+func (q *Queue) IsWritable() bool { + q.mu.Lock() + defer q.mu.Unlock() + + return q.closed || q.used < q.limit +} + +// Enqueue adds an entry to the data queue if room is available. +// +// If notify is true, ReaderQueue.Notify must be called: +// q.ReaderQueue.Notify(waiter.EventIn) +func (q *Queue) Enqueue(e Entry) (notify bool, err *tcpip.Error) { + q.mu.Lock() + + if q.closed { + q.mu.Unlock() + return false, tcpip.ErrClosedForSend + } + + if q.used >= q.limit { + q.mu.Unlock() + return false, tcpip.ErrWouldBlock + } + + notify = q.dataList.Front() == nil + q.used += e.Length() + q.dataList.PushBack(e) + + q.mu.Unlock() + + return notify, nil +} + +// Dequeue removes the first entry in the data queue, if one exists. +// +// If notify is true, WriterQueue.Notify must be called: +// q.WriterQueue.Notify(waiter.EventOut) +func (q *Queue) Dequeue() (e Entry, notify bool, err *tcpip.Error) { + q.mu.Lock() + + if q.dataList.Front() == nil { + err := tcpip.ErrWouldBlock + if q.closed { + err = tcpip.ErrClosedForReceive + } + q.mu.Unlock() + + return nil, false, err + } + + notify = q.used >= q.limit + + e = q.dataList.Front().(Entry) + q.dataList.Remove(e) + q.used -= e.Length() + + notify = notify && q.used < q.limit + + q.mu.Unlock() + + return e, notify, nil +} + +// Peek returns the first entry in the data queue, if one exists. +func (q *Queue) Peek() (Entry, *tcpip.Error) { + q.mu.Lock() + defer q.mu.Unlock() + + if q.dataList.Front() == nil { + err := tcpip.ErrWouldBlock + if q.closed { + err = tcpip.ErrClosedForReceive + } + return nil, err + } + + return q.dataList.Front().(Entry).Peek(), nil +} + +// QueuedSize returns the number of bytes currently in the queue, that is, the +// number of readable bytes. +func (q *Queue) QueuedSize() int64 { + return q.used +} + +// MaxQueueSize returns the maximum number of bytes storable in the queue. 
+func (q *Queue) MaxQueueSize() int64 { + return q.limit +} diff --git a/pkg/tcpip/transport/tcp/BUILD b/pkg/tcpip/transport/tcp/BUILD new file mode 100644 index 000000000..d0eb8b8bd --- /dev/null +++ b/pkg/tcpip/transport/tcp/BUILD @@ -0,0 +1,97 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "tcp_state", + srcs = [ + "endpoint.go", + "endpoint_state.go", + "rcv.go", + "segment_heap.go", + "snd.go", + "tcp_segment_list.go", + ], + out = "tcp_state.go", + package = "tcp", +) + +go_template_instance( + name = "tcp_segment_list", + out = "tcp_segment_list.go", + package = "tcp", + prefix = "segment", + template = "//pkg/ilist:generic_list", + types = { + "Linker": "*segment", + }, +) + +go_library( + name = "tcp", + srcs = [ + "accept.go", + "connect.go", + "endpoint.go", + "endpoint_state.go", + "forwarder.go", + "protocol.go", + "rcv.go", + "sack.go", + "segment.go", + "segment_heap.go", + "segment_queue.go", + "snd.go", + "tcp_segment_list.go", + "tcp_state.go", + "timer.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp", + visibility = ["//visibility:public"], + deps = [ + "//pkg/sleep", + "//pkg/state", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/seqnum", + "//pkg/tcpip/stack", + "//pkg/tmutex", + "//pkg/waiter", + ], +) + +go_test( + name = "tcp_test", + size = "small", + srcs = [ + "dual_stack_test.go", + "tcp_sack_test.go", + "tcp_test.go", + "tcp_timestamp_test.go", + ], + deps = [ + ":tcp", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/checker", + "//pkg/tcpip/header", + "//pkg/tcpip/link/loopback", + "//pkg/tcpip/link/sniffer", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/seqnum", + "//pkg/tcpip/stack", + "//pkg/tcpip/transport/tcp/testing/context", + "//pkg/waiter", + ], +) + 
+filegroup( + name = "autogen", + srcs = [ + "tcp_segment_list.go", + ], + visibility = ["//:sandbox"], +) diff --git a/pkg/tcpip/transport/tcp/accept.go b/pkg/tcpip/transport/tcp/accept.go new file mode 100644 index 000000000..9a5b13066 --- /dev/null +++ b/pkg/tcpip/transport/tcp/accept.go @@ -0,0 +1,407 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tcp + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "hash" + "io" + "sync" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + // tsLen is the length, in bits, of the timestamp in the SYN cookie. + tsLen = 8 + + // tsMask is a mask for timestamp values (i.e., tsLen bits). + tsMask = (1 << tsLen) - 1 + + // tsOffset is the offset, in bits, of the timestamp in the SYN cookie. + tsOffset = 24 + + // hashMask is the mask for hash values (i.e., tsOffset bits). + hashMask = (1 << tsOffset) - 1 + + // maxTSDiff is the maximum allowed difference between a received cookie + // timestamp and the current timestamp. If the difference is greater + // than maxTSDiff, the cookie is expired. + maxTSDiff = 2 +) + +var ( + // SynRcvdCountThreshold is the global maximum number of connections + // that are allowed to be in SYN-RCVD state before TCP starts using SYN + // cookies to accept connections. + // + // It is an exported variable only for testing, and should not otherwise + // be used by importers of this package. + SynRcvdCountThreshold uint64 = 1000 + + // mssTable is a slice containing the possible MSS values that we + // encode in the SYN cookie with two bits. 
+ mssTable = []uint16{536, 1300, 1440, 1460} +) + +func encodeMSS(mss uint16) uint32 { + for i := len(mssTable) - 1; i > 0; i-- { + if mss >= mssTable[i] { + return uint32(i) + } + } + return 0 +} + +// syncRcvdCount is the number of endpoints in the SYN-RCVD state. The value is +// protected by a mutex so that we can increment only when it's guaranteed not +// to go above a threshold. +var synRcvdCount struct { + sync.Mutex + value uint64 +} + +// listenContext is used by a listening endpoint to store state used while +// listening for connections. This struct is allocated by the listen goroutine +// and must not be accessed or have its methods called concurrently as they +// may mutate the stored objects. +type listenContext struct { + stack *stack.Stack + rcvWnd seqnum.Size + nonce [2][sha1.BlockSize]byte + + hasherMu sync.Mutex + hasher hash.Hash + v6only bool + netProto tcpip.NetworkProtocolNumber +} + +// timeStamp returns an 8-bit timestamp with a granularity of 64 seconds. +func timeStamp() uint32 { + return uint32(time.Now().Unix()>>6) & tsMask +} + +// incSynRcvdCount tries to increment the global number of endpoints in SYN-RCVD +// state. It succeeds if the increment doesn't make the count go beyond the +// threshold, and fails otherwise. +func incSynRcvdCount() bool { + synRcvdCount.Lock() + defer synRcvdCount.Unlock() + + if synRcvdCount.value >= SynRcvdCountThreshold { + return false + } + + synRcvdCount.value++ + + return true +} + +// decSynRcvdCount atomically decrements the global number of endpoints in +// SYN-RCVD state. It must only be called if a previous call to incSynRcvdCount +// succeeded. +func decSynRcvdCount() { + synRcvdCount.Lock() + defer synRcvdCount.Unlock() + + synRcvdCount.value-- +} + +// newListenContext creates a new listen context. 
+func newListenContext(stack *stack.Stack, rcvWnd seqnum.Size, v6only bool, netProto tcpip.NetworkProtocolNumber) *listenContext { + l := &listenContext{ + stack: stack, + rcvWnd: rcvWnd, + hasher: sha1.New(), + v6only: v6only, + netProto: netProto, + } + + rand.Read(l.nonce[0][:]) + rand.Read(l.nonce[1][:]) + + return l +} + +// cookieHash calculates the cookieHash for the given id, timestamp and nonce +// index. The hash is used to create and validate cookies. +func (l *listenContext) cookieHash(id stack.TransportEndpointID, ts uint32, nonceIndex int) uint32 { + + // Initialize block with fixed-size data: local ports and v. + var payload [8]byte + binary.BigEndian.PutUint16(payload[0:], id.LocalPort) + binary.BigEndian.PutUint16(payload[2:], id.RemotePort) + binary.BigEndian.PutUint32(payload[4:], ts) + + // Feed everything to the hasher. + l.hasherMu.Lock() + l.hasher.Reset() + l.hasher.Write(payload[:]) + l.hasher.Write(l.nonce[nonceIndex][:]) + io.WriteString(l.hasher, string(id.LocalAddress)) + io.WriteString(l.hasher, string(id.RemoteAddress)) + + // Finalize the calculation of the hash and return the first 4 bytes. + h := make([]byte, 0, sha1.Size) + h = l.hasher.Sum(h) + l.hasherMu.Unlock() + + return binary.BigEndian.Uint32(h[:]) +} + +// createCookie creates a SYN cookie for the given id and incoming sequence +// number. +func (l *listenContext) createCookie(id stack.TransportEndpointID, seq seqnum.Value, data uint32) seqnum.Value { + ts := timeStamp() + v := l.cookieHash(id, 0, 0) + uint32(seq) + (ts << tsOffset) + v += (l.cookieHash(id, ts, 1) + data) & hashMask + return seqnum.Value(v) +} + +// isCookieValid checks if the supplied cookie is valid for the given id and +// sequence number. If it is, it also returns the data originally encoded in the +// cookie when createCookie was called. 
+func (l *listenContext) isCookieValid(id stack.TransportEndpointID, cookie seqnum.Value, seq seqnum.Value) (uint32, bool) { + ts := timeStamp() + v := uint32(cookie) - l.cookieHash(id, 0, 0) - uint32(seq) + cookieTS := v >> tsOffset + if ((ts - cookieTS) & tsMask) > maxTSDiff { + return 0, false + } + + return (v - l.cookieHash(id, cookieTS, 1)) & hashMask, true +} + +// createConnectedEndpoint creates a new connected endpoint, with the connection +// parameters given by the arguments. +func (l *listenContext) createConnectedEndpoint(s *segment, iss seqnum.Value, irs seqnum.Value, rcvdSynOpts *header.TCPSynOptions) (*endpoint, *tcpip.Error) { + // Create a new endpoint. + netProto := l.netProto + if netProto == 0 { + netProto = s.route.NetProto + } + n := newEndpoint(l.stack, netProto, nil) + n.v6only = l.v6only + n.id = s.id + n.boundNICID = s.route.NICID() + n.route = s.route.Clone() + n.effectiveNetProtos = []tcpip.NetworkProtocolNumber{s.route.NetProto} + n.rcvBufSize = int(l.rcvWnd) + + n.maybeEnableTimestamp(rcvdSynOpts) + n.maybeEnableSACKPermitted(rcvdSynOpts) + + // Register new endpoint so that packets are routed to it. + if err := n.stack.RegisterTransportEndpoint(n.boundNICID, n.effectiveNetProtos, ProtocolNumber, n.id, n); err != nil { + n.Close() + return nil, err + } + + n.isRegistered = true + n.state = stateConnected + + // Create sender and receiver. + // + // The receiver at least temporarily has a zero receive window scale, + // but the caller may change it (before starting the protocol loop). + n.snd = newSender(n, iss, irs, s.window, rcvdSynOpts.MSS, rcvdSynOpts.WS) + n.rcv = newReceiver(n, irs, l.rcvWnd, 0) + + return n, nil +} + +// createEndpoint creates a new endpoint in connected state and then performs +// the TCP 3-way handshake. +func (l *listenContext) createEndpointAndPerformHandshake(s *segment, opts *header.TCPSynOptions) (*endpoint, *tcpip.Error) { + // Create new endpoint. 
+ irs := s.sequenceNumber + cookie := l.createCookie(s.id, irs, encodeMSS(opts.MSS)) + ep, err := l.createConnectedEndpoint(s, cookie, irs, opts) + if err != nil { + return nil, err + } + + // Perform the 3-way handshake. + h, err := newHandshake(ep, l.rcvWnd) + if err != nil { + ep.Close() + return nil, err + } + + h.resetToSynRcvd(cookie, irs, opts) + if err := h.execute(); err != nil { + ep.Close() + return nil, err + } + + // Update the receive window scaling. We can't do it before the + // handshake because it's possible that the peer doesn't support window + // scaling. + ep.rcv.rcvWndScale = h.effectiveRcvWndScale() + + return ep, nil +} + +// deliverAccepted delivers the newly-accepted endpoint to the listener. If the +// endpoint has transitioned out of the listen state, the new endpoint is closed +// instead. +func (e *endpoint) deliverAccepted(n *endpoint) { + e.mu.RLock() + if e.state == stateListen { + e.acceptedChan <- n + e.waiterQueue.Notify(waiter.EventIn) + } else { + n.Close() + } + e.mu.RUnlock() +} + +// handleSynSegment is called in its own goroutine once the listening endpoint +// receives a SYN segment. It is responsible for completing the handshake and +// queueing the new endpoint for acceptance. +// +// A limited number of these goroutines are allowed before TCP starts using SYN +// cookies to accept connections. +func (e *endpoint) handleSynSegment(ctx *listenContext, s *segment, opts *header.TCPSynOptions) { + defer decSynRcvdCount() + defer s.decRef() + + n, err := ctx.createEndpointAndPerformHandshake(s, opts) + if err != nil { + return + } + + e.deliverAccepted(n) +} + +// handleListenSegment is called when a listening endpoint receives a segment +// and needs to handle it. 
+func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) {
+	switch s.flags {
+	case flagSyn:
+		opts := parseSynSegmentOptions(s)
+		if incSynRcvdCount() {
+			s.incRef()
+			go e.handleSynSegment(ctx, s, &opts) // S/R-FIXME
+		} else {
+			cookie := ctx.createCookie(s.id, s.sequenceNumber, encodeMSS(opts.MSS))
+			// Send SYN with window scaling because we currently
+			// don't encode this information in the cookie.
+			//
+			// Enable Timestamp option if the original syn did have
+			// the timestamp option specified.
+			synOpts := header.TCPSynOptions{
+				WS:    -1,
+				TS:    opts.TS,
+				TSVal: tcpTimeStamp(timeStampOffset()),
+				TSEcr: opts.TSVal,
+			}
+			sendSynTCP(&s.route, s.id, flagSyn|flagAck, cookie, s.sequenceNumber+1, ctx.rcvWnd, synOpts)
+		}
+
+	case flagAck:
+		if data, ok := ctx.isCookieValid(s.id, s.ackNumber-1, s.sequenceNumber-1); ok && int(data) < len(mssTable) {
+			// Create newly accepted endpoint and deliver it.
+			rcvdSynOptions := &header.TCPSynOptions{
+				MSS: mssTable[data],
+				// Disable Window scaling as original SYN is
+				// lost.
+				WS: -1,
+			}
+			// When syn cookies are in use we enable timestamp only
+			// if the ack specifies the timestamp option assuming
+			// that the other end did in fact negotiate the
+			// timestamp option in the original SYN.
+			if s.parsedOptions.TS {
+				rcvdSynOptions.TS = true
+				rcvdSynOptions.TSVal = s.parsedOptions.TSVal
+				rcvdSynOptions.TSEcr = s.parsedOptions.TSEcr
+			}
+			n, err := ctx.createConnectedEndpoint(s, s.ackNumber-1, s.sequenceNumber-1, rcvdSynOptions)
+			if err == nil {
+				// clear the tsOffset for the newly created
+				// endpoint as the Timestamp was already
+				// randomly offset when the original SYN-ACK was
+				// sent above.
+				n.tsOffset = 0
+				e.deliverAccepted(n)
+			}
+		}
+	}
+}
+
+// protocolListenLoop is the main loop of a listening TCP endpoint. It runs in
+// its own goroutine and is responsible for handling connection requests. 
+func (e *endpoint) protocolListenLoop(rcvWnd seqnum.Size) *tcpip.Error {
+	defer func() {
+		// Mark endpoint as closed. This will prevent goroutines running
+		// handleSynSegment() from attempting to queue new connections
+		// to the endpoint.
+		e.mu.Lock()
+		e.state = stateClosed
+		e.mu.Unlock()
+
+		// Notify waiters that the endpoint is shutdown.
+		e.waiterQueue.Notify(waiter.EventIn | waiter.EventOut)
+
+		// Do cleanup if needed.
+		e.completeWorker()
+	}()
+
+	e.mu.Lock()
+	v6only := e.v6only
+	e.mu.Unlock()
+
+	ctx := newListenContext(e.stack, rcvWnd, v6only, e.netProto)
+
+	s := sleep.Sleeper{}
+	s.AddWaker(&e.notificationWaker, wakerForNotification)
+	s.AddWaker(&e.newSegmentWaker, wakerForNewSegment)
+	for {
+		switch index, _ := s.Fetch(true); index {
+		case wakerForNotification:
+			n := e.fetchNotifications()
+			if n&notifyClose != 0 {
+				return nil
+			}
+			if n&notifyDrain != 0 {
+				for s := e.segmentQueue.dequeue(); s != nil; s = e.segmentQueue.dequeue() {
+					e.handleListenSegment(ctx, s)
+					s.decRef()
+				}
+				e.drainDone <- struct{}{}
+				return nil
+			}
+
+		case wakerForNewSegment:
+			// Process at most maxSegmentsPerWake segments.
+			mayRequeue := true
+			for i := 0; i < maxSegmentsPerWake; i++ {
+				s := e.segmentQueue.dequeue()
+				if s == nil {
+					mayRequeue = false
+					break
+				}
+
+				e.handleListenSegment(ctx, s)
+				s.decRef()
+			}
+
+			// If the queue is not empty, make sure we'll wake up
+			// in the next iteration.
+			if mayRequeue && !e.segmentQueue.empty() {
+				e.newSegmentWaker.Assert()
+			}
+		}
+	}
+}
diff --git a/pkg/tcpip/transport/tcp/connect.go b/pkg/tcpip/transport/tcp/connect.go
new file mode 100644
index 000000000..4d20f4d3f
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/connect.go
@@ -0,0 +1,953 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file. 
+ +package tcp + +import ( + "crypto/rand" + "sync" + "sync/atomic" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// maxSegmentsPerWake is the maximum number of segments to process in the main +// protocol goroutine per wake-up. Yielding [after this number of segments are +// processed] allows other events to be processed as well (e.g., timeouts, +// resets, etc.). +const maxSegmentsPerWake = 100 + +type handshakeState int + +// The following are the possible states of the TCP connection during a 3-way +// handshake. A depiction of the states and transitions can be found in RFC 793, +// page 23. +const ( + handshakeSynSent handshakeState = iota + handshakeSynRcvd + handshakeCompleted +) + +// The following are used to set up sleepers. +const ( + wakerForNotification = iota + wakerForNewSegment + wakerForResend + wakerForResolution +) + +const ( + // Maximum space available for options. + maxOptionSize = 40 +) + +// handshake holds the state used during a TCP 3-way handshake. +type handshake struct { + ep *endpoint + state handshakeState + active bool + flags uint8 + ackNum seqnum.Value + + // iss is the initial send sequence number, as defined in RFC 793. + iss seqnum.Value + + // rcvWnd is the receive window, as defined in RFC 793. + rcvWnd seqnum.Size + + // sndWnd is the send window, as defined in RFC 793. + sndWnd seqnum.Size + + // mss is the maximum segment size received from the peer. + mss uint16 + + // sndWndScale is the send window scale, as defined in RFC 1323. A + // negative value means no scaling is supported by the peer. + sndWndScale int + + // rcvWndScale is the receive window scale, as defined in RFC 1323. 
+ rcvWndScale int +} + +func newHandshake(ep *endpoint, rcvWnd seqnum.Size) (handshake, *tcpip.Error) { + h := handshake{ + ep: ep, + active: true, + rcvWnd: rcvWnd, + rcvWndScale: FindWndScale(rcvWnd), + } + if err := h.resetState(); err != nil { + return handshake{}, err + } + + return h, nil +} + +// FindWndScale determines the window scale to use for the given maximum window +// size. +func FindWndScale(wnd seqnum.Size) int { + if wnd < 0x10000 { + return 0 + } + + max := seqnum.Size(0xffff) + s := 0 + for wnd > max && s < header.MaxWndScale { + s++ + max <<= 1 + } + + return s +} + +// resetState resets the state of the handshake object such that it becomes +// ready for a new 3-way handshake. +func (h *handshake) resetState() *tcpip.Error { + b := make([]byte, 4) + if _, err := rand.Read(b); err != nil { + panic(err) + } + + h.state = handshakeSynSent + h.flags = flagSyn + h.ackNum = 0 + h.mss = 0 + h.iss = seqnum.Value(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + + return nil +} + +// effectiveRcvWndScale returns the effective receive window scale to be used. +// If the peer doesn't support window scaling, the effective rcv wnd scale is +// zero; otherwise it's the value calculated based on the initial rcv wnd. +func (h *handshake) effectiveRcvWndScale() uint8 { + if h.sndWndScale < 0 { + return 0 + } + return uint8(h.rcvWndScale) +} + +// resetToSynRcvd resets the state of the handshake object to the SYN-RCVD +// state. +func (h *handshake) resetToSynRcvd(iss seqnum.Value, irs seqnum.Value, opts *header.TCPSynOptions) { + h.active = false + h.state = handshakeSynRcvd + h.flags = flagSyn | flagAck + h.iss = iss + h.ackNum = irs + 1 + h.mss = opts.MSS + h.sndWndScale = opts.WS +} + +// checkAck checks if the ACK number, if present, of a segment received during +// a TCP 3-way handshake is valid. If it's not, a RST segment is sent back in +// response. 
+func (h *handshake) checkAck(s *segment) bool { + if s.flagIsSet(flagAck) && s.ackNumber != h.iss+1 { + // RFC 793, page 36, states that a reset must be generated when + // the connection is in any non-synchronized state and an + // incoming segment acknowledges something not yet sent. The + // connection remains in the same state. + ack := s.sequenceNumber.Add(s.logicalLen()) + h.ep.sendRaw(nil, flagRst|flagAck, s.ackNumber, ack, 0) + return false + } + + return true +} + +// synSentState handles a segment received when the TCP 3-way handshake is in +// the SYN-SENT state. +func (h *handshake) synSentState(s *segment) *tcpip.Error { + // RFC 793, page 37, states that in the SYN-SENT state, a reset is + // acceptable if the ack field acknowledges the SYN. + if s.flagIsSet(flagRst) { + if s.flagIsSet(flagAck) && s.ackNumber == h.iss+1 { + return tcpip.ErrConnectionRefused + } + return nil + } + + if !h.checkAck(s) { + return nil + } + + // We are in the SYN-SENT state. We only care about segments that have + // the SYN flag. + if !s.flagIsSet(flagSyn) { + return nil + } + + // Parse the SYN options. + rcvSynOpts := parseSynSegmentOptions(s) + + // Remember if the Timestamp option was negotiated. + h.ep.maybeEnableTimestamp(&rcvSynOpts) + + // Remember if the SACKPermitted option was negotiated. + h.ep.maybeEnableSACKPermitted(&rcvSynOpts) + + // Remember the sequence we'll ack from now on. + h.ackNum = s.sequenceNumber + 1 + h.flags |= flagAck + h.mss = rcvSynOpts.MSS + h.sndWndScale = rcvSynOpts.WS + + // If this is a SYN ACK response, we only need to acknowledge the SYN + // and the handshake is completed. + if s.flagIsSet(flagAck) { + h.state = handshakeCompleted + h.ep.sendRaw(nil, flagAck, h.iss+1, h.ackNum, h.rcvWnd>>h.effectiveRcvWndScale()) + return nil + } + + // A SYN segment was received, but no ACK in it. We acknowledge the SYN + // but resend our own SYN and wait for it to be acknowledged in the + // SYN-RCVD state. 
+ h.state = handshakeSynRcvd + synOpts := header.TCPSynOptions{ + WS: h.rcvWndScale, + TS: rcvSynOpts.TS, + TSVal: h.ep.timestamp(), + TSEcr: h.ep.recentTS, + + // We only send SACKPermitted if the other side indicated it + // permits SACK. This is not explicitly defined in the RFC but + // this is the behaviour implemented by Linux. + SACKPermitted: rcvSynOpts.SACKPermitted, + } + sendSynTCP(&s.route, h.ep.id, h.flags, h.iss, h.ackNum, h.rcvWnd, synOpts) + + return nil +} + +// synRcvdState handles a segment received when the TCP 3-way handshake is in +// the SYN-RCVD state. +func (h *handshake) synRcvdState(s *segment) *tcpip.Error { + if s.flagIsSet(flagRst) { + // RFC 793, page 37, states that in the SYN-RCVD state, a reset + // is acceptable if the sequence number is in the window. + if s.sequenceNumber.InWindow(h.ackNum, h.rcvWnd) { + return tcpip.ErrConnectionRefused + } + return nil + } + + if !h.checkAck(s) { + return nil + } + + if s.flagIsSet(flagSyn) && s.sequenceNumber != h.ackNum-1 { + // We received two SYN segments with different sequence + // numbers, so we reset this and restart the whole + // process, except that we don't reset the timer. + ack := s.sequenceNumber.Add(s.logicalLen()) + seq := seqnum.Value(0) + if s.flagIsSet(flagAck) { + seq = s.ackNumber + } + h.ep.sendRaw(nil, flagRst|flagAck, seq, ack, 0) + + if !h.active { + return tcpip.ErrInvalidEndpointState + } + + if err := h.resetState(); err != nil { + return err + } + synOpts := header.TCPSynOptions{ + WS: h.rcvWndScale, + TS: h.ep.sendTSOk, + TSVal: h.ep.timestamp(), + TSEcr: h.ep.recentTS, + SACKPermitted: h.ep.sackPermitted, + } + sendSynTCP(&s.route, h.ep.id, h.flags, h.iss, h.ackNum, h.rcvWnd, synOpts) + return nil + } + + // We have previously received (and acknowledged) the peer's SYN. If the + // peer acknowledges our SYN, the handshake is completed. 
+ if s.flagIsSet(flagAck) { + + // If the timestamp option is negotiated and the segment does + // not carry a timestamp option then the segment must be dropped + // as per https://tools.ietf.org/html/rfc7323#section-3.2. + if h.ep.sendTSOk && !s.parsedOptions.TS { + atomic.AddUint64(&h.ep.stack.MutableStats().DroppedPackets, 1) + return nil + } + + // Update timestamp if required. See RFC7323, section-4.3. + h.ep.updateRecentTimestamp(s.parsedOptions.TSVal, h.ackNum, s.sequenceNumber) + + h.state = handshakeCompleted + return nil + } + + return nil +} + +// processSegments goes through the segment queue and processes up to +// maxSegmentsPerWake (if they're available). +func (h *handshake) processSegments() *tcpip.Error { + for i := 0; i < maxSegmentsPerWake; i++ { + s := h.ep.segmentQueue.dequeue() + if s == nil { + return nil + } + + h.sndWnd = s.window + if !s.flagIsSet(flagSyn) && h.sndWndScale > 0 { + h.sndWnd <<= uint8(h.sndWndScale) + } + + var err *tcpip.Error + switch h.state { + case handshakeSynRcvd: + err = h.synRcvdState(s) + case handshakeSynSent: + err = h.synSentState(s) + } + s.decRef() + if err != nil { + return err + } + + // We stop processing packets once the handshake is completed, + // otherwise we may process packets meant to be processed by + // the main protocol goroutine. + if h.state == handshakeCompleted { + break + } + } + + // If the queue is not empty, make sure we'll wake up in the next + // iteration. + if !h.ep.segmentQueue.empty() { + h.ep.newSegmentWaker.Assert() + } + + return nil +} + +func (h *handshake) resolveRoute() *tcpip.Error { + // Set up the wakers. + s := sleep.Sleeper{} + resolutionWaker := &sleep.Waker{} + s.AddWaker(resolutionWaker, wakerForResolution) + s.AddWaker(&h.ep.notificationWaker, wakerForNotification) + defer s.Done() + + // Initial action is to resolve route. 
+	index := wakerForResolution
+	for {
+		switch index {
+		case wakerForResolution:
+			if err := h.ep.route.Resolve(resolutionWaker); err != tcpip.ErrWouldBlock {
+				// Either success (err == nil) or failure.
+				return err
+			}
+			// Resolution not completed. Keep trying...
+
+		case wakerForNotification:
+			n := h.ep.fetchNotifications()
+			if n&notifyClose != 0 {
+				h.ep.route.RemoveWaker(resolutionWaker)
+				return tcpip.ErrAborted
+			}
+		}
+
+		// Wait for notification.
+		index, _ = s.Fetch(true)
+	}
+}
+
+// execute executes the TCP 3-way handshake.
+func (h *handshake) execute() *tcpip.Error {
+	if h.ep.route.IsResolutionRequired() {
+		if err := h.resolveRoute(); err != nil {
+			return err
+		}
+	}
+
+	// Initialize the resend timer.
+	resendWaker := sleep.Waker{}
+	timeOut := time.Duration(time.Second)
+	rt := time.AfterFunc(timeOut, func() {
+		resendWaker.Assert()
+	})
+	defer rt.Stop()
+
+	// Set up the wakers.
+	s := sleep.Sleeper{}
+	s.AddWaker(&resendWaker, wakerForResend)
+	s.AddWaker(&h.ep.notificationWaker, wakerForNotification)
+	s.AddWaker(&h.ep.newSegmentWaker, wakerForNewSegment)
+	defer s.Done()
+
+	var sackEnabled SACKEnabled
+	if err := h.ep.stack.TransportProtocolOption(ProtocolNumber, &sackEnabled); err != nil {
+		// If stack returned an error when checking for SACKEnabled
+		// status then just default to switching off SACK negotiation.
+		sackEnabled = false
+	}
+
+	// Send the initial SYN segment and loop until the handshake is
+	// completed.
+	synOpts := header.TCPSynOptions{
+		WS:            h.rcvWndScale,
+		TS:            true,
+		TSVal:         h.ep.timestamp(),
+		TSEcr:         h.ep.recentTS,
+		SACKPermitted: bool(sackEnabled),
+	}
+
+	// Execute is also called in a listen context so we want to make sure we
+	// only send the TS/SACK option when we received the TS/SACK in the
+	// initial SYN. 
+	if h.state == handshakeSynRcvd {
+		synOpts.TS = h.ep.sendTSOk
+		synOpts.SACKPermitted = h.ep.sackPermitted && bool(sackEnabled)
+	}
+	sendSynTCP(&h.ep.route, h.ep.id, h.flags, h.iss, h.ackNum, h.rcvWnd, synOpts)
+	for h.state != handshakeCompleted {
+		switch index, _ := s.Fetch(true); index {
+		case wakerForResend:
+			timeOut *= 2
+			if timeOut > 60*time.Second {
+				return tcpip.ErrTimeout
+			}
+			rt.Reset(timeOut)
+			sendSynTCP(&h.ep.route, h.ep.id, h.flags, h.iss, h.ackNum, h.rcvWnd, synOpts)
+
+		case wakerForNotification:
+			n := h.ep.fetchNotifications()
+			if n&notifyClose != 0 {
+				return tcpip.ErrAborted
+			}
+
+		case wakerForNewSegment:
+			if err := h.processSegments(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func parseSynSegmentOptions(s *segment) header.TCPSynOptions {
+	synOpts := header.ParseSynOptions(s.options, s.flagIsSet(flagAck))
+	if synOpts.TS {
+		s.parsedOptions.TSVal = synOpts.TSVal
+		s.parsedOptions.TSEcr = synOpts.TSEcr
+	}
+	return synOpts
+}
+
+var optionPool = sync.Pool{
+	New: func() interface{} {
+		return make([]byte, maxOptionSize)
+	},
+}
+
+func getOptions() []byte {
+	return optionPool.Get().([]byte)
+}
+
+func putOptions(options []byte) {
+	// Reslice to full capacity.
+	optionPool.Put(options[0:cap(options)])
+}
+
+func makeSynOptions(opts header.TCPSynOptions) []byte {
+	// Emulate linux option order. This is as follows:
+	//
+	// if md5: NOP NOP MD5SIG 18 md5sig(16)
+	// if mss: MSS 4 mss(2)
+	// if ts and sack_advertise:
+	//	SACK 2 TIMESTAMP 2 timestamp(8)
+	// elif ts: NOP NOP TIMESTAMP 10 timestamp(8)
+	// elif sack: NOP NOP SACK 2
+	// if wscale: NOP WINDOW 3 ws(1)
+	// if sack_blocks: NOP NOP SACK ((2 + (#blocks * 8))
+	//	[for each block] start_seq(4) end_seq(4)
+	// if fastopen_cookie:
+	//	if exp: EXP (4 + len(cookie)) FASTOPEN_MAGIC(2)
+	//	else: FASTOPEN (2 + len(cookie))
+	//	cookie(variable) [padding to four bytes]
+	//
+	options := getOptions()
+
+	// Always encode the mss. 
+ offset := header.EncodeMSSOption(uint32(opts.MSS), options) + + // Special ordering is required here. If both TS and SACK are enabled, + // then the SACK option precedes TS, with no padding. If they are + // enabled individually, then we see padding before the option. + if opts.TS && opts.SACKPermitted { + offset += header.EncodeSACKPermittedOption(options[offset:]) + offset += header.EncodeTSOption(opts.TSVal, opts.TSEcr, options[offset:]) + } else if opts.TS { + offset += header.EncodeNOP(options[offset:]) + offset += header.EncodeNOP(options[offset:]) + offset += header.EncodeTSOption(opts.TSVal, opts.TSEcr, options[offset:]) + } else if opts.SACKPermitted { + offset += header.EncodeNOP(options[offset:]) + offset += header.EncodeNOP(options[offset:]) + offset += header.EncodeSACKPermittedOption(options[offset:]) + } + + // Initialize the WS option. + if opts.WS >= 0 { + offset += header.EncodeNOP(options[offset:]) + offset += header.EncodeWSOption(opts.WS, options[offset:]) + } + + // Padding to the end; note that this never apply unless we add a + // fastopen option, we always expect the offset to remain the same. + if delta := header.AddTCPOptionPadding(options, offset); delta != 0 { + panic("unexpected option encoding") + } + + return options[:offset] +} + +func sendSynTCP(r *stack.Route, id stack.TransportEndpointID, flags byte, seq, ack seqnum.Value, rcvWnd seqnum.Size, opts header.TCPSynOptions) *tcpip.Error { + // The MSS in opts is automatically calculated as this function is + // called from many places and we don't want every call point being + // embedded with the MSS calculation. + if opts.MSS == 0 { + opts.MSS = uint16(r.MTU() - header.TCPMinimumSize) + } + + options := makeSynOptions(opts) + err := sendTCPWithOptions(r, id, nil, flags, seq, ack, rcvWnd, options) + putOptions(options) + return err +} + +// sendTCPWithOptions sends a TCP segment with the provided options via the +// provided network endpoint and under the provided identity. 
+func sendTCPWithOptions(r *stack.Route, id stack.TransportEndpointID, data buffer.View, flags byte, seq, ack seqnum.Value, rcvWnd seqnum.Size, opts []byte) *tcpip.Error { + optLen := len(opts) + // Allocate a buffer for the TCP header. + hdr := buffer.NewPrependable(header.TCPMinimumSize + int(r.MaxHeaderLength()) + optLen) + + if rcvWnd > 0xffff { + rcvWnd = 0xffff + } + + // Initialize the header. + tcp := header.TCP(hdr.Prepend(header.TCPMinimumSize + optLen)) + tcp.Encode(&header.TCPFields{ + SrcPort: id.LocalPort, + DstPort: id.RemotePort, + SeqNum: uint32(seq), + AckNum: uint32(ack), + DataOffset: uint8(header.TCPMinimumSize + optLen), + Flags: flags, + WindowSize: uint16(rcvWnd), + }) + copy(tcp[header.TCPMinimumSize:], opts) + + // Only calculate the checksum if offloading isn't supported. + if r.Capabilities()&stack.CapabilityChecksumOffload == 0 { + length := uint16(hdr.UsedLength()) + xsum := r.PseudoHeaderChecksum(ProtocolNumber) + if data != nil { + length += uint16(len(data)) + xsum = header.Checksum(data, xsum) + } + + tcp.SetChecksum(^tcp.CalculateChecksum(xsum, length)) + } + + return r.WritePacket(&hdr, data, ProtocolNumber) +} + +// sendTCP sends a TCP segment via the provided network endpoint and under the +// provided identity. +func sendTCP(r *stack.Route, id stack.TransportEndpointID, data buffer.View, flags byte, seq, ack seqnum.Value, rcvWnd seqnum.Size) *tcpip.Error { + // Allocate a buffer for the TCP header. + hdr := buffer.NewPrependable(header.TCPMinimumSize + int(r.MaxHeaderLength())) + + if rcvWnd > 0xffff { + rcvWnd = 0xffff + } + + // Initialize the header. + tcp := header.TCP(hdr.Prepend(header.TCPMinimumSize)) + tcp.Encode(&header.TCPFields{ + SrcPort: id.LocalPort, + DstPort: id.RemotePort, + SeqNum: uint32(seq), + AckNum: uint32(ack), + DataOffset: header.TCPMinimumSize, + Flags: flags, + WindowSize: uint16(rcvWnd), + }) + + // Only calculate the checksum if offloading isn't supported. 
+ if r.Capabilities()&stack.CapabilityChecksumOffload == 0 { + length := uint16(hdr.UsedLength()) + xsum := r.PseudoHeaderChecksum(ProtocolNumber) + if data != nil { + length += uint16(len(data)) + xsum = header.Checksum(data, xsum) + } + + tcp.SetChecksum(^tcp.CalculateChecksum(xsum, length)) + } + + return r.WritePacket(&hdr, data, ProtocolNumber) +} + +// makeOptions makes an options slice. +func (e *endpoint) makeOptions(sackBlocks []header.SACKBlock) []byte { + options := getOptions() + offset := 0 + + // N.B. the ordering here matches the ordering used by Linux internally + // and described in the raw makeOptions function. We don't include + // unnecessary cases here (post connection.) + if e.sendTSOk { + // Embed the timestamp if timestamp has been enabled. + // + // We only use the lower 32 bits of the unix time in + // milliseconds. This is similar to what Linux does where it + // uses the lower 32 bits of the jiffies value in the tsVal + // field of the timestamp option. + // + // Further, RFC7323 section-5.4 recommends millisecond + // resolution as the lowest recommended resolution for the + // timestamp clock. + // + // Ref: https://tools.ietf.org/html/rfc7323#section-5.4. + offset += header.EncodeNOP(options[offset:]) + offset += header.EncodeNOP(options[offset:]) + offset += header.EncodeTSOption(e.timestamp(), uint32(e.recentTS), options[offset:]) + } + if e.sackPermitted && len(sackBlocks) > 0 { + offset += header.EncodeNOP(options[offset:]) + offset += header.EncodeNOP(options[offset:]) + offset += header.EncodeSACKBlocks(sackBlocks, options[offset:]) + } + + // We expect the above to produce an aligned offset. + if delta := header.AddTCPOptionPadding(options, offset); delta != 0 { + panic("unexpected option encoding") + } + + return options[:offset] +} + +// sendRaw sends a TCP segment to the endpoint's peer. 
+func (e *endpoint) sendRaw(data buffer.View, flags byte, seq, ack seqnum.Value, rcvWnd seqnum.Size) *tcpip.Error { + var sackBlocks []header.SACKBlock + if e.state == stateConnected && e.rcv.pendingBufSize > 0 && (flags&flagAck != 0) { + sackBlocks = e.sack.Blocks[:e.sack.NumBlocks] + } + options := e.makeOptions(sackBlocks) + if len(options) > 0 { + err := sendTCPWithOptions(&e.route, e.id, data, flags, seq, ack, rcvWnd, options) + putOptions(options) + return err + } + err := sendTCP(&e.route, e.id, data, flags, seq, ack, rcvWnd) + putOptions(options) + return err +} + +func (e *endpoint) handleWrite() bool { + // Move packets from send queue to send list. The queue is accessible + // from other goroutines and protected by the send mutex, while the send + // list is only accessible from the handler goroutine, so it needs no + // mutexes. + e.sndBufMu.Lock() + + first := e.sndQueue.Front() + if first != nil { + e.snd.writeList.PushBackList(&e.sndQueue) + e.snd.sndNxtList.UpdateForward(e.sndBufInQueue) + e.sndBufInQueue = 0 + } + + e.sndBufMu.Unlock() + + // Initialize the next segment to write if it's currently nil. + if e.snd.writeNext == nil { + e.snd.writeNext = first + } + + // Push out any new packets. + e.snd.sendData() + + return true +} + +func (e *endpoint) handleClose() bool { + // Drain the send queue. + e.handleWrite() + + // Mark send side as closed. + e.snd.closed = true + + return true +} + +// resetConnection sends a RST segment and puts the endpoint in an error state +// with the given error code. +// This method must only be called from the protocol goroutine. +func (e *endpoint) resetConnection(err *tcpip.Error) { + e.sendRaw(nil, flagAck|flagRst, e.snd.sndUna, e.rcv.rcvNxt, 0) + + e.mu.Lock() + e.state = stateError + e.hardError = err + e.mu.Unlock() +} + +// completeWorker is called by the worker goroutine when it's about to exit. It +// marks the worker as completed and performs cleanup work if requested by +// Close(). 
+func (e *endpoint) completeWorker() { + e.mu.Lock() + defer e.mu.Unlock() + + e.workerRunning = false + if e.workerCleanup { + e.cleanup() + } +} + +// handleSegments pulls segments from the queue and processes them. It returns +// true if the protocol loop should continue, false otherwise. +func (e *endpoint) handleSegments() bool { + checkRequeue := true + for i := 0; i < maxSegmentsPerWake; i++ { + s := e.segmentQueue.dequeue() + if s == nil { + checkRequeue = false + break + } + + // Invoke the tcp probe if installed. + if e.probe != nil { + e.probe(e.completeState()) + } + + if s.flagIsSet(flagRst) { + if e.rcv.acceptable(s.sequenceNumber, 0) { + // RFC 793, page 37 states that "in all states + // except SYN-SENT, all reset (RST) segments are + // validated by checking their SEQ-fields." So + // we only process it if it's acceptable. + s.decRef() + e.mu.Lock() + e.state = stateError + e.hardError = tcpip.ErrConnectionReset + e.mu.Unlock() + return false + } + } else if s.flagIsSet(flagAck) { + // Patch the window size in the segment according to the + // send window scale. + s.window <<= e.snd.sndWndScale + + // If the timestamp option is negotiated and the segment + // does not carry a timestamp option then the segment + // must be dropped as per + // https://tools.ietf.org/html/rfc7323#section-3.2. + if e.sendTSOk && !s.parsedOptions.TS { + atomic.AddUint64(&e.stack.MutableStats().DroppedPackets, 1) + s.decRef() + continue + } + + // RFC 793, page 41 states that "once in the ESTABLISHED + // state all segments must carry current acknowledgment + // information." + e.rcv.handleRcvdSegment(s) + e.snd.handleRcvdSegment(s) + } + s.decRef() + } + + // If the queue is not empty, make sure we'll wake up in the next + // iteration. + if checkRequeue && !e.segmentQueue.empty() { + e.newSegmentWaker.Assert() + } + + // Send an ACK for all processed packets if needed. 
+ if e.rcv.rcvNxt != e.snd.maxSentAck { + e.snd.sendAck() + } + + return true +} + +// protocolMainLoop is the main loop of the TCP protocol. It runs in its own +// goroutine and is responsible for sending segments and handling received +// segments. +func (e *endpoint) protocolMainLoop(passive bool) *tcpip.Error { + var closeTimer *time.Timer + var closeWaker sleep.Waker + + defer func() { + e.waiterQueue.Notify(waiter.EventIn | waiter.EventOut) + e.completeWorker() + + if e.snd != nil { + e.snd.resendTimer.cleanup() + } + + if closeTimer != nil { + closeTimer.Stop() + } + }() + + if !passive { + // This is an active connection, so we must initiate the 3-way + // handshake, and then inform potential waiters about its + // completion. + h, err := newHandshake(e, seqnum.Size(e.receiveBufferAvailable())) + if err == nil { + err = h.execute() + } + if err != nil { + e.lastErrorMu.Lock() + e.lastError = err + e.lastErrorMu.Unlock() + + e.mu.Lock() + e.state = stateError + e.hardError = err + e.mu.Unlock() + + return err + } + + // Transfer handshake state to TCP connection. We disable + // receive window scaling if the peer doesn't support it + // (indicated by a negative send window scale). + e.snd = newSender(e, h.iss, h.ackNum-1, h.sndWnd, h.mss, h.sndWndScale) + + e.rcvListMu.Lock() + e.rcv = newReceiver(e, h.ackNum-1, h.rcvWnd, h.effectiveRcvWndScale()) + e.rcvListMu.Unlock() + } + + // Tell waiters that the endpoint is connected and writable. + e.mu.Lock() + e.state = stateConnected + e.mu.Unlock() + + e.waiterQueue.Notify(waiter.EventOut) + + // When the protocol loop exits we should wake up our waiters with EventHUp. + defer e.waiterQueue.Notify(waiter.EventHUp) + + // Set up the functions that will be called when the main protocol loop + // wakes up. 
+ funcs := []struct {
+ w *sleep.Waker
+ f func() bool
+ }{
+ {
+ w: &e.sndWaker,
+ f: e.handleWrite,
+ },
+ {
+ w: &e.sndCloseWaker,
+ f: e.handleClose,
+ },
+ {
+ w: &e.newSegmentWaker,
+ f: e.handleSegments,
+ },
+ {
+ w: &closeWaker,
+ f: func() bool {
+ e.resetConnection(tcpip.ErrConnectionAborted)
+ return false
+ },
+ },
+ {
+ w: &e.snd.resendWaker,
+ f: func() bool {
+ if !e.snd.retransmitTimerExpired() {
+ e.resetConnection(tcpip.ErrTimeout)
+ return false
+ }
+ return true
+ },
+ },
+ {
+ w: &e.notificationWaker,
+ f: func() bool {
+ n := e.fetchNotifications()
+ if n&notifyNonZeroReceiveWindow != 0 {
+ e.rcv.nonZeroWindow()
+ }
+
+ if n&notifyReceiveWindowChanged != 0 {
+ e.rcv.pendingBufSize = seqnum.Size(e.receiveBufferSize())
+ }
+
+ if n&notifyMTUChanged != 0 {
+ e.sndBufMu.Lock()
+ count := e.packetTooBigCount
+ e.packetTooBigCount = 0
+ mtu := e.sndMTU
+ e.sndBufMu.Unlock()
+
+ e.snd.updateMaxPayloadSize(mtu, count)
+ }
+
+ if n&notifyClose != 0 && closeTimer == nil {
+ // Reset the connection 3 seconds after the
+ // endpoint has been closed.
+ closeTimer = time.AfterFunc(3*time.Second, func() {
+ closeWaker.Assert()
+ })
+ }
+ return true
+ },
+ },
+ }
+
+ // Initialize the sleeper based on the wakers in funcs.
+ s := sleep.Sleeper{}
+ for i := range funcs {
+ s.AddWaker(funcs[i].w, i)
+ }
+
+ // Main loop. Handle segments until both send and receive ends of the
+ // connection have completed.
+ for !e.rcv.closed || !e.snd.closed || e.snd.sndUna != e.snd.sndNxtList {
+ e.workMu.Unlock()
+ v, _ := s.Fetch(true)
+ e.workMu.Lock()
+ if !funcs[v].f() {
+ return nil
+ }
+ }
+
+ // Mark endpoint as closed.
+ e.mu.Lock()
+ e.state = stateClosed
+ e.mu.Unlock()
+
+ return nil
+} diff --git a/pkg/tcpip/transport/tcp/dual_stack_test.go b/pkg/tcpip/transport/tcp/dual_stack_test.go new file mode 100644 index 000000000..a89af4559 --- /dev/null +++ b/pkg/tcpip/transport/tcp/dual_stack_test.go @@ -0,0 +1,550 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tcp_test + +import ( + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/checker" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp/testing/context" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +func TestV4MappedConnectOnV6Only(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(true) + + // Start connection attempt, it must fail. + err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestV4MappedAddr, Port: context.TestPort}) + if err != tcpip.ErrNoRoute { + t.Fatalf("Unexpected return value from Connect: %v", err) + } +} + +func testV4Connect(t *testing.T, c *context.Context) { + // Start connection attempt. + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventOut) + defer c.WQ.EventUnregister(&we) + + err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestV4MappedAddr, Port: context.TestPort}) + if err != tcpip.ErrConnectStarted { + t.Fatalf("Unexpected return value from Connect: %v", err) + } + + // Receive SYN packet. + b := c.GetPacket() + checker.IPv4(t, b, + checker.TCP( + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagSyn), + ), + ) + + tcp := header.TCP(header.IPv4(b).Payload()) + c.IRS = seqnum.Value(tcp.SequenceNumber()) + + iss := seqnum.Value(789) + c.SendPacket(nil, &context.Headers{ + SrcPort: tcp.DestinationPort(), + DstPort: tcp.SourcePort(), + Flags: header.TCPFlagSyn | header.TCPFlagAck, + SeqNum: iss, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Receive ACK packet. 
+ checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagAck), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(iss)+1), + ), + ) + + // Wait for connection to be established. + select { + case <-ch: + err = c.EP.GetSockOpt(tcpip.ErrorOption{}) + if err != nil { + t.Fatalf("Unexpected error when connecting: %v", err) + } + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for connection") + } +} + +func TestV4MappedConnect(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Test the connection request. + testV4Connect(t, c) +} + +func TestV4ConnectWhenBoundToWildcard(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind to wildcard. + if err := c.EP.Bind(tcpip.FullAddress{}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test the connection request. + testV4Connect(t, c) +} + +func TestV4ConnectWhenBoundToV4MappedWildcard(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind to v4 mapped wildcard. + if err := c.EP.Bind(tcpip.FullAddress{Addr: context.V4MappedWildcardAddr}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test the connection request. + testV4Connect(t, c) +} + +func TestV4ConnectWhenBoundToV4Mapped(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind to v4 mapped address. + if err := c.EP.Bind(tcpip.FullAddress{Addr: context.StackV4MappedAddr}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test the connection request. + testV4Connect(t, c) +} + +func testV6Connect(t *testing.T, c *context.Context) { + // Start connection attempt to IPv6 address. 
+ we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventOut) + defer c.WQ.EventUnregister(&we) + + err := c.EP.Connect(tcpip.FullAddress{Addr: context.TestV6Addr, Port: context.TestPort}) + if err != tcpip.ErrConnectStarted { + t.Fatalf("Unexpected return value from Connect: %v", err) + } + + // Receive SYN packet. + b := c.GetV6Packet() + checker.IPv6(t, b, + checker.TCP( + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagSyn), + ), + ) + + tcp := header.TCP(header.IPv6(b).Payload()) + c.IRS = seqnum.Value(tcp.SequenceNumber()) + + iss := seqnum.Value(789) + c.SendV6Packet(nil, &context.Headers{ + SrcPort: tcp.DestinationPort(), + DstPort: tcp.SourcePort(), + Flags: header.TCPFlagSyn | header.TCPFlagAck, + SeqNum: iss, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Receive ACK packet. + checker.IPv6(t, c.GetV6Packet(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagAck), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(iss)+1), + ), + ) + + // Wait for connection to be established. + select { + case <-ch: + err = c.EP.GetSockOpt(tcpip.ErrorOption{}) + if err != nil { + t.Fatalf("Unexpected error when connecting: %v", err) + } + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for connection") + } +} + +func TestV6Connect(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Test the connection request. + testV6Connect(t, c) +} + +func TestV6ConnectV6Only(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(true) + + // Test the connection request. + testV6Connect(t, c) +} + +func TestV6ConnectWhenBoundToWildcard(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind to wildcard. 
+ if err := c.EP.Bind(tcpip.FullAddress{}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test the connection request. + testV6Connect(t, c) +} + +func TestV6ConnectWhenBoundToLocalAddress(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind to local address. + if err := c.EP.Bind(tcpip.FullAddress{Addr: context.StackV6Addr}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test the connection request. + testV6Connect(t, c) +} + +func TestV4RefuseOnV6Only(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(true) + + // Bind to wildcard. + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Start listening. + if err := c.EP.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Send a SYN request. + irs := seqnum.Value(789) + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: context.StackPort, + Flags: header.TCPFlagSyn, + SeqNum: irs, + RcvWnd: 30000, + }) + + // Receive the RST reply. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.SrcPort(context.StackPort), + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagRst|header.TCPFlagAck), + checker.AckNum(uint32(irs)+1), + ), + ) +} + +func TestV6RefuseOnBoundToV4Mapped(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind and listen. + if err := c.EP.Bind(tcpip.FullAddress{Addr: context.V4MappedWildcardAddr, Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + if err := c.EP.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Send a SYN request. 
+ irs := seqnum.Value(789) + c.SendV6Packet(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: context.StackPort, + Flags: header.TCPFlagSyn, + SeqNum: irs, + RcvWnd: 30000, + }) + + // Receive the RST reply. + checker.IPv6(t, c.GetV6Packet(), + checker.TCP( + checker.SrcPort(context.StackPort), + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagRst|header.TCPFlagAck), + checker.AckNum(uint32(irs)+1), + ), + ) +} + +func testV4Accept(t *testing.T, c *context.Context) { + // Start listening. + if err := c.EP.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Send a SYN request. + irs := seqnum.Value(789) + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: context.StackPort, + Flags: header.TCPFlagSyn, + SeqNum: irs, + RcvWnd: 30000, + }) + + // Receive the SYN-ACK reply. + b := c.GetPacket() + tcp := header.TCP(header.IPv4(b).Payload()) + iss := seqnum.Value(tcp.SequenceNumber()) + checker.IPv4(t, b, + checker.TCP( + checker.SrcPort(context.StackPort), + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagSyn), + checker.AckNum(uint32(irs)+1), + ), + ) + + // Send ACK. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: context.StackPort, + Flags: header.TCPFlagAck, + SeqNum: irs + 1, + AckNum: iss + 1, + RcvWnd: 30000, + }) + + // Try to accept the connection. + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + nep, _, err := c.EP.Accept() + if err == tcpip.ErrWouldBlock { + // Wait for connection to be established. + select { + case <-ch: + nep, _, err = c.EP.Accept() + if err != nil { + t.Fatalf("Accept failed: %v", err) + } + + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for accept") + } + } + + // Make sure we get the same error when calling the original ep and the + // new one. 
This validates that v4-mapped endpoints are still able to + // query the V6Only flag, whereas pure v4 endpoints are not. + var v tcpip.V6OnlyOption + expected := c.EP.GetSockOpt(&v) + if err := nep.GetSockOpt(&v); err != expected { + t.Fatalf("GetSockOpt returned unexpected value: got %v, want %v", err, expected) + } + + // Check the peer address. + addr, err := nep.GetRemoteAddress() + if err != nil { + t.Fatalf("GetRemoteAddress failed failed: %v", err) + } + + if addr.Addr != context.TestAddr { + t.Fatalf("Unexpected remote address: got %v, want %v", addr.Addr, context.TestAddr) + } +} + +func TestV4AcceptOnV6(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind to wildcard. + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test acceptance. + testV4Accept(t, c) +} + +func TestV4AcceptOnBoundToV4MappedWildcard(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind to v4 mapped wildcard. + if err := c.EP.Bind(tcpip.FullAddress{Addr: context.V4MappedWildcardAddr, Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test acceptance. + testV4Accept(t, c) +} + +func TestV4AcceptOnBoundToV4Mapped(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind and listen. + if err := c.EP.Bind(tcpip.FullAddress{Addr: context.StackV4MappedAddr, Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test acceptance. + testV4Accept(t, c) +} + +func TestV6AcceptOnV6(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateV6Endpoint(false) + + // Bind and listen. 
+ if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + if err := c.EP.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Send a SYN request. + irs := seqnum.Value(789) + c.SendV6Packet(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: context.StackPort, + Flags: header.TCPFlagSyn, + SeqNum: irs, + RcvWnd: 30000, + }) + + // Receive the SYN-ACK reply. + b := c.GetV6Packet() + tcp := header.TCP(header.IPv6(b).Payload()) + iss := seqnum.Value(tcp.SequenceNumber()) + checker.IPv6(t, b, + checker.TCP( + checker.SrcPort(context.StackPort), + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagSyn), + checker.AckNum(uint32(irs)+1), + ), + ) + + // Send ACK. + c.SendV6Packet(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: context.StackPort, + Flags: header.TCPFlagAck, + SeqNum: irs + 1, + AckNum: iss + 1, + RcvWnd: 30000, + }) + + // Try to accept the connection. + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + nep, _, err := c.EP.Accept() + if err == tcpip.ErrWouldBlock { + // Wait for connection to be established. + select { + case <-ch: + nep, _, err = c.EP.Accept() + if err != nil { + t.Fatalf("Accept failed: %v", err) + } + + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for accept") + } + } + + // Make sure we can still query the v6 only status of the new endpoint, + // that is, that it is in fact a v6 socket. + var v tcpip.V6OnlyOption + if err := nep.GetSockOpt(&v); err != nil { + t.Fatalf("GetSockOpt failed failed: %v", err) + } + + // Check the peer address. 
+ addr, err := nep.GetRemoteAddress() + if err != nil { + t.Fatalf("GetRemoteAddress failed failed: %v", err) + } + + if addr.Addr != context.TestV6Addr { + t.Fatalf("Unexpected remote address: got %v, want %v", addr.Addr, context.TestV6Addr) + } +} + +func TestV4AcceptOnV4(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Create TCP endpoint. + var err *tcpip.Error + c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + // Bind to wildcard. + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test acceptance. + testV4Accept(t, c) +} diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go new file mode 100644 index 000000000..5d62589d8 --- /dev/null +++ b/pkg/tcpip/transport/tcp/endpoint.go @@ -0,0 +1,1371 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tcp + +import ( + "crypto/rand" + "math" + "sync" + "sync/atomic" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tmutex" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +type endpointState int + +const ( + stateInitial endpointState = iota + stateBound + stateListen + stateConnecting + stateConnected + stateClosed + stateError +) + +// Reasons for notifying the protocol goroutine. 
+const ( + notifyNonZeroReceiveWindow = 1 << iota + notifyReceiveWindowChanged + notifyClose + notifyMTUChanged + notifyDrain +) + +// SACKInfo holds TCP SACK related information for a given endpoint. +type SACKInfo struct { + // Blocks is the maximum number of SACK blocks we track + // per endpoint. + Blocks [MaxSACKBlocks]header.SACKBlock + + // NumBlocks is the number of valid SACK blocks stored in the + // blocks array above. + NumBlocks int +} + +// endpoint represents a TCP endpoint. This struct serves as the interface +// between users of the endpoint and the protocol implementation; it is legal to +// have concurrent goroutines make calls into the endpoint, they are properly +// synchronized. The protocol implementation, however, runs in a single +// goroutine. +type endpoint struct { + // workMu is used to arbitrate which goroutine may perform protocol + // work. Only the main protocol goroutine is expected to call Lock() on + // it, but other goroutines (e.g., send) may call TryLock() to eagerly + // perform work without having to wait for the main one to wake up. + workMu tmutex.Mutex `state:"nosave"` + + // The following fields are initialized at creation time and do not + // change throughout the lifetime of the endpoint. + stack *stack.Stack `state:"manual"` + netProto tcpip.NetworkProtocolNumber + waiterQueue *waiter.Queue + + // lastError represents the last error that the endpoint reported; + // access to it is protected by the following mutex. + lastErrorMu sync.Mutex `state:"nosave"` + lastError *tcpip.Error + + // The following fields are used to manage the receive queue. The + // protocol goroutine adds ready-for-delivery segments to rcvList, + // which are returned by Read() calls to users. + // + // Once the peer has closed its send side, rcvClosed is set to true + // to indicate to users that no more data is coming. 
+ rcvListMu sync.Mutex `state:"nosave"` + rcvList segmentList + rcvClosed bool + rcvBufSize int + rcvBufUsed int + + // The following fields are protected by the mutex. + mu sync.RWMutex `state:"nosave"` + id stack.TransportEndpointID + state endpointState + isPortReserved bool + isRegistered bool + boundNICID tcpip.NICID + route stack.Route `state:"manual"` + v6only bool + isConnectNotified bool + + // effectiveNetProtos contains the network protocols actually in use. In + // most cases it will only contain "netProto", but in cases like IPv6 + // endpoints with v6only set to false, this could include multiple + // protocols (e.g., IPv6 and IPv4) or a single different protocol (e.g., + // IPv4 when IPv6 endpoint is bound or connected to an IPv4 mapped + // address). + effectiveNetProtos []tcpip.NetworkProtocolNumber + + // hardError is meaningful only when state is stateError, it stores the + // error to be returned when read/write syscalls are called and the + // endpoint is in this state. + hardError *tcpip.Error + + // workerRunning specifies if a worker goroutine is running. + workerRunning bool + + // workerCleanup specifies if the worker goroutine must perform cleanup + // before exitting. This can only be set to true when workerRunning is + // also true, and they're both protected by the mutex. + workerCleanup bool + + // sendTSOk is used to indicate when the TS Option has been negotiated. + // When sendTSOk is true every non-RST segment should carry a TS as per + // RFC7323#section-1.1 + sendTSOk bool + + // recentTS is the timestamp that should be sent in the TSEcr field of + // the timestamp for future segments sent by the endpoint. This field is + // updated if required when a new segment is received by this endpoint. + recentTS uint32 + + // tsOffset is a randomized offset added to the value of the + // TSVal field in the timestamp option. + tsOffset uint32 + + // shutdownFlags represent the current shutdown state of the endpoint. 
+ shutdownFlags tcpip.ShutdownFlags + + // sackPermitted is set to true if the peer sends the TCPSACKPermitted + // option in the SYN/SYN-ACK. + sackPermitted bool + + // sack holds TCP SACK related information for this endpoint. + sack SACKInfo + + // The options below aren't implemented, but we remember the user + // settings because applications expect to be able to set/query these + // options. + noDelay bool + reuseAddr bool + + // segmentQueue is used to hand received segments to the protocol + // goroutine. Segments are queued as long as the queue is not full, + // and dropped when it is. + segmentQueue segmentQueue `state:"zerovalue"` + + // The following fields are used to manage the send buffer. When + // segments are ready to be sent, they are added to sndQueue and the + // protocol goroutine is signaled via sndWaker. + // + // When the send side is closed, the protocol goroutine is notified via + // sndCloseWaker, and sndClosed is set to true. + sndBufMu sync.Mutex `state:"nosave"` + sndBufSize int + sndBufUsed int + sndClosed bool + sndBufInQueue seqnum.Size + sndQueue segmentList + sndWaker sleep.Waker `state:"manual"` + sndCloseWaker sleep.Waker `state:"manual"` + + // The following are used when a "packet too big" control packet is + // received. They are protected by sndBufMu. They are used to + // communicate to the main protocol goroutine how many such control + // messages have been received since the last notification was processed + // and what was the smallest MTU seen. + packetTooBigCount int + sndMTU int + + // newSegmentWaker is used to indicate to the protocol goroutine that + // it needs to wake up and handle new segments queued to it. + newSegmentWaker sleep.Waker `state:"manual"` + + // notificationWaker is used to indicate to the protocol goroutine that + // it needs to wake up and check for notifications. 
+ notificationWaker sleep.Waker `state:"manual"` + + // notifyFlags is a bitmask of flags used to indicate to the protocol + // goroutine what it was notified; this is only accessed atomically. + notifyFlags uint32 `state:"zerovalue"` + + // acceptedChan is used by a listening endpoint protocol goroutine to + // send newly accepted connections to the endpoint so that they can be + // read by Accept() calls. + acceptedChan chan *endpoint `state:".(endpointChan)"` + + // The following are only used from the protocol goroutine, and + // therefore don't need locks to protect them. + rcv *receiver + snd *sender + + // The goroutine drain completion notification channel. + drainDone chan struct{} `state:"nosave"` + + // probe if not nil is invoked on every received segment. It is passed + // a copy of the current state of the endpoint. + probe stack.TCPProbeFunc `state:"nosave"` +} + +func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) *endpoint { + e := &endpoint{ + stack: stack, + netProto: netProto, + waiterQueue: waiterQueue, + rcvBufSize: DefaultBufferSize, + sndBufSize: DefaultBufferSize, + sndMTU: int(math.MaxInt32), + noDelay: false, + reuseAddr: true, + } + + var ss SendBufferSizeOption + if err := stack.TransportProtocolOption(ProtocolNumber, &ss); err == nil { + e.sndBufSize = ss.Default + } + + var rs ReceiveBufferSizeOption + if err := stack.TransportProtocolOption(ProtocolNumber, &rs); err == nil { + e.rcvBufSize = rs.Default + } + + if p := stack.GetTCPProbe(); p != nil { + e.probe = p + } + + e.segmentQueue.setLimit(2 * e.rcvBufSize) + e.workMu.Init() + e.workMu.Lock() + e.tsOffset = timeStampOffset() + return e +} + +// Readiness returns the current readiness of the endpoint. For example, if +// waiter.EventIn is set, the endpoint is immediately readable. 
func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {
	result := waiter.EventMask(0)

	e.mu.RLock()
	defer e.mu.RUnlock()

	switch e.state {
	case stateInitial, stateBound, stateConnecting:
		// Ready for nothing.

	case stateClosed, stateError:
		// Ready for anything.
		result = mask

	case stateListen:
		// Check if there's anything in the accepted channel.
		if (mask & waiter.EventIn) != 0 {
			if len(e.acceptedChan) > 0 {
				result |= waiter.EventIn
			}
		}

	case stateConnected:
		// Determine if the endpoint is writable if requested.
		if (mask & waiter.EventOut) != 0 {
			e.sndBufMu.Lock()
			if e.sndClosed || e.sndBufUsed < e.sndBufSize {
				result |= waiter.EventOut
			}
			e.sndBufMu.Unlock()
		}

		// Determine if the endpoint is readable if requested.
		if (mask & waiter.EventIn) != 0 {
			e.rcvListMu.Lock()
			if e.rcvBufUsed > 0 || e.rcvClosed {
				result |= waiter.EventIn
			}
			e.rcvListMu.Unlock()
		}
	}

	return result
}

// fetchNotifications atomically retrieves and clears all pending notification
// flags, so that each flag set since the last fetch is observed exactly once.
func (e *endpoint) fetchNotifications() uint32 {
	return atomic.SwapUint32(&e.notifyFlags, 0)
}

// notifyProtocolGoroutine sets the given notification flags and, on a
// transition from no flags set to at least one flag set, wakes up the
// protocol goroutine via notificationWaker.
func (e *endpoint) notifyProtocolGoroutine(n uint32) {
	for {
		v := atomic.LoadUint32(&e.notifyFlags)
		if v&n == n {
			// The flags are already set.
			return
		}

		if atomic.CompareAndSwapUint32(&e.notifyFlags, v, v|n) {
			if v == 0 {
				// We are causing a transition from no flags to
				// at least one flag set, so we must cause the
				// protocol goroutine to wake up.
				e.notificationWaker.Assert()
			}
			return
		}
	}
}

// Close puts the endpoint in a closed state and frees all resources associated
// with it. It must be called only once and with no other concurrent calls to
// the endpoint.
func (e *endpoint) Close() {
	// Issue a shutdown so that the peer knows we won't send any more data
	// if we're connected, or stop accepting if we're listening.
+ e.Shutdown(tcpip.ShutdownWrite | tcpip.ShutdownRead) + + // While we hold the lock, determine if the cleanup should happen + // inline or if we should tell the worker (if any) to do the cleanup. + e.mu.Lock() + worker := e.workerRunning + if worker { + e.workerCleanup = true + } + + // We always release ports inline so that they are immediately available + // for reuse after Close() is called. If also registered, it means this + // is a listening socket, so we must unregister as well otherwise the + // next user would fail in Listen() when trying to register. + if e.isPortReserved { + e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.id.LocalAddress, e.id.LocalPort) + e.isPortReserved = false + + if e.isRegistered { + e.stack.UnregisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.id) + e.isRegistered = false + } + } + + e.mu.Unlock() + + // Now that we don't hold the lock anymore, either perform the local + // cleanup or kick the worker to make sure it knows it needs to cleanup. + if !worker { + e.cleanup() + } else { + e.notifyProtocolGoroutine(notifyClose) + } +} + +// cleanup frees all resources associated with the endpoint. It is called after +// Close() is called and the worker goroutine (if any) is done with its work. +func (e *endpoint) cleanup() { + // Close all endpoints that might have been accepted by TCP but not by + // the client. + if e.acceptedChan != nil { + close(e.acceptedChan) + for n := range e.acceptedChan { + n.resetConnection(tcpip.ErrConnectionAborted) + n.Close() + } + } + + if e.isRegistered { + e.stack.UnregisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.id) + } + + e.route.Release() +} + +// Read reads data from the endpoint. +func (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, *tcpip.Error) { + e.mu.RLock() + // The endpoint can be read if it's connected, or if it's already closed + // but has some pending unread data. 
Also note that a RST being received + // would cause the state to become stateError so we should allow the + // reads to proceed before returning a ECONNRESET. + if s := e.state; s != stateConnected && s != stateClosed && e.rcvBufUsed == 0 { + e.mu.RUnlock() + if s == stateError { + return buffer.View{}, e.hardError + } + return buffer.View{}, tcpip.ErrInvalidEndpointState + } + + e.rcvListMu.Lock() + v, err := e.readLocked() + e.rcvListMu.Unlock() + + e.mu.RUnlock() + + return v, err +} + +func (e *endpoint) readLocked() (buffer.View, *tcpip.Error) { + if e.rcvBufUsed == 0 { + if e.rcvClosed || e.state != stateConnected { + return buffer.View{}, tcpip.ErrClosedForReceive + } + return buffer.View{}, tcpip.ErrWouldBlock + } + + s := e.rcvList.Front() + views := s.data.Views() + v := views[s.viewToDeliver] + s.viewToDeliver++ + + if s.viewToDeliver >= len(views) { + e.rcvList.Remove(s) + s.decRef() + } + + scale := e.rcv.rcvWndScale + wasZero := e.zeroReceiveWindow(scale) + e.rcvBufUsed -= len(v) + if wasZero && !e.zeroReceiveWindow(scale) { + e.notifyProtocolGoroutine(notifyNonZeroReceiveWindow) + } + + return v, nil +} + +// Write writes data to the endpoint's peer. +func (e *endpoint) Write(p tcpip.Payload, opts tcpip.WriteOptions) (uintptr, *tcpip.Error) { + // Linux completely ignores any address passed to sendto(2) for TCP sockets + // (without the MSG_FASTOPEN flag). Corking is unimplemented, so opts.More + // and opts.EndOfRecord are also ignored. + + e.mu.RLock() + defer e.mu.RUnlock() + + // The endpoint cannot be written to if it's not connected. + if e.state != stateConnected { + switch e.state { + case stateError: + return 0, e.hardError + default: + return 0, tcpip.ErrClosedForSend + } + } + + // Nothing to do if the buffer is empty. + if p.Size() == 0 { + return 0, nil + } + + e.sndBufMu.Lock() + + // Check if the connection has already been closed for sends. 
+ if e.sndClosed { + e.sndBufMu.Unlock() + return 0, tcpip.ErrClosedForSend + } + + // Check against the limit. + avail := e.sndBufSize - e.sndBufUsed + if avail <= 0 { + e.sndBufMu.Unlock() + return 0, tcpip.ErrWouldBlock + } + + v, perr := p.Get(avail) + if perr != nil { + e.sndBufMu.Unlock() + return 0, perr + } + + var err *tcpip.Error + if p.Size() > avail { + err = tcpip.ErrWouldBlock + } + l := len(v) + s := newSegmentFromView(&e.route, e.id, v) + + // Add data to the send queue. + e.sndBufUsed += l + e.sndBufInQueue += seqnum.Size(l) + e.sndQueue.PushBack(s) + + e.sndBufMu.Unlock() + + if e.workMu.TryLock() { + // Do the work inline. + e.handleWrite() + e.workMu.Unlock() + } else { + // Let the protocol goroutine do the work. + e.sndWaker.Assert() + } + return uintptr(l), err +} + +// Peek reads data without consuming it from the endpoint. +// +// This method does not block if there is no data pending. +func (e *endpoint) Peek(vec [][]byte) (uintptr, *tcpip.Error) { + e.mu.RLock() + defer e.mu.RUnlock() + + // The endpoint can be read if it's connected, or if it's already closed + // but has some pending unread data. + if s := e.state; s != stateConnected && s != stateClosed { + if s == stateError { + return 0, e.hardError + } + return 0, tcpip.ErrInvalidEndpointState + } + + e.rcvListMu.Lock() + defer e.rcvListMu.Unlock() + + if e.rcvBufUsed == 0 { + if e.rcvClosed || e.state != stateConnected { + return 0, tcpip.ErrClosedForReceive + } + return 0, tcpip.ErrWouldBlock + } + + // Make a copy of vec so we can modify the slide headers. + vec = append([][]byte(nil), vec...) 
+ + var num uintptr + + for s := e.rcvList.Front(); s != nil; s = s.Next() { + views := s.data.Views() + + for i := s.viewToDeliver; i < len(views); i++ { + v := views[i] + + for len(v) > 0 { + if len(vec) == 0 { + return num, nil + } + if len(vec[0]) == 0 { + vec = vec[1:] + continue + } + + n := copy(vec[0], v) + v = v[n:] + vec[0] = vec[0][n:] + num += uintptr(n) + } + } + } + + return num, nil +} + +// zeroReceiveWindow checks if the receive window to be announced now would be +// zero, based on the amount of available buffer and the receive window scaling. +// +// It must be called with rcvListMu held. +func (e *endpoint) zeroReceiveWindow(scale uint8) bool { + if e.rcvBufUsed >= e.rcvBufSize { + return true + } + + return ((e.rcvBufSize - e.rcvBufUsed) >> scale) == 0 +} + +// SetSockOpt sets a socket option. +func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error { + switch v := opt.(type) { + case tcpip.NoDelayOption: + e.mu.Lock() + e.noDelay = v != 0 + e.mu.Unlock() + return nil + + case tcpip.ReuseAddressOption: + e.mu.Lock() + e.reuseAddr = v != 0 + e.mu.Unlock() + return nil + + case tcpip.ReceiveBufferSizeOption: + // Make sure the receive buffer size is within the min and max + // allowed. + var rs ReceiveBufferSizeOption + size := int(v) + if err := e.stack.TransportProtocolOption(ProtocolNumber, &rs); err == nil { + if size < rs.Min { + size = rs.Min + } + if size > rs.Max { + size = rs.Max + } + } + + mask := uint32(notifyReceiveWindowChanged) + + e.rcvListMu.Lock() + + // Make sure the receive buffer size allows us to send a + // non-zero window size. + scale := uint8(0) + if e.rcv != nil { + scale = e.rcv.rcvWndScale + } + if size>>scale == 0 { + size = 1 << scale + } + + // Make sure 2*size doesn't overflow. 
+ if size > math.MaxInt32/2 { + size = math.MaxInt32 / 2 + } + + wasZero := e.zeroReceiveWindow(scale) + e.rcvBufSize = size + if wasZero && !e.zeroReceiveWindow(scale) { + mask |= notifyNonZeroReceiveWindow + } + e.rcvListMu.Unlock() + + e.segmentQueue.setLimit(2 * size) + + e.notifyProtocolGoroutine(mask) + return nil + + case tcpip.SendBufferSizeOption: + // Make sure the send buffer size is within the min and max + // allowed. + size := int(v) + var ss SendBufferSizeOption + if err := e.stack.TransportProtocolOption(ProtocolNumber, &ss); err == nil { + if size < ss.Min { + size = ss.Min + } + if size > ss.Max { + size = ss.Max + } + } + + e.sndBufMu.Lock() + e.sndBufSize = size + e.sndBufMu.Unlock() + + return nil + + case tcpip.V6OnlyOption: + // We only recognize this option on v6 endpoints. + if e.netProto != header.IPv6ProtocolNumber { + return tcpip.ErrInvalidEndpointState + } + + e.mu.Lock() + defer e.mu.Unlock() + + // We only allow this to be set when we're in the initial state. + if e.state != stateInitial { + return tcpip.ErrInvalidEndpointState + } + + e.v6only = v != 0 + } + + return nil +} + +// readyReceiveSize returns the number of bytes ready to be received. +func (e *endpoint) readyReceiveSize() (int, *tcpip.Error) { + e.mu.RLock() + defer e.mu.RUnlock() + + // The endpoint cannot be in listen state. + if e.state == stateListen { + return 0, tcpip.ErrInvalidEndpointState + } + + e.rcvListMu.Lock() + defer e.rcvListMu.Unlock() + + return e.rcvBufUsed, nil +} + +// GetSockOpt implements tcpip.Endpoint.GetSockOpt. 
+func (e *endpoint) GetSockOpt(opt interface{}) *tcpip.Error { + switch o := opt.(type) { + case tcpip.ErrorOption: + e.lastErrorMu.Lock() + err := e.lastError + e.lastError = nil + e.lastErrorMu.Unlock() + return err + + case *tcpip.SendBufferSizeOption: + e.sndBufMu.Lock() + *o = tcpip.SendBufferSizeOption(e.sndBufSize) + e.sndBufMu.Unlock() + return nil + + case *tcpip.ReceiveBufferSizeOption: + e.rcvListMu.Lock() + *o = tcpip.ReceiveBufferSizeOption(e.rcvBufSize) + e.rcvListMu.Unlock() + return nil + + case *tcpip.ReceiveQueueSizeOption: + v, err := e.readyReceiveSize() + if err != nil { + return err + } + + *o = tcpip.ReceiveQueueSizeOption(v) + return nil + + case *tcpip.NoDelayOption: + e.mu.RLock() + v := e.noDelay + e.mu.RUnlock() + + *o = 0 + if v { + *o = 1 + } + return nil + + case *tcpip.ReuseAddressOption: + e.mu.RLock() + v := e.reuseAddr + e.mu.RUnlock() + + *o = 0 + if v { + *o = 1 + } + return nil + + case *tcpip.V6OnlyOption: + // We only recognize this option on v6 endpoints. + if e.netProto != header.IPv6ProtocolNumber { + return tcpip.ErrUnknownProtocolOption + } + + e.mu.Lock() + v := e.v6only + e.mu.Unlock() + + *o = 0 + if v { + *o = 1 + } + return nil + + case *tcpip.TCPInfoOption: + *o = tcpip.TCPInfoOption{} + return nil + } + + return tcpip.ErrUnknownProtocolOption +} + +func (e *endpoint) checkV4Mapped(addr *tcpip.FullAddress) (tcpip.NetworkProtocolNumber, *tcpip.Error) { + netProto := e.netProto + if header.IsV4MappedAddress(addr.Addr) { + // Fail if using a v4 mapped address on a v6only endpoint. + if e.v6only { + return 0, tcpip.ErrNoRoute + } + + netProto = header.IPv4ProtocolNumber + addr.Addr = addr.Addr[header.IPv6AddressSize-header.IPv4AddressSize:] + if addr.Addr == "\x00\x00\x00\x00" { + addr.Addr = "" + } + } + + // Fail if we're bound to an address length different from the one we're + // checking. 
+ if l := len(e.id.LocalAddress); l != 0 && len(addr.Addr) != 0 && l != len(addr.Addr) { + return 0, tcpip.ErrInvalidEndpointState + } + + return netProto, nil +} + +// Connect connects the endpoint to its peer. +func (e *endpoint) Connect(addr tcpip.FullAddress) *tcpip.Error { + e.mu.Lock() + defer e.mu.Unlock() + + netProto, err := e.checkV4Mapped(&addr) + if err != nil { + return err + } + + nicid := addr.NIC + switch e.state { + case stateBound: + // If we're already bound to a NIC but the caller is requesting + // that we use a different one now, we cannot proceed. + if e.boundNICID == 0 { + break + } + + if nicid != 0 && nicid != e.boundNICID { + return tcpip.ErrNoRoute + } + + nicid = e.boundNICID + + case stateInitial: + // Nothing to do. We'll eventually fill-in the gaps in the ID + // (if any) when we find a route. + + case stateConnecting: + // A connection request has already been issued but hasn't + // completed yet. + return tcpip.ErrAlreadyConnecting + + case stateConnected: + // The endpoint is already connected. If caller hasn't been notified yet, return success. + if !e.isConnectNotified { + e.isConnectNotified = true + return nil + } + // Otherwise return that it's already connected. + return tcpip.ErrAlreadyConnected + + case stateError: + return e.hardError + + default: + return tcpip.ErrInvalidEndpointState + } + + // Find a route to the desired destination. + r, err := e.stack.FindRoute(nicid, e.id.LocalAddress, addr.Addr, netProto) + if err != nil { + return err + } + defer r.Release() + + origID := e.id + + netProtos := []tcpip.NetworkProtocolNumber{netProto} + e.id.LocalAddress = r.LocalAddress + e.id.RemoteAddress = r.RemoteAddress + e.id.RemotePort = addr.Port + + if e.id.LocalPort != 0 { + // The endpoint is bound to a port, attempt to register it. 
+ err := e.stack.RegisterTransportEndpoint(nicid, netProtos, ProtocolNumber, e.id, e) + if err != nil { + return err + } + } else { + // The endpoint doesn't have a local port yet, so try to get + // one. Make sure that it isn't one that will result in the same + // address/port for both local and remote (otherwise this + // endpoint would be trying to connect to itself). + sameAddr := e.id.LocalAddress == e.id.RemoteAddress + _, err := e.stack.PickEphemeralPort(func(p uint16) (bool, *tcpip.Error) { + if sameAddr && p == e.id.RemotePort { + return false, nil + } + + e.id.LocalPort = p + err := e.stack.RegisterTransportEndpoint(nicid, netProtos, ProtocolNumber, e.id, e) + switch err { + case nil: + return true, nil + case tcpip.ErrPortInUse: + return false, nil + default: + return false, err + } + }) + if err != nil { + return err + } + } + + // Remove the port reservation. This can happen when Bind is called + // before Connect: in such a case we don't want to hold on to + // reservations anymore. + if e.isPortReserved { + e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, origID.LocalAddress, origID.LocalPort) + e.isPortReserved = false + } + + e.isRegistered = true + e.state = stateConnecting + e.route = r.Clone() + e.boundNICID = nicid + e.effectiveNetProtos = netProtos + e.workerRunning = true + + go e.protocolMainLoop(false) // S/R-FIXME + + return tcpip.ErrConnectStarted +} + +// ConnectEndpoint is not supported. +func (*endpoint) ConnectEndpoint(tcpip.Endpoint) *tcpip.Error { + return tcpip.ErrInvalidEndpointState +} + +// Shutdown closes the read and/or write end of the endpoint connection to its +// peer. +func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error { + e.mu.Lock() + defer e.mu.Unlock() + e.shutdownFlags |= flags + + switch e.state { + case stateConnected: + // Close for write. + if (flags & tcpip.ShutdownWrite) != 0 { + e.sndBufMu.Lock() + + if e.sndClosed { + // Already closed. 
+ e.sndBufMu.Unlock() + break + } + + // Queue fin segment. + s := newSegmentFromView(&e.route, e.id, nil) + e.sndQueue.PushBack(s) + e.sndBufInQueue++ + + // Mark endpoint as closed. + e.sndClosed = true + + e.sndBufMu.Unlock() + + // Tell protocol goroutine to close. + e.sndCloseWaker.Assert() + } + + case stateListen: + // Tell protocolListenLoop to stop. + if flags&tcpip.ShutdownRead != 0 { + e.notifyProtocolGoroutine(notifyClose) + } + + default: + return tcpip.ErrInvalidEndpointState + } + + return nil +} + +// Listen puts the endpoint in "listen" mode, which allows it to accept +// new connections. +func (e *endpoint) Listen(backlog int) *tcpip.Error { + e.mu.Lock() + defer e.mu.Unlock() + + // Allow the backlog to be adjusted if the endpoint is not shutting down. + // When the endpoint shuts down, it sets workerCleanup to true, and from + // that point onward, acceptedChan is the responsibility of the cleanup() + // method (and should not be touched anywhere else, including here). + if e.state == stateListen && !e.workerCleanup { + // Adjust the size of the channel iff we can fix existing + // pending connections into the new one. + if len(e.acceptedChan) > backlog { + return tcpip.ErrInvalidEndpointState + } + origChan := e.acceptedChan + e.acceptedChan = make(chan *endpoint, backlog) + close(origChan) + for ep := range origChan { + e.acceptedChan <- ep + } + return nil + } + + // Endpoint must be bound before it can transition to listen mode. + if e.state != stateBound { + return tcpip.ErrInvalidEndpointState + } + + // Register the endpoint. + if err := e.stack.RegisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.id, e); err != nil { + return err + } + + e.isRegistered = true + e.state = stateListen + if e.acceptedChan == nil { + e.acceptedChan = make(chan *endpoint, backlog) + } + e.workerRunning = true + + go e.protocolListenLoop( // S/R-SAFE: drained on save. 
+ seqnum.Size(e.receiveBufferAvailable())) + + return nil +} + +// startAcceptedLoop sets up required state and starts a goroutine with the +// main loop for accepted connections. +func (e *endpoint) startAcceptedLoop(waiterQueue *waiter.Queue) { + e.waiterQueue = waiterQueue + e.workerRunning = true + go e.protocolMainLoop(true) // S/R-FIXME +} + +// Accept returns a new endpoint if a peer has established a connection +// to an endpoint previously set to listen mode. +func (e *endpoint) Accept() (tcpip.Endpoint, *waiter.Queue, *tcpip.Error) { + e.mu.RLock() + defer e.mu.RUnlock() + + // Endpoint must be in listen state before it can accept connections. + if e.state != stateListen { + return nil, nil, tcpip.ErrInvalidEndpointState + } + + // Get the new accepted endpoint. + var n *endpoint + select { + case n = <-e.acceptedChan: + default: + return nil, nil, tcpip.ErrWouldBlock + } + + // Start the protocol goroutine. + wq := &waiter.Queue{} + n.startAcceptedLoop(wq) + + return n, wq, nil +} + +// Bind binds the endpoint to a specific local port and optionally address. +func (e *endpoint) Bind(addr tcpip.FullAddress, commit func() *tcpip.Error) (retErr *tcpip.Error) { + e.mu.Lock() + defer e.mu.Unlock() + + // Don't allow binding once endpoint is not in the initial state + // anymore. This is because once the endpoint goes into a connected or + // listen state, it is already bound. + if e.state != stateInitial { + return tcpip.ErrAlreadyBound + } + + netProto, err := e.checkV4Mapped(&addr) + if err != nil { + return err + } + + // Expand netProtos to include v4 and v6 if the caller is binding to a + // wildcard (empty) address, and this is an IPv6 endpoint with v6only + // set to false. + netProtos := []tcpip.NetworkProtocolNumber{netProto} + if netProto == header.IPv6ProtocolNumber && !e.v6only && addr.Addr == "" { + netProtos = []tcpip.NetworkProtocolNumber{ + header.IPv6ProtocolNumber, + header.IPv4ProtocolNumber, + } + } + + // Reserve the port. 
+ port, err := e.stack.ReservePort(netProtos, ProtocolNumber, addr.Addr, addr.Port) + if err != nil { + return err + } + + e.isPortReserved = true + e.effectiveNetProtos = netProtos + e.id.LocalPort = port + + // Any failures beyond this point must remove the port registration. + defer func() { + if retErr != nil { + e.stack.ReleasePort(netProtos, ProtocolNumber, addr.Addr, port) + e.isPortReserved = false + e.effectiveNetProtos = nil + e.id.LocalPort = 0 + e.id.LocalAddress = "" + e.boundNICID = 0 + } + }() + + // If an address is specified, we must ensure that it's one of our + // local addresses. + if len(addr.Addr) != 0 { + nic := e.stack.CheckLocalAddress(addr.NIC, netProto, addr.Addr) + if nic == 0 { + return tcpip.ErrBadLocalAddress + } + + e.boundNICID = nic + e.id.LocalAddress = addr.Addr + } + + // Check the commit function. + if commit != nil { + if err := commit(); err != nil { + // The defer takes care of unwind. + return err + } + } + + // Mark endpoint as bound. + e.state = stateBound + + return nil +} + +// GetLocalAddress returns the address to which the endpoint is bound. +func (e *endpoint) GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) { + e.mu.RLock() + defer e.mu.RUnlock() + + return tcpip.FullAddress{ + Addr: e.id.LocalAddress, + Port: e.id.LocalPort, + NIC: e.boundNICID, + }, nil +} + +// GetRemoteAddress returns the address to which the endpoint is connected. +func (e *endpoint) GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) { + e.mu.RLock() + defer e.mu.RUnlock() + + if e.state != stateConnected { + return tcpip.FullAddress{}, tcpip.ErrNotConnected + } + + return tcpip.FullAddress{ + Addr: e.id.RemoteAddress, + Port: e.id.RemotePort, + NIC: e.boundNICID, + }, nil +} + +// HandlePacket is called by the stack when new packets arrive to this transport +// endpoint. 
func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv *buffer.VectorisedView) {
	s := newSegment(r, id, vv)
	if !s.parse() {
		// Malformed segment: count it and drop our reference.
		atomic.AddUint64(&e.stack.MutableStats().MalformedRcvdPackets, 1)
		s.decRef()
		return
	}

	// Send packet to worker goroutine.
	if e.segmentQueue.enqueue(s) {
		e.newSegmentWaker.Assert()
	} else {
		// The queue is full, so we drop the segment.
		atomic.AddUint64(&e.stack.MutableStats().DroppedPackets, 1)
		s.decRef()
	}
}

// HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket.
func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, vv *buffer.VectorisedView) {
	switch typ {
	case stack.ControlPacketTooBig:
		// Record the smallest MTU seen and the number of such
		// messages; the protocol goroutine consumes both when it
		// handles the notifyMTUChanged notification.
		e.sndBufMu.Lock()
		e.packetTooBigCount++
		if v := int(extra); v < e.sndMTU {
			e.sndMTU = v
		}
		e.sndBufMu.Unlock()

		e.notifyProtocolGoroutine(notifyMTUChanged)
	}
}

// updateSndBufferUsage is called by the protocol goroutine when room opens up
// in the send buffer. The number of newly available bytes is v.
func (e *endpoint) updateSndBufferUsage(v int) {
	e.sndBufMu.Lock()
	notify := e.sndBufUsed >= e.sndBufSize>>1
	e.sndBufUsed -= v
	// We only notify when there is half the sndBufSize available after
	// a full buffer event occurs. This ensures that we don't wake up
	// writers to queue just 1-2 segments and go back to sleep.
	notify = notify && e.sndBufUsed < e.sndBufSize>>1
	e.sndBufMu.Unlock()

	if notify {
		e.waiterQueue.Notify(waiter.EventOut)
	}
}

// readyToRead is called by the protocol goroutine when a new segment is ready
// to be read, or when the connection is closed for receiving (in which case
// s will be nil).
func (e *endpoint) readyToRead(s *segment) {
	e.rcvListMu.Lock()
	if s != nil {
		s.incRef()
		e.rcvBufUsed += s.data.Size()
		e.rcvList.PushBack(s)
	} else {
		// A nil segment signals that no more data will arrive.
		e.rcvClosed = true
	}
	e.rcvListMu.Unlock()

	e.waiterQueue.Notify(waiter.EventIn)
}

// receiveBufferAvailable calculates how many bytes are still available in the
// receive buffer.
func (e *endpoint) receiveBufferAvailable() int {
	e.rcvListMu.Lock()
	size := e.rcvBufSize
	used := e.rcvBufUsed
	e.rcvListMu.Unlock()

	// We may use more bytes than the buffer size when the receive buffer
	// shrinks.
	if used >= size {
		return 0
	}

	return size - used
}

// receiveBufferSize returns the current size of the receive buffer, read
// under rcvListMu.
func (e *endpoint) receiveBufferSize() int {
	e.rcvListMu.Lock()
	size := e.rcvBufSize
	e.rcvListMu.Unlock()

	return size
}

// updateRecentTimestamp updates the recent timestamp using the algorithm
// described in https://tools.ietf.org/html/rfc7323#section-4.3
func (e *endpoint) updateRecentTimestamp(tsVal uint32, maxSentAck seqnum.Value, segSeq seqnum.Value) {
	if e.sendTSOk && seqnum.Value(e.recentTS).LessThan(seqnum.Value(tsVal)) && segSeq.LessThanEq(maxSentAck) {
		e.recentTS = tsVal
	}
}

// maybeEnableTimestamp marks the timestamp option enabled for this endpoint if
// the SYN options indicate that timestamp option was negotiated. It also
// initializes the recentTS with the value provided in synOpts.TSval.
func (e *endpoint) maybeEnableTimestamp(synOpts *header.TCPSynOptions) {
	if synOpts.TS {
		e.sendTSOk = true
		e.recentTS = synOpts.TSVal
	}
}

// timestamp returns the timestamp value to be used in the TSVal field of the
// timestamp option for outgoing TCP segments for a given endpoint.
func (e *endpoint) timestamp() uint32 {
	return tcpTimeStamp(e.tsOffset)
}

// tcpTimeStamp returns a timestamp offset by the provided offset. This is
// not inlined above as it's used when SYN cookies are in use and endpoint
// is not created at the time when the SYN cookie is sent.
func tcpTimeStamp(offset uint32) uint32 {
	// Millisecond-resolution wall clock plus the per-endpoint offset.
	now := time.Now()
	return uint32(now.Unix()*1000+int64(now.Nanosecond()/1e6)) + offset
}

// timeStampOffset returns a randomized timestamp offset to be used when sending
// timestamp values in a timestamp option for a TCP segment.
func timeStampOffset() uint32 {
	b := make([]byte, 4)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	// Initialize a random tsOffset that will be added to the recentTS
	// everytime the timestamp is sent when the Timestamp option is enabled.
	//
	// See https://tools.ietf.org/html/rfc7323#section-5.4 for details on
	// why this is required.
	//
	// NOTE: This is not completely to spec as normally this should be
	// initialized in a manner analogous to how sequence numbers are
	// randomized per connection basis. But for now this is sufficient.
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

// maybeEnableSACKPermitted marks the SACKPermitted option enabled for this endpoint
// if the SYN options indicate that the SACK option was negotiated and the TCP
// stack is configured to enable TCP SACK option.
func (e *endpoint) maybeEnableSACKPermitted(synOpts *header.TCPSynOptions) {
	var v SACKEnabled
	if err := e.stack.TransportProtocolOption(ProtocolNumber, &v); err != nil {
		// Stack doesn't support SACK. So just return.
		return
	}
	if bool(v) && synOpts.SACKPermitted {
		e.sackPermitted = true
	}
}

// completeState makes a full copy of the endpoint and returns it. This is used
// before invoking the probe. The state returned may not be fully consistent if
// there are intervening syscalls when the state is being copied.
func (e *endpoint) completeState() stack.TCPEndpointState {
	var s stack.TCPEndpointState
	s.SegTime = time.Now()

	// Copy EndpointID.
	e.mu.Lock()
	s.ID = stack.TCPEndpointID(e.id)
	e.mu.Unlock()

	// Copy endpoint rcv state.
+ e.rcvListMu.Lock() + s.RcvBufSize = e.rcvBufSize + s.RcvBufUsed = e.rcvBufUsed + s.RcvClosed = e.rcvClosed + e.rcvListMu.Unlock() + + // Endpoint TCP Option state. + s.SendTSOk = e.sendTSOk + s.RecentTS = e.recentTS + s.TSOffset = e.tsOffset + s.SACKPermitted = e.sackPermitted + s.SACK.Blocks = make([]header.SACKBlock, e.sack.NumBlocks) + copy(s.SACK.Blocks, e.sack.Blocks[:e.sack.NumBlocks]) + + // Copy endpoint send state. + e.sndBufMu.Lock() + s.SndBufSize = e.sndBufSize + s.SndBufUsed = e.sndBufUsed + s.SndClosed = e.sndClosed + s.SndBufInQueue = e.sndBufInQueue + s.PacketTooBigCount = e.packetTooBigCount + s.SndMTU = e.sndMTU + e.sndBufMu.Unlock() + + // Copy receiver state. + s.Receiver = stack.TCPReceiverState{ + RcvNxt: e.rcv.rcvNxt, + RcvAcc: e.rcv.rcvAcc, + RcvWndScale: e.rcv.rcvWndScale, + PendingBufUsed: e.rcv.pendingBufUsed, + PendingBufSize: e.rcv.pendingBufSize, + } + + // Copy sender state. + s.Sender = stack.TCPSenderState{ + LastSendTime: e.snd.lastSendTime, + DupAckCount: e.snd.dupAckCount, + FastRecovery: stack.TCPFastRecoveryState{ + Active: e.snd.fr.active, + First: e.snd.fr.first, + Last: e.snd.fr.last, + MaxCwnd: e.snd.fr.maxCwnd, + }, + SndCwnd: e.snd.sndCwnd, + Ssthresh: e.snd.sndSsthresh, + SndCAAckCount: e.snd.sndCAAckCount, + Outstanding: e.snd.outstanding, + SndWnd: e.snd.sndWnd, + SndUna: e.snd.sndUna, + SndNxt: e.snd.sndNxt, + RTTMeasureSeqNum: e.snd.rttMeasureSeqNum, + RTTMeasureTime: e.snd.rttMeasureTime, + Closed: e.snd.closed, + SRTT: e.snd.srtt, + RTO: e.snd.rto, + SRTTInited: e.snd.srttInited, + MaxPayloadSize: e.snd.maxPayloadSize, + SndWndScale: e.snd.sndWndScale, + MaxSentAck: e.snd.maxSentAck, + } + return s +} diff --git a/pkg/tcpip/transport/tcp/endpoint_state.go b/pkg/tcpip/transport/tcp/endpoint_state.go new file mode 100644 index 000000000..dbb70ff21 --- /dev/null +++ b/pkg/tcpip/transport/tcp/endpoint_state.go @@ -0,0 +1,128 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tcp

import (
	"fmt"

	"gvisor.googlesource.com/gvisor/pkg/tcpip"
	"gvisor.googlesource.com/gvisor/pkg/tcpip/stack"
)

// ErrSaveRejection indicates a failed save due to unsupported tcp endpoint
// state.
type ErrSaveRejection struct {
	Err error
}

// Error returns a sensible description of the save rejection error.
func (e ErrSaveRejection) Error() string {
	return "save rejected due to unsupported endpoint state: " + e.Err.Error()
}

// beforeSave is invoked by stateify.
func (e *endpoint) beforeSave() {
	// Stop incoming packets.
	e.segmentQueue.setLimit(0)

	e.mu.RLock()
	defer e.mu.RUnlock()

	switch e.state {
	case stateInitial:
	case stateBound:
	case stateListen:
		if !e.segmentQueue.empty() {
			// Drop the lock so the protocol goroutine can drain
			// the queued segments, and wait for it to signal
			// completion on drainDone before re-acquiring.
			e.mu.RUnlock()
			e.drainDone = make(chan struct{}, 1)
			e.notificationWaker.Assert()
			<-e.drainDone
			e.mu.RLock()
		}
	case stateConnecting:
		panic(ErrSaveRejection{fmt.Errorf("endpoint in connecting state upon save: local %v:%v, remote %v:%v", e.id.LocalAddress, e.id.LocalPort, e.id.RemoteAddress, e.id.RemotePort)})
	case stateConnected:
		// FIXME
		panic(ErrSaveRejection{fmt.Errorf("endpoint cannot be saved in connected state: local %v:%v, remote %v:%v", e.id.LocalAddress, e.id.LocalPort, e.id.RemoteAddress, e.id.RemotePort)})
	case stateClosed:
	case stateError:
	default:
		panic(fmt.Sprintf("endpoint in unknown state %v", e.state))
	}
}

// afterLoad is invoked by stateify.
+func (e *endpoint) afterLoad() { + e.stack = stack.StackFromEnv + + if e.state == stateListen { + e.state = stateBound + backlog := cap(e.acceptedChan) + e.acceptedChan = nil + defer func() { + if err := e.Listen(backlog); err != nil { + panic("endpoint listening failed: " + err.String()) + } + }() + } + + if e.state == stateBound { + e.state = stateInitial + defer func() { + if err := e.Bind(tcpip.FullAddress{Addr: e.id.LocalAddress, Port: e.id.LocalPort}, nil); err != nil { + panic("endpoint binding failed: " + err.String()) + } + }() + } + + if e.state == stateInitial { + var ss SendBufferSizeOption + if err := e.stack.TransportProtocolOption(ProtocolNumber, &ss); err == nil { + if e.sndBufSize < ss.Min || e.sndBufSize > ss.Max { + panic(fmt.Sprintf("endpoint.sndBufSize %d is outside the min and max allowed [%d, %d]", e.sndBufSize, ss.Min, ss.Max)) + } + if e.rcvBufSize < ss.Min || e.rcvBufSize > ss.Max { + panic(fmt.Sprintf("endpoint.rcvBufSize %d is outside the min and max allowed [%d, %d]", e.rcvBufSize, ss.Min, ss.Max)) + } + } + } + + e.segmentQueue.setLimit(2 * e.rcvBufSize) + e.workMu.Init() +} + +// saveAcceptedChan is invoked by stateify. +func (e *endpoint) saveAcceptedChan() endpointChan { + if e.acceptedChan == nil { + return endpointChan{} + } + close(e.acceptedChan) + buffer := make([]*endpoint, 0, len(e.acceptedChan)) + for ep := range e.acceptedChan { + buffer = append(buffer, ep) + } + if len(buffer) != cap(buffer) { + panic("endpoint.acceptedChan buffer got consumed by background context") + } + c := cap(e.acceptedChan) + e.acceptedChan = nil + return endpointChan{buffer: buffer, cap: c} +} + +// loadAcceptedChan is invoked by stateify. 
+// loadAcceptedChan is invoked by stateify. It rebuilds e.acceptedChan from
+// the endpointChan snapshot produced by saveAcceptedChan; a zero cap means
+// there was no accepted-connection channel to restore.
+func (e *endpoint) loadAcceptedChan(c endpointChan) {
+	if c.cap == 0 {
+		return
+	}
+	e.acceptedChan = make(chan *endpoint, c.cap)
+	for _, ep := range c.buffer {
+		e.acceptedChan <- ep
+	}
+}
+
+// endpointChan is a serializable snapshot of an accepted-connections channel:
+// its buffered endpoints plus the channel capacity.
+type endpointChan struct {
+	buffer []*endpoint
+	cap    int
+}
diff --git a/pkg/tcpip/transport/tcp/forwarder.go b/pkg/tcpip/transport/tcp/forwarder.go
new file mode 100644
index 000000000..657ac524f
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/forwarder.go
@@ -0,0 +1,161 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tcp
+
+import (
+	"sync"
+
+	"gvisor.googlesource.com/gvisor/pkg/tcpip"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/header"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/stack"
+	"gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+// Forwarder is a connection request forwarder, which allows clients to decide
+// what to do with a connection request, for example: ignore it, send a RST, or
+// attempt to complete the 3-way handshake.
+//
+// The canonical way of using it is to pass the Forwarder.HandlePacket function
+// to stack.SetTransportProtocolHandler.
+type Forwarder struct {
+	// maxInFlight caps the number of connection attempts being handled
+	// concurrently; excess SYNs are silently ignored.
+	maxInFlight int
+	// handler is the client callback invoked (in its own goroutine) for
+	// each accepted connection request.
+	handler func(*ForwarderRequest)
+
+	mu       sync.Mutex
+	inFlight map[stack.TransportEndpointID]struct{}
+	listen   *listenContext
+}
+
+// NewForwarder allocates and initializes a new forwarder with the given
+// maximum number of in-flight connection attempts. Once the maximum is reached
+// new incoming connection requests will be ignored.
+//
+// If rcvWnd is set to zero, the default buffer size is used instead.
+func NewForwarder(s *stack.Stack, rcvWnd, maxInFlight int, handler func(*ForwarderRequest)) *Forwarder {
+	if rcvWnd == 0 {
+		rcvWnd = DefaultBufferSize
+	}
+	return &Forwarder{
+		maxInFlight: maxInFlight,
+		handler:     handler,
+		inFlight:    make(map[stack.TransportEndpointID]struct{}),
+		listen:      newListenContext(s, seqnum.Size(rcvWnd), true, 0),
+	}
+}
+
+// HandlePacket handles a packet if it is of interest to the forwarder (i.e., if
+// it's a SYN packet), returning true if it's the case. Otherwise the packet
+// is not handled and false is returned.
+//
+// This function is expected to be passed as an argument to the
+// stack.SetTransportProtocolHandler function.
+func (f *Forwarder) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv *buffer.VectorisedView) bool {
+	s := newSegment(r, id, vv)
+	defer s.decRef()
+
+	// We only care about well-formed SYN packets (SYN alone, no other
+	// flags set).
+	if !s.parse() || s.flags != flagSyn {
+		return false
+	}
+
+	opts := parseSynSegmentOptions(s)
+
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	// We have an inflight request for this id, ignore this one for now.
+	if _, ok := f.inFlight[id]; ok {
+		return true
+	}
+
+	// Ignore the segment if we're beyond the limit.
+	if len(f.inFlight) >= f.maxInFlight {
+		return true
+	}
+
+	// Launch a new goroutine to handle the request. The extra reference
+	// taken here is released when the client calls Complete.
+	f.inFlight[id] = struct{}{}
+	s.incRef()
+	go f.handler(&ForwarderRequest{ // S/R-FIXME
+		forwarder:  f,
+		segment:    s,
+		synOptions: opts,
+	})
+
+	return true
+}
+
+// ForwarderRequest represents a connection request received by the forwarder
+// and passed to the client. Clients must eventually call Complete() on it, and
+// may optionally create an endpoint to represent it via CreateEndpoint.
+type ForwarderRequest struct {
+	mu         sync.Mutex
+	forwarder  *Forwarder
+	segment    *segment
+	synOptions header.TCPSynOptions
+}
+
+// ID returns the 4-tuple (src address, src port, dst address, dst port) that
+// represents the connection request.
+// NOTE(review): ID does not take r.mu and dereferences r.segment, which
+// Complete sets to nil — callers must not invoke ID after Complete; confirm
+// against callers.
+func (r *ForwarderRequest) ID() stack.TransportEndpointID {
+	return r.segment.id
+}
+
+// Complete completes the request, and optionally sends a RST segment back to the
+// sender. It must be called exactly once per request; a second call panics.
+func (r *ForwarderRequest) Complete(sendReset bool) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if r.segment == nil {
+		panic("Completing already completed forwarder request")
+	}
+
+	// Remove request from the forwarder so a new SYN for this id can be
+	// accepted again.
+	r.forwarder.mu.Lock()
+	delete(r.forwarder.inFlight, r.segment.id)
+	r.forwarder.mu.Unlock()
+
+	// If the caller requested, send a reset.
+	if sendReset {
+		replyWithReset(r.segment)
+	}
+
+	// Release all resources.
+	r.segment.decRef()
+	r.segment = nil
+	r.forwarder = nil
+}
+
+// CreateEndpoint creates a TCP endpoint for the connection request, performing
+// the 3-way handshake in the process. On success the endpoint's protocol
+// goroutine is started and the endpoint is ready for use.
+func (r *ForwarderRequest) CreateEndpoint(queue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if r.segment == nil {
+		return nil, tcpip.ErrInvalidEndpointState
+	}
+
+	f := r.forwarder
+	ep, err := f.listen.createEndpointAndPerformHandshake(r.segment, &header.TCPSynOptions{
+		MSS:           r.synOptions.MSS,
+		WS:            r.synOptions.WS,
+		TS:            r.synOptions.TS,
+		TSVal:         r.synOptions.TSVal,
+		TSEcr:         r.synOptions.TSEcr,
+		SACKPermitted: r.synOptions.SACKPermitted,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Start the protocol goroutine.
+	ep.startAcceptedLoop(queue)
+
+	return ep, nil
+}
diff --git a/pkg/tcpip/transport/tcp/protocol.go b/pkg/tcpip/transport/tcp/protocol.go
new file mode 100644
index 000000000..d81a1dd9b
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/protocol.go
@@ -0,0 +1,192 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tcp contains the implementation of the TCP transport protocol.
To use
+// it in the networking stack, this package must be added to the project, and
+// activated on the stack by passing tcp.ProtocolName (or "tcp") as one of the
+// transport protocols when calling stack.New(). Then endpoints can be created
+// by passing tcp.ProtocolNumber as the transport protocol number when calling
+// Stack.NewEndpoint().
+package tcp
+
+import (
+	"sync"
+
+	"gvisor.googlesource.com/gvisor/pkg/tcpip"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/header"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/stack"
+	"gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+const (
+	// ProtocolName is the string representation of the tcp protocol name.
+	ProtocolName = "tcp"
+
+	// ProtocolNumber is the tcp protocol number.
+	ProtocolNumber = header.TCPProtocolNumber
+
+	// minBufferSize is the smallest size of a receive or send buffer.
+	minBufferSize = 4 << 10 // 4096 bytes.
+
+	// DefaultBufferSize is the default size of the receive and send buffers.
+	DefaultBufferSize = 1 << 20 // 1MB
+
+	// maxBufferSize is the largest size a receive and send buffer can grow to.
+	maxBufferSize = 4 << 20 // 4MB
+)
+
+// SACKEnabled option can be used to enable SACK support in the TCP
+// protocol. See: https://tools.ietf.org/html/rfc2018.
+type SACKEnabled bool
+
+// SendBufferSizeOption allows the default, min and max send buffer sizes for
+// TCP endpoints to be queried or configured.
+type SendBufferSizeOption struct {
+	Min     int
+	Default int
+	Max     int
+}
+
+// ReceiveBufferSizeOption allows the default, min and max receive buffer size
+// for TCP endpoints to be queried or configured.
+type ReceiveBufferSizeOption struct {
+	Min     int
+	Default int
+	Max     int
+}
+
+// protocol implements stack.TransportProtocol for TCP. Its mutable option
+// state is guarded by mu.
+type protocol struct {
+	mu             sync.Mutex
+	sackEnabled    bool
+	sendBufferSize SendBufferSizeOption
+	recvBufferSize ReceiveBufferSizeOption
+}
+
+// Number returns the tcp protocol number.
+func (*protocol) Number() tcpip.TransportProtocolNumber {
+	return ProtocolNumber
+}
+
+// NewEndpoint creates a new tcp endpoint.
+func (*protocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) {
+	return newEndpoint(stack, netProto, waiterQueue), nil
+}
+
+// MinimumPacketSize returns the minimum valid tcp packet size.
+func (*protocol) MinimumPacketSize() int {
+	return header.TCPMinimumSize
+}
+
+// ParsePorts returns the source and destination ports stored in the given tcp
+// packet.
+func (*protocol) ParsePorts(v buffer.View) (src, dst uint16, err *tcpip.Error) {
+	h := header.TCP(v)
+	return h.SourcePort(), h.DestinationPort(), nil
+}
+
+// HandleUnknownDestinationPacket handles packets targeted at this protocol but
+// that don't match any existing endpoint.
+//
+// RFC 793, page 36, states that "If the connection does not exist (CLOSED) then
+// a reset is sent in response to any incoming segment except another reset. In
+// particular, SYNs addressed to a non-existent connection are rejected by this
+// means."
+func (*protocol) HandleUnknownDestinationPacket(r *stack.Route, id stack.TransportEndpointID, vv *buffer.VectorisedView) bool {
+	s := newSegment(r, id, vv)
+	defer s.decRef()
+
+	if !s.parse() {
+		return false
+	}
+
+	// There's nothing to do if this is already a reset packet.
+	if s.flagIsSet(flagRst) {
+		return true
+	}
+
+	replyWithReset(s)
+	return true
+}
+
+// replyWithReset replies to the given segment with a reset segment.
+func replyWithReset(s *segment) {
+	// Get the seqnum from the packet if the ack flag is set.
+	seq := seqnum.Value(0)
+	if s.flagIsSet(flagAck) {
+		seq = s.ackNumber
+	}
+
+	// Ack everything the sender transmitted in sequence-number space.
+	ack := s.sequenceNumber.Add(s.logicalLen())
+
+	sendTCP(&s.route, s.id, nil, flagRst|flagAck, seq, ack, 0)
+}
+
+// SetOption implements TransportProtocol.SetOption.
+// SetOption validates and stores a protocol-wide option. It returns
+// tcpip.ErrUnknownProtocolOption for unrecognized option types and
+// tcpip.ErrInvalidOptionValue for out-of-range buffer size settings.
+func (p *protocol) SetOption(option interface{}) *tcpip.Error {
+	switch v := option.(type) {
+	case SACKEnabled:
+		p.mu.Lock()
+		p.sackEnabled = bool(v)
+		p.mu.Unlock()
+		return nil
+
+	case SendBufferSizeOption:
+		if v.Min <= 0 || v.Default < v.Min || v.Default > v.Max {
+			return tcpip.ErrInvalidOptionValue
+		}
+		p.mu.Lock()
+		p.sendBufferSize = v
+		p.mu.Unlock()
+		return nil
+
+	case ReceiveBufferSizeOption:
+		if v.Min <= 0 || v.Default < v.Min || v.Default > v.Max {
+			return tcpip.ErrInvalidOptionValue
+		}
+		p.mu.Lock()
+		p.recvBufferSize = v
+		p.mu.Unlock()
+		return nil
+
+	default:
+		return tcpip.ErrUnknownProtocolOption
+	}
+}
+
+// Option implements TransportProtocol.Option. It writes the current value of
+// the requested option through the supplied pointer.
+func (p *protocol) Option(option interface{}) *tcpip.Error {
+	switch v := option.(type) {
+	case *SACKEnabled:
+		p.mu.Lock()
+		*v = SACKEnabled(p.sackEnabled)
+		p.mu.Unlock()
+		return nil
+
+	case *SendBufferSizeOption:
+		p.mu.Lock()
+		*v = p.sendBufferSize
+		p.mu.Unlock()
+		return nil
+
+	case *ReceiveBufferSizeOption:
+		p.mu.Lock()
+		*v = p.recvBufferSize
+		p.mu.Unlock()
+		return nil
+
+	default:
+		return tcpip.ErrUnknownProtocolOption
+	}
+}
+
+// init registers the TCP protocol factory with the stack at program startup.
+func init() {
+	stack.RegisterTransportProtocolFactory(ProtocolName, func() stack.TransportProtocol {
+		return &protocol{
+			sendBufferSize: SendBufferSizeOption{minBufferSize, DefaultBufferSize, maxBufferSize},
+			recvBufferSize: ReceiveBufferSizeOption{minBufferSize, DefaultBufferSize, maxBufferSize},
+		}
+	})
+}
diff --git a/pkg/tcpip/transport/tcp/rcv.go b/pkg/tcpip/transport/tcp/rcv.go
new file mode 100644
index 000000000..574602105
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/rcv.go
@@ -0,0 +1,208 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tcp
+
+import (
+	"container/heap"
+
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum"
+)
+
+// receiver holds the state necessary to receive TCP segments and turn them
+// into a stream of bytes.
+type receiver struct {
+	ep *endpoint
+
+	// rcvNxt is the next sequence number expected from the peer.
+	rcvNxt seqnum.Value
+
+	// rcvAcc is one beyond the last acceptable sequence number. That is,
+	// the "largest" sequence value that the receiver has announced to
+	// its peer that it's willing to accept. This may be different than
+	// rcvNxt + rcvWnd if the receive window is reduced; in that case we
+	// have to reduce the window as we receive more data instead of
+	// shrinking it.
+	rcvAcc seqnum.Value
+
+	// rcvWndScale is the number of bits the advertised window is shifted
+	// right by when announced to the peer.
+	rcvWndScale uint8
+
+	// closed is set once the receive side has been shut down (FIN seen).
+	closed bool
+
+	// pendingRcvdSegments holds out-of-order segments, bounded by
+	// pendingBufSize; pendingBufUsed tracks how much of it is in use.
+	pendingRcvdSegments segmentHeap
+	pendingBufUsed      seqnum.Size
+	pendingBufSize      seqnum.Size
+}
+
+// newReceiver returns a receiver for an established connection whose peer's
+// initial receive sequence number is irs.
+func newReceiver(ep *endpoint, irs seqnum.Value, rcvWnd seqnum.Size, rcvWndScale uint8) *receiver {
+	return &receiver{
+		ep:             ep,
+		rcvNxt:         irs + 1,
+		rcvAcc:         irs.Add(rcvWnd + 1),
+		rcvWndScale:    rcvWndScale,
+		pendingBufSize: rcvWnd,
+	}
+}
+
+// acceptable checks if the segment sequence number range is acceptable
+// according to the table on page 26 of RFC 793.
+func (r *receiver) acceptable(segSeq seqnum.Value, segLen seqnum.Size) bool {
+	rcvWnd := r.rcvNxt.Size(r.rcvAcc)
+	if rcvWnd == 0 {
+		return segLen == 0 && segSeq == r.rcvNxt
+	}
+
+	return segSeq.InWindow(r.rcvNxt, rcvWnd) ||
+		seqnum.Overlap(r.rcvNxt, rcvWnd, segSeq, segLen)
+}
+
+// getSendParams returns the parameters needed by the sender when building
+// segments to send.
+func (r *receiver) getSendParams() (rcvNxt seqnum.Value, rcvWnd seqnum.Size) {
+	// Calculate the window size based on the current buffer size. The
+	// window may only ever grow (rcvAcc never moves backwards).
+	n := r.ep.receiveBufferAvailable()
+	acc := r.rcvNxt.Add(seqnum.Size(n))
+	if r.rcvAcc.LessThan(acc) {
+		r.rcvAcc = acc
+	}
+
+	return r.rcvNxt, r.rcvNxt.Size(r.rcvAcc) >> r.rcvWndScale
+}
+
+// nonZeroWindow is called when the receive window grows from zero to nonzero;
+// in such cases we may need to send an ack to indicate to our peer that it can
+// resume sending data.
+func (r *receiver) nonZeroWindow() {
+	if (r.rcvAcc-r.rcvNxt)>>r.rcvWndScale != 0 {
+		// We never got around to announcing a zero window size, so we
+		// don't need to immediately announce a nonzero one.
+		return
+	}
+
+	// Immediately send an ack.
+	r.ep.snd.sendAck()
+}
+
+// consumeSegment attempts to consume a segment that was received by r. The
+// segment may have just been received or may have been received earlier but
+// wasn't ready to be consumed then.
+//
+// Returns true if the segment was consumed, false if it cannot be consumed
+// yet because of a missing segment.
+func (r *receiver) consumeSegment(s *segment, segSeq seqnum.Value, segLen seqnum.Size) bool {
+	if segLen > 0 {
+		// If the segment doesn't include the seqnum we're expecting to
+		// consume now, we're missing a segment. We cannot proceed until
+		// we receive that segment though.
+		if !r.rcvNxt.InWindow(segSeq, segLen) {
+			return false
+		}
+
+		// Trim segment to eliminate already acknowledged data.
+		if segSeq.LessThan(r.rcvNxt) {
+			diff := segSeq.Size(r.rcvNxt)
+			segLen -= diff
+			segSeq.UpdateForward(diff)
+			s.sequenceNumber.UpdateForward(diff)
+			s.data.TrimFront(int(diff))
+		}
+
+		// Move segment to ready-to-deliver list. Wakeup any waiters.
+		r.ep.readyToRead(s)
+
+	} else if segSeq != r.rcvNxt {
+		return false
+	}
+
+	// Update the segment that we're expecting to consume.
+	r.rcvNxt = segSeq.Add(segLen)
+
+	// Trim SACK Blocks to remove any SACK information that covers
+	// sequence numbers that have been consumed.
+	TrimSACKBlockList(&r.ep.sack, r.rcvNxt)
+
+	if s.flagIsSet(flagFin) {
+		// The FIN consumes one sequence number.
+		r.rcvNxt++
+
+		// Send ACK immediately.
+		r.ep.snd.sendAck()
+
+		// Tell any readers that no more data will come.
+		r.closed = true
+		r.ep.readyToRead(nil)
+
+		// Flush out any pending segments, except the very first one if
+		// it happens to be the one we're handling now because the
+		// caller is using it.
+		first := 0
+		if len(r.pendingRcvdSegments) != 0 && r.pendingRcvdSegments[0] == s {
+			first = 1
+		}
+
+		for i := first; i < len(r.pendingRcvdSegments); i++ {
+			r.pendingRcvdSegments[i].decRef()
+		}
+		r.pendingRcvdSegments = r.pendingRcvdSegments[:first]
+	}
+
+	return true
+}
+
+// handleRcvdSegment handles TCP segments directed at the connection managed by
+// r as they arrive. It is called by the protocol main loop.
+func (r *receiver) handleRcvdSegment(s *segment) {
+	// We don't care about receive processing anymore if the receive side
+	// is closed.
+	if r.closed {
+		return
+	}
+
+	segLen := seqnum.Size(s.data.Size())
+	segSeq := s.sequenceNumber
+
+	// If the sequence number range is outside the acceptable range, just
+	// send an ACK. This is according to RFC 793, page 37.
+	if !r.acceptable(segSeq, segLen) {
+		r.ep.snd.sendAck()
+		return
+	}
+
+	// Defer segment processing if it can't be consumed now.
+	if !r.consumeSegment(s, segSeq, segLen) {
+		if segLen > 0 || s.flagIsSet(flagFin) {
+			// We only store the segment if it's within our buffer
+			// size limit.
+			if r.pendingBufUsed < r.pendingBufSize {
+				r.pendingBufUsed += s.logicalLen()
+				s.incRef()
+				heap.Push(&r.pendingRcvdSegments, s)
+			}
+
+			UpdateSACKBlocks(&r.ep.sack, segSeq, segSeq.Add(segLen), r.rcvNxt)
+
+			// Immediately send an ack so that the peer knows it may
+			// have to retransmit.
+			r.ep.snd.sendAck()
+		}
+		return
+	}
+
+	// By consuming the current segment, we may have filled a gap in the
+	// sequence number domain that allows pending segments to be consumed
+	// now. So try to do it.
+	for !r.closed && r.pendingRcvdSegments.Len() > 0 {
+		s := r.pendingRcvdSegments[0]
+		segLen := seqnum.Size(s.data.Size())
+		segSeq := s.sequenceNumber
+
+		// Skip segment altogether if it has already been acknowledged.
+		if !segSeq.Add(segLen-1).LessThan(r.rcvNxt) &&
+			!r.consumeSegment(s, segSeq, segLen) {
+			break
+		}
+
+		heap.Pop(&r.pendingRcvdSegments)
+		r.pendingBufUsed -= s.logicalLen()
+		s.decRef()
+	}
+}
diff --git a/pkg/tcpip/transport/tcp/sack.go b/pkg/tcpip/transport/tcp/sack.go
new file mode 100644
index 000000000..0b66305a5
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/sack.go
@@ -0,0 +1,85 @@
+package tcp
+
+import (
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/header"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum"
+)
+
+const (
+	// MaxSACKBlocks is the maximum number of SACK blocks stored
+	// at receiver side.
+	MaxSACKBlocks = 6
+)
+
+// UpdateSACKBlocks updates the list of SACK blocks to include the segment
+// specified by segStart->segEnd. If the segment happens to be an out of order
+// delivery then the first block in the sack.blocks always includes the
+// segment identified by segStart->segEnd.
+func UpdateSACKBlocks(sack *SACKInfo, segStart seqnum.Value, segEnd seqnum.Value, rcvNxt seqnum.Value) {
+	newSB := header.SACKBlock{Start: segStart, End: segEnd}
+	if sack.NumBlocks == 0 {
+		sack.Blocks[0] = newSB
+		sack.NumBlocks = 1
+		return
+	}
+	var n = 0
+	for i := 0; i < sack.NumBlocks; i++ {
+		start, end := sack.Blocks[i].Start, sack.Blocks[i].End
+		if end.LessThanEq(start) || start.LessThanEq(rcvNxt) {
+			// Discard any invalid blocks where end is before start
+			// and discard any sack blocks that are before rcvNxt as
+			// those have already been acked.
+			continue
+		}
+		if newSB.Start.LessThanEq(end) && start.LessThanEq(newSB.End) {
+			// Merge this SACK block into newSB and discard this SACK
+			// block.
+			if start.LessThan(newSB.Start) {
+				newSB.Start = start
+			}
+			if newSB.End.LessThan(end) {
+				newSB.End = end
+			}
+		} else {
+			// Save this block.
+			sack.Blocks[n] = sack.Blocks[i]
+			n++
+		}
+	}
+	if rcvNxt.LessThan(newSB.Start) {
+		// If this was an out of order segment then make sure that the
+		// first SACK block is the one that includes the segment.
+		//
+		// See the first bullet point in
+		// https://tools.ietf.org/html/rfc2018#section-4
+		if n == MaxSACKBlocks {
+			// If the number of SACK blocks is equal to
+			// MaxSACKBlocks then discard the last SACK block.
+			n--
+		}
+		for i := n - 1; i >= 0; i-- {
+			sack.Blocks[i+1] = sack.Blocks[i]
+		}
+		sack.Blocks[0] = newSB
+		n++
+	}
+	sack.NumBlocks = n
+}
+
+// TrimSACKBlockList updates the sack block list by removing/modifying any block
+// where start is < rcvNxt.
+func TrimSACKBlockList(sack *SACKInfo, rcvNxt seqnum.Value) {
+	n := 0
+	for i := 0; i < sack.NumBlocks; i++ {
+		if sack.Blocks[i].End.LessThanEq(rcvNxt) {
+			continue
+		}
+		if sack.Blocks[i].Start.LessThan(rcvNxt) {
+			// Shrink this SACK block.
+			sack.Blocks[i].Start = rcvNxt
+		}
+		sack.Blocks[n] = sack.Blocks[i]
+		n++
+	}
+	sack.NumBlocks = n
+}
diff --git a/pkg/tcpip/transport/tcp/segment.go b/pkg/tcpip/transport/tcp/segment.go
new file mode 100644
index 000000000..c742fc394
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/segment.go
@@ -0,0 +1,145 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tcp
+
+import (
+	"sync/atomic"
+
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/buffer"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/header"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum"
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/stack"
+)
+
+// Flags that may be set in a TCP segment.
+const (
+	flagFin = 1 << iota
+	flagSyn
+	flagRst
+	flagPsh
+	flagAck
+	flagUrg
+)
+
+// segment represents a TCP segment.
It holds the payload and parsed TCP segment
+// information, and can be added to intrusive lists.
+// segment is mostly immutable, the only field allowed to change is viewToDeliver.
+type segment struct {
+	segmentEntry
+	// refCnt counts references; the route is released when it drops to 0.
+	refCnt int32
+	id     stack.TransportEndpointID
+	route  stack.Route
+	data   buffer.VectorisedView
+	// views is used as buffer for data when its length is large
+	// enough to store a VectorisedView.
+	views [8]buffer.View
+	// viewToDeliver keeps track of the next View that should be
+	// delivered by the Read endpoint.
+	viewToDeliver  int
+	sequenceNumber seqnum.Value
+	ackNumber      seqnum.Value
+	flags          uint8
+	window         seqnum.Size
+
+	// parsedOptions stores the parsed values from the options in the segment.
+	parsedOptions header.TCPOptions
+	options       []byte
+}
+
+// newSegment returns a segment referencing a clone of vv, with one reference
+// held by the caller.
+func newSegment(r *stack.Route, id stack.TransportEndpointID, vv *buffer.VectorisedView) *segment {
+	s := &segment{
+		refCnt: 1,
+		id:     id,
+		route:  r.Clone(),
+	}
+	s.data = vv.Clone(s.views[:])
+	return s
+}
+
+// newSegmentFromView is like newSegment but wraps a single flat view.
+func newSegmentFromView(r *stack.Route, id stack.TransportEndpointID, v buffer.View) *segment {
+	s := &segment{
+		refCnt: 1,
+		id:     id,
+		route:  r.Clone(),
+	}
+	s.views[0] = v
+	s.data = buffer.NewVectorisedView(len(v), s.views[:1])
+	return s
+}
+
+// clone returns an independent copy of s with its own reference count, data
+// clone and route clone.
+func (s *segment) clone() *segment {
+	t := &segment{
+		refCnt:         1,
+		id:             s.id,
+		sequenceNumber: s.sequenceNumber,
+		ackNumber:      s.ackNumber,
+		flags:          s.flags,
+		window:         s.window,
+		route:          s.route.Clone(),
+		viewToDeliver:  s.viewToDeliver,
+	}
+	t.data = s.data.Clone(t.views[:])
+	return t
+}
+
+// flagIsSet reports whether the given flag bit is set in the segment's flags.
+func (s *segment) flagIsSet(flag uint8) bool {
+	return (s.flags & flag) != 0
+}
+
+// decRef drops a reference and releases the route when the count reaches zero.
+func (s *segment) decRef() {
+	if atomic.AddInt32(&s.refCnt, -1) == 0 {
+		s.route.Release()
+	}
+}
+
+// incRef takes an additional reference on the segment.
+func (s *segment) incRef() {
+	atomic.AddInt32(&s.refCnt, 1)
+}
+
+// logicalLen is the segment length in the sequence number space. It's defined
+// as the data length plus one for each of the SYN and FIN bits set.
+func (s *segment) logicalLen() seqnum.Size {
+	l := seqnum.Size(s.data.Size())
+	if s.flagIsSet(flagSyn) {
+		l++
+	}
+	if s.flagIsSet(flagFin) {
+		l++
+	}
+	return l
+}
+
+// parse populates the sequence & ack numbers, flags, and window fields of the
+// segment from the TCP header stored in the data. It then updates the view to
+// skip the data. Returns boolean indicating if the parsing was successful.
+func (s *segment) parse() bool {
+	h := header.TCP(s.data.First())
+
+	// h is the header followed by the payload. We check that the offset to
+	// the data respects the following constraints:
+	// 1. That it's at least the minimum header size; if we don't do this
+	//    then part of the header would be delivered to user.
+	// 2. That the header fits within the buffer; if we don't do this, we
+	//    would panic when we tried to access data beyond the buffer.
+	//
+	// N.B. The segment has already been validated as having at least the
+	//      minimum TCP size before reaching here, so it's safe to read the
+	//      fields.
+	offset := int(h.DataOffset())
+	if offset < header.TCPMinimumSize || offset > len(h) {
+		return false
+	}
+
+	s.options = []byte(h[header.TCPMinimumSize:offset])
+	s.parsedOptions = header.ParseTCPOptions(s.options)
+	s.data.TrimFront(offset)
+
+	s.sequenceNumber = seqnum.Value(h.SequenceNumber())
+	s.ackNumber = seqnum.Value(h.AckNumber())
+	s.flags = h.Flags()
+	s.window = seqnum.Size(h.WindowSize())
+
+	return true
+}
diff --git a/pkg/tcpip/transport/tcp/segment_heap.go b/pkg/tcpip/transport/tcp/segment_heap.go
new file mode 100644
index 000000000..137ddbdd2
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/segment_heap.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tcp
+
+// segmentHeap implements heap.Interface, ordering segments by sequence
+// number so the out-of-order segment with the smallest seqnum is at the top.
+type segmentHeap []*segment
+
+// Len returns the length of h.
+func (h segmentHeap) Len() int {
+	return len(h)
+}
+
+// Less determines whether the i-th element of h is less than the j-th element.
+func (h segmentHeap) Less(i, j int) bool {
+	return h[i].sequenceNumber.LessThan(h[j].sequenceNumber)
+}
+
+// Swap swaps the i-th and j-th elements of h.
+func (h segmentHeap) Swap(i, j int) {
+	h[i], h[j] = h[j], h[i]
+}
+
+// Push adds x as the last element of h.
+func (h *segmentHeap) Push(x interface{}) {
+	*h = append(*h, x.(*segment))
+}
+
+// Pop removes the last element of h and returns it.
+func (h *segmentHeap) Pop() interface{} {
+	old := *h
+	n := len(old)
+	x := old[n-1]
+	*h = old[:n-1]
+	return x
+}
diff --git a/pkg/tcpip/transport/tcp/segment_queue.go b/pkg/tcpip/transport/tcp/segment_queue.go
new file mode 100644
index 000000000..c4a7f7d5b
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/segment_queue.go
@@ -0,0 +1,69 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tcp
+
+import (
+	"sync"
+
+	"gvisor.googlesource.com/gvisor/pkg/tcpip/header"
+)
+
+// segmentQueue is a bounded, thread-safe queue of TCP segments. The bound is
+// expressed in bytes (payload plus minimum header size per segment).
+type segmentQueue struct {
+	mu    sync.Mutex
+	list  segmentList
+	limit int
+	used  int
+}
+
+// empty determines if the queue is empty.
+func (q *segmentQueue) empty() bool {
+	q.mu.Lock()
+	r := q.used == 0
+	q.mu.Unlock()
+
+	return r
+}
+
+// setLimit updates the limit. No segments are immediately dropped in case the
+// queue becomes full due to the new limit.
+func (q *segmentQueue) setLimit(limit int) {
+	q.mu.Lock()
+	q.limit = limit
+	q.mu.Unlock()
+}
+
+// enqueue adds the given segment to the queue.
+//
+// Returns true when the segment is successfully added to the queue, in which
+// case ownership of the reference is transferred to the queue. And returns
+// false if the queue is full, in which case ownership is retained by the
+// caller.
+func (q *segmentQueue) enqueue(s *segment) bool { + q.mu.Lock() + r := q.used < q.limit + if r { + q.list.PushBack(s) + q.used += s.data.Size() + header.TCPMinimumSize + } + q.mu.Unlock() + + return r +} + +// dequeue removes and returns the next segment from queue, if one exists. +// Ownership is transferred to the caller, who is responsible for decrementing +// the ref count when done. +func (q *segmentQueue) dequeue() *segment { + q.mu.Lock() + s := q.list.Front() + if s != nil { + q.list.Remove(s) + q.used -= s.data.Size() + header.TCPMinimumSize + } + q.mu.Unlock() + + return s +} diff --git a/pkg/tcpip/transport/tcp/snd.go b/pkg/tcpip/transport/tcp/snd.go new file mode 100644 index 000000000..ad94aecd8 --- /dev/null +++ b/pkg/tcpip/transport/tcp/snd.go @@ -0,0 +1,628 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tcp + +import ( + "math" + "time" + + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" +) + +const ( + // minRTO is the minimum allowed value for the retransmit timeout. + minRTO = 200 * time.Millisecond + + // InitialCwnd is the initial congestion window. + InitialCwnd = 10 +) + +// sender holds the state necessary to send TCP segments. +type sender struct { + ep *endpoint + + // lastSendTime is the timestamp when the last packet was sent. + lastSendTime time.Time + + // dupAckCount is the number of duplicated acks received. It is used for + // fast retransmit. + dupAckCount int + + // fr holds state related to fast recovery. + fr fastRecovery + + // sndCwnd is the congestion window, in packets. + sndCwnd int + + // sndSsthresh is the threshold between slow start and congestion + // avoidance. 
+ sndSsthresh int + + // sndCAAckCount is the number of packets acknowledged during congestion + // avoidance. When enough packets have been ack'd (typically cwnd + // packets), the congestion window is incremented by one. + sndCAAckCount int + + // outstanding is the number of outstanding packets, that is, packets + // that have been sent but not yet acknowledged. + outstanding int + + // sndWnd is the send window size. + sndWnd seqnum.Size + + // sndUna is the next unacknowledged sequence number. + sndUna seqnum.Value + + // sndNxt is the sequence number of the next segment to be sent. + sndNxt seqnum.Value + + // sndNxtList is the sequence number of the next segment to be added to + // the send list. + sndNxtList seqnum.Value + + // rttMeasureSeqNum is the sequence number being used for the latest RTT + // measurement. + rttMeasureSeqNum seqnum.Value + + // rttMeasureTime is the time when the rttMeasureSeqNum was sent. + rttMeasureTime time.Time + + closed bool + writeNext *segment + writeList segmentList + resendTimer timer `state:"nosave"` + resendWaker sleep.Waker `state:"nosave"` + + // srtt, rttvar & rto are the "smoothed round-trip time", "round-trip + // time variation" and "retransmit timeout", as defined in section 2 of + // RFC 6298. + srtt time.Duration + rttvar time.Duration + rto time.Duration + srttInited bool + + // maxPayloadSize is the maximum size of the payload of a given segment. + // It is initialized on demand. + maxPayloadSize int + + // sndWndScale is the number of bits to shift left when reading the send + // window size from a segment. + sndWndScale uint8 + + // maxSentAck is the maxium acknowledgement actually sent. + maxSentAck seqnum.Value +} + +// fastRecovery holds information related to fast recovery from a packet loss. +type fastRecovery struct { + // active whether the endpoint is in fast recovery. The following fields + // are only meaningful when active is true. 
+ active bool + + // first and last represent the inclusive sequence number range being + // recovered. + first seqnum.Value + last seqnum.Value + + // maxCwnd is the maximum value the congestion window may be inflated to + // due to duplicate acks. This exists to avoid attacks where the + // receiver intentionally sends duplicate acks to artificially inflate + // the sender's cwnd. + maxCwnd int +} + +func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint16, sndWndScale int) *sender { + s := &sender{ + ep: ep, + sndCwnd: InitialCwnd, + sndSsthresh: math.MaxInt64, + sndWnd: sndWnd, + sndUna: iss + 1, + sndNxt: iss + 1, + sndNxtList: iss + 1, + rto: 1 * time.Second, + rttMeasureSeqNum: iss + 1, + lastSendTime: time.Now(), + maxPayloadSize: int(mss), + maxSentAck: irs + 1, + fr: fastRecovery{ + // See: https://tools.ietf.org/html/rfc6582#section-3.2 Step 1. + last: iss, + }, + } + + // A negative sndWndScale means that no scaling is in use, otherwise we + // store the scaling value. + if sndWndScale > 0 { + s.sndWndScale = uint8(sndWndScale) + } + + s.updateMaxPayloadSize(int(ep.route.MTU()), 0) + + s.resendTimer.init(&s.resendWaker) + + return s +} + +// updateMaxPayloadSize updates the maximum payload size based on the given +// MTU. If this is in response to "packet too big" control packets (indicated +// by the count argument), it also reduces the number of oustanding packets and +// attempts to retransmit the first packet above the MTU size. +func (s *sender) updateMaxPayloadSize(mtu, count int) { + m := mtu - header.TCPMinimumSize + + // Calculate the maximum option size. + var maxSackBlocks [header.TCPMaxSACKBlocks]header.SACKBlock + options := s.ep.makeOptions(maxSackBlocks[:]) + m -= len(options) + putOptions(options) + + // We don't adjust up for now. + if m >= s.maxPayloadSize { + return + } + + // Make sure we can transmit at least one byte. 
+ if m <= 0 {
+ m = 1
+ }
+
+ s.maxPayloadSize = m
+
+ s.outstanding -= count
+ if s.outstanding < 0 {
+ s.outstanding = 0
+ }
+
+ // Rewind writeNext to the first segment exceeding the MTU. Do nothing
+ // if it is already before such a packet.
+ for seg := s.writeList.Front(); seg != nil; seg = seg.Next() {
+ if seg == s.writeNext {
+ // We got to writeNext before we could find a segment
+ // exceeding the MTU.
+ break
+ }
+
+ if seg.data.Size() > m {
+ // We found a segment exceeding the MTU. Rewind
+ // writeNext and try to retransmit it.
+ s.writeNext = seg
+ break
+ }
+ }
+
+ // Since we likely reduced the number of outstanding packets, we may be
+ // ready to send some more.
+ s.sendData()
+}
+
+// sendAck sends an ACK segment.
+func (s *sender) sendAck() {
+ s.sendSegment(nil, flagAck, s.sndNxt)
+}
+
+// updateRTO updates the retransmit timeout when a new round-trip time is
+// available. This is done in accordance with section 2 of RFC 6298.
+func (s *sender) updateRTO(rtt time.Duration) {
+ if !s.srttInited {
+ s.rttvar = rtt / 2
+ s.srtt = rtt
+ s.srttInited = true
+ } else {
+ diff := s.srtt - rtt
+ if diff < 0 {
+ diff = -diff
+ }
+ s.rttvar = (3*s.rttvar + diff) / 4
+ s.srtt = (7*s.srtt + rtt) / 8
+ }
+
+ s.rto = s.srtt + 4*s.rttvar
+ if s.rto < minRTO {
+ s.rto = minRTO
+ }
+}
+
+// resendSegment resends the first unacknowledged segment.
+func (s *sender) resendSegment() {
+ // Don't use any segments we already sent to measure RTT as they may
+ // have been affected by packets being lost.
+ s.rttMeasureSeqNum = s.sndNxt
+
+ // Resend the segment.
+ if seg := s.writeList.Front(); seg != nil {
+ s.sendSegment(&seg.data, seg.flags, seg.sequenceNumber)
+ }
+}
+
+// reduceSlowStartThreshold reduces the slow-start threshold per RFC 5681,
+// page 6, eq. 4. It is called when we detect congestion in the network.
+func (s *sender) reduceSlowStartThreshold() { + s.sndSsthresh = s.outstanding / 2 + if s.sndSsthresh < 2 { + s.sndSsthresh = 2 + } +} + +// retransmitTimerExpired is called when the retransmit timer expires, and +// unacknowledged segments are assumed lost, and thus need to be resent. +// Returns true if the connection is still usable, or false if the connection +// is deemed lost. +func (s *sender) retransmitTimerExpired() bool { + // Check if the timer actually expired or if it's a spurious wake due + // to a previously orphaned runtime timer. + if !s.resendTimer.checkExpiration() { + return true + } + + // Give up if we've waited more than a minute since the last resend. + if s.rto >= 60*time.Second { + return false + } + + // Set new timeout. The timer will be restarted by the call to sendData + // below. + s.rto *= 2 + + if s.fr.active { + // We were attempting fast recovery but were not successful. + // Leave the state. We don't need to update ssthresh because it + // has already been updated when entered fast-recovery. + s.leaveFastRecovery() + } + + // See: https://tools.ietf.org/html/rfc6582#section-3.2 Step 4. + // We store the highest sequence number transmitted in cases where + // we were not in fast recovery. + s.fr.last = s.sndNxt - 1 + + // We lost a packet, so reduce ssthresh. + s.reduceSlowStartThreshold() + + // Reduce the congestion window to 1, i.e., enter slow-start. Per + // RFC 5681, page 7, we must use 1 regardless of the value of the + // initial congestion window. + s.sndCwnd = 1 + + // Mark the next segment to be sent as the first unacknowledged one and + // start sending again. Set the number of outstanding packets to 0 so + // that we'll be able to retransmit. + // + // We'll keep on transmitting (or retransmitting) as we get acks for + // the data we transmit. + s.outstanding = 0 + s.writeNext = s.writeList.Front() + s.sendData() + + return true +} + +// sendData sends new data segments. 
It is called when data becomes available or
+// when the send window opens up.
+func (s *sender) sendData() {
+ limit := s.maxPayloadSize
+
+ // Reduce the congestion window to min(IW, cwnd) per RFC 5681, page 10.
+ // "A TCP SHOULD set cwnd to no more than RW before beginning
+ // transmission if the TCP has not sent data in the interval exceeding
+ // the retransmission timeout."
+ if !s.fr.active && time.Now().Sub(s.lastSendTime) > s.rto {
+ if s.sndCwnd > InitialCwnd {
+ s.sndCwnd = InitialCwnd
+ }
+ }
+
+ // TODO: We currently don't merge multiple send buffers
+ // into one segment if they happen to fit. We should do that
+ // eventually.
+ var seg *segment
+ end := s.sndUna.Add(s.sndWnd)
+ for seg = s.writeNext; seg != nil && s.outstanding < s.sndCwnd; seg = seg.Next() {
+ // We abuse the flags field to determine if we have already
+ // assigned a sequence number to this segment.
+ if seg.flags == 0 {
+ seg.sequenceNumber = s.sndNxt
+ seg.flags = flagAck | flagPsh
+ }
+
+ var segEnd seqnum.Value
+ if seg.data.Size() == 0 {
+ seg.flags = flagAck
+
+ s.ep.rcvListMu.Lock()
+ rcvBufUsed := s.ep.rcvBufUsed
+ s.ep.rcvListMu.Unlock()
+
+ s.ep.mu.Lock()
+ // We're sending a FIN by default.
+ fl := flagFin
+ if (s.ep.shutdownFlags&tcpip.ShutdownRead) != 0 && rcvBufUsed > 0 {
+ // If there is unread data we must send a RST.
+ // For more information see RFC 2525 section 2.17.
+ fl = flagRst
+ }
+ s.ep.mu.Unlock()
+ seg.flags |= uint8(fl)
+
+ segEnd = seg.sequenceNumber.Add(1)
+ } else {
+ // We're sending a non-FIN segment.
+ if !seg.sequenceNumber.LessThan(end) {
+ break
+ }
+
+ available := int(seg.sequenceNumber.Size(end))
+ if available > limit {
+ available = limit
+ }
+
+ if seg.data.Size() > available {
+ // Split this segment up.
+ nSeg := seg.clone()
+ nSeg.data.TrimFront(available)
+ nSeg.sequenceNumber.UpdateForward(seqnum.Size(available))
+ s.writeList.InsertAfter(seg, nSeg)
+ seg.data.CapLength(available)
+ }
+
+ s.outstanding++
+ segEnd = seg.sequenceNumber.Add(seqnum.Size(seg.data.Size()))
+ }
+
+ s.sendSegment(&seg.data, seg.flags, seg.sequenceNumber)
+
+ // Update sndNxt if we actually sent new data (as opposed to
+ // retransmitting some previously sent data).
+ if s.sndNxt.LessThan(segEnd) {
+ s.sndNxt = segEnd
+ }
+ }
+
+ // Remember the next segment we'll write.
+ s.writeNext = seg
+
+ // Enable the timer if we have pending data and it's not enabled yet.
+ if !s.resendTimer.enabled() && s.sndUna != s.sndNxt {
+ s.resendTimer.enable(s.rto)
+ }
+}
+
+func (s *sender) enterFastRecovery() {
+ s.reduceSlowStartThreshold()
+ // Save state to reflect we're now in fast recovery.
+ // See: https://tools.ietf.org/html/rfc5681#section-3.2 Step 3.
+ // We inflate the cwnd by 3 to account for the 3 packets which triggered
+ // the 3 duplicate ACKs and are now not in flight.
+ s.sndCwnd = s.sndSsthresh + 3
+ s.fr.first = s.sndUna
+ s.fr.last = s.sndNxt - 1
+ s.fr.maxCwnd = s.sndCwnd + s.outstanding
+ s.fr.active = true
+}
+
+func (s *sender) leaveFastRecovery() {
+ s.fr.active = false
+ s.fr.first = 0
+ s.fr.last = s.sndNxt - 1
+ s.fr.maxCwnd = 0
+ s.dupAckCount = 0
+
+ // Deflate cwnd. It had been artificially inflated when new dups arrived.
+ s.sndCwnd = s.sndSsthresh
+}
+
+// checkDuplicateAck is called when an ack is received. It manages the state
+// related to duplicate acks and determines if a retransmit is needed according
+// to the rules in RFC 6582 (NewReno).
+func (s *sender) checkDuplicateAck(seg *segment) bool {
+ ack := seg.ackNumber
+ if s.fr.active {
+ // We are in fast recovery mode. Ignore the ack if it's out of
+ // range.
+ if !ack.InRange(s.sndUna, s.sndNxt+1) { + return false + } + + // Leave fast recovery if it acknowledges all the data covered by + // this fast recovery session. + if s.fr.last.LessThan(ack) { + s.leaveFastRecovery() + return false + } + + // Don't count this as a duplicate if it is carrying data or + // updating the window. + if seg.logicalLen() != 0 || s.sndWnd != seg.window { + return false + } + + // Inflate the congestion window if we're getting duplicate acks + // for the packet we retransmitted. + if ack == s.fr.first { + // We received a dup, inflate the congestion window by 1 + // packet if we're not at the max yet. + if s.sndCwnd < s.fr.maxCwnd { + s.sndCwnd++ + } + return false + } + + // A partial ack was received. Retransmit this packet and + // remember it so that we don't retransmit it again. We don't + // inflate the window because we're putting the same packet back + // onto the wire. + // + // N.B. The retransmit timer will be reset by the caller. + s.fr.first = ack + return true + } + + // We're not in fast recovery yet. A segment is considered a duplicate + // only if it doesn't carry any data and doesn't update the send window, + // because if it does, it wasn't sent in response to an out-of-order + // segment. + if ack != s.sndUna || seg.logicalLen() != 0 || s.sndWnd != seg.window || ack == s.sndNxt { + s.dupAckCount = 0 + return false + } + + // Enter fast recovery when we reach 3 dups. + s.dupAckCount++ + if s.dupAckCount != 3 { + return false + } + + // See: https://tools.ietf.org/html/rfc6582#section-3.2 Step 2 + // + // We only do the check here, the incrementing of last to the highest + // sequence number transmitted till now is done when enterFastRecovery + // is invoked. + if !s.fr.last.LessThan(seg.ackNumber) { + s.dupAckCount = 0 + return false + } + s.enterFastRecovery() + s.dupAckCount = 0 + return true +} + +// updateCwnd updates the congestion window based on the number of packets that +// were acknowledged. 
+func (s *sender) updateCwnd(packetsAcked int) { + if s.sndCwnd < s.sndSsthresh { + // Don't let the congestion window cross into the congestion + // avoidance range. + newcwnd := s.sndCwnd + packetsAcked + if newcwnd >= s.sndSsthresh { + newcwnd = s.sndSsthresh + s.sndCAAckCount = 0 + } + + packetsAcked -= newcwnd - s.sndCwnd + s.sndCwnd = newcwnd + if packetsAcked == 0 { + // We've consumed all ack'd packets. + return + } + } + + // Consume the packets in congestion avoidance mode. + s.sndCAAckCount += packetsAcked + if s.sndCAAckCount >= s.sndCwnd { + s.sndCwnd += s.sndCAAckCount / s.sndCwnd + s.sndCAAckCount = s.sndCAAckCount % s.sndCwnd + } +} + +// handleRcvdSegment is called when a segment is received; it is responsible for +// updating the send-related state. +func (s *sender) handleRcvdSegment(seg *segment) { + // Check if we can extract an RTT measurement from this ack. + if s.rttMeasureSeqNum.LessThan(seg.ackNumber) { + s.updateRTO(time.Now().Sub(s.rttMeasureTime)) + s.rttMeasureSeqNum = s.sndNxt + } + + // Update Timestamp if required. See RFC7323, section-4.3. + s.ep.updateRecentTimestamp(seg.parsedOptions.TSVal, s.maxSentAck, seg.sequenceNumber) + + // Count the duplicates and do the fast retransmit if needed. + rtx := s.checkDuplicateAck(seg) + + // Stash away the current window size. + s.sndWnd = seg.window + + // Ignore ack if it doesn't acknowledge any new data. + ack := seg.ackNumber + if (ack - 1).InRange(s.sndUna, s.sndNxt) { + // When an ack is received we must reset the timer. We stop it + // here and it will be restarted later if needed. + s.resendTimer.disable() + + // Remove all acknowledged data from the write list. + acked := s.sndUna.Size(ack) + s.sndUna = ack + + ackLeft := acked + originalOutstanding := s.outstanding + for ackLeft > 0 { + // We use logicalLen here because we can have FIN + // segments (which are always at the end of list) that + // have no data, but do consume a sequence number. 
+ seg := s.writeList.Front() + datalen := seg.logicalLen() + + if datalen > ackLeft { + seg.data.TrimFront(int(ackLeft)) + break + } + + if s.writeNext == seg { + s.writeNext = seg.Next() + } + s.writeList.Remove(seg) + s.outstanding-- + seg.decRef() + ackLeft -= datalen + } + + // Update the send buffer usage and notify potential waiters. + s.ep.updateSndBufferUsage(int(acked)) + + // If we are not in fast recovery then update the congestion + // window based on the number of acknowledged packets. + if !s.fr.active { + s.updateCwnd(originalOutstanding - s.outstanding) + } + + // It is possible for s.outstanding to drop below zero if we get + // a retransmit timeout, reset outstanding to zero but later + // get an ack that cover previously sent data. + if s.outstanding < 0 { + s.outstanding = 0 + } + } + + // Now that we've popped all acknowledged data from the retransmit + // queue, retransmit if needed. + if rtx { + s.resendSegment() + } + + // Send more data now that some of the pending data has been ack'd, or + // that the window opened up, or the congestion window was inflated due + // to a duplicate ack during fast recovery. This will also re-enable + // the retransmit timer if needed. + s.sendData() +} + +// sendSegment sends a new segment containing the given payload, flags and +// sequence number. +func (s *sender) sendSegment(data *buffer.VectorisedView, flags byte, seq seqnum.Value) *tcpip.Error { + s.lastSendTime = time.Now() + if seq == s.rttMeasureSeqNum { + s.rttMeasureTime = s.lastSendTime + } + + rcvNxt, rcvWnd := s.ep.rcv.getSendParams() + + // Remember the max sent ack. 
+ s.maxSentAck = rcvNxt
+
+ if data == nil {
+ return s.ep.sendRaw(nil, flags, seq, rcvNxt, rcvWnd)
+ }
+
+ if len(data.Views()) > 1 {
+ panic("send path does not support views with multiple buffers")
+ }
+
+ return s.ep.sendRaw(data.First(), flags, seq, rcvNxt, rcvWnd)
+}
diff --git a/pkg/tcpip/transport/tcp/tcp_sack_test.go b/pkg/tcpip/transport/tcp/tcp_sack_test.go
new file mode 100644
index 000000000..19de96dcb
--- /dev/null
+++ b/pkg/tcpip/transport/tcp/tcp_sack_test.go
@@ -0,0 +1,336 @@
+package tcp_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "gvisor.googlesource.com/gvisor/pkg/tcpip/header"
+ "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum"
+ "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp"
+ "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp/testing/context"
+)
+
+// createConnectedWithSACKPermittedOption creates and connects c.ep with the
+// SACKPermitted option enabled if the stack in the context has the SACK support
+// enabled.
+func createConnectedWithSACKPermittedOption(c *context.Context) *context.RawEndpoint {
+ return c.CreateConnectedWithOptions(header.TCPSynOptions{SACKPermitted: c.SACKEnabled()})
+}
+
+func setStackSACKPermitted(t *testing.T, c *context.Context, enable bool) {
+ t.Helper()
+ if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(enable)); err != nil {
+ t.Fatalf("c.s.SetTransportProtocolOption(tcp.ProtocolNumber, SACKEnabled(%v) = %v", enable, err)
+ }
+}
+
+// TestSackPermittedConnect establishes a connection with the SACK option
+// enabled.
+func TestSackPermittedConnect(t *testing.T) { + for _, sackEnabled := range []bool{false, true} { + t.Run(fmt.Sprintf("stack.sackEnabled: %v", sackEnabled), func(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + setStackSACKPermitted(t, c, sackEnabled) + rep := createConnectedWithSACKPermittedOption(c) + data := []byte{1, 2, 3} + + rep.SendPacket(data, nil) + savedSeqNum := rep.NextSeqNum + rep.VerifyACKNoSACK() + + // Make an out of order packet and send it. + rep.NextSeqNum += 3 + sackBlocks := []header.SACKBlock{ + {rep.NextSeqNum, rep.NextSeqNum.Add(seqnum.Size(len(data)))}, + } + rep.SendPacket(data, nil) + + // Restore the saved sequence number so that the + // VerifyXXX calls use the right sequence number for + // checking ACK numbers. + rep.NextSeqNum = savedSeqNum + if sackEnabled { + rep.VerifyACKHasSACK(sackBlocks) + } else { + rep.VerifyACKNoSACK() + } + + // Send the missing segment. + rep.SendPacket(data, nil) + // The ACK should contain the cumulative ACK for all 9 + // bytes sent and no SACK blocks. + rep.NextSeqNum += 3 + // Check that no SACK block is returned in the ACK. + rep.VerifyACKNoSACK() + }) + } +} + +// TestSackDisabledConnect establishes a connection with the SACK option +// disabled and verifies that no SACKs are sent for out of order segments. +func TestSackDisabledConnect(t *testing.T) { + for _, sackEnabled := range []bool{false, true} { + t.Run(fmt.Sprintf("sackEnabled: %v", sackEnabled), func(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + setStackSACKPermitted(t, c, sackEnabled) + + rep := c.CreateConnectedWithOptions(header.TCPSynOptions{}) + + data := []byte{1, 2, 3} + + rep.SendPacket(data, nil) + savedSeqNum := rep.NextSeqNum + rep.VerifyACKNoSACK() + + // Make an out of order packet and send it. + rep.NextSeqNum += 3 + rep.SendPacket(data, nil) + + // The ACK should contain the older sequence number and + // no SACK blocks. 
+ rep.NextSeqNum = savedSeqNum + rep.VerifyACKNoSACK() + + // Send the missing segment. + rep.SendPacket(data, nil) + // The ACK should contain the cumulative ACK for all 9 + // bytes sent and no SACK blocks. + rep.NextSeqNum += 3 + // Check that no SACK block is returned in the ACK. + rep.VerifyACKNoSACK() + }) + } +} + +// TestSackPermittedAccept accepts and establishes a connection with the +// SACKPermitted option enabled if the connection request specifies the +// SACKPermitted option. In case of SYN cookies SACK should be disabled as we +// don't encode the SACK information in the cookie. +func TestSackPermittedAccept(t *testing.T) { + type testCase struct { + cookieEnabled bool + sackPermitted bool + wndScale int + wndSize uint16 + } + + testCases := []testCase{ + // When cookie is used window scaling is disabled. + {true, false, -1, 0xffff}, // When cookie is used window scaling is disabled. + {false, true, 5, 0x8000}, // 0x8000 * 2^5 = 1<<20 = 1MB window (the default). + } + savedSynCountThreshold := tcp.SynRcvdCountThreshold + defer func() { + tcp.SynRcvdCountThreshold = savedSynCountThreshold + }() + for _, tc := range testCases { + t.Run(fmt.Sprintf("test: %#v", tc), func(t *testing.T) { + if tc.cookieEnabled { + tcp.SynRcvdCountThreshold = 0 + } else { + tcp.SynRcvdCountThreshold = savedSynCountThreshold + } + for _, sackEnabled := range []bool{false, true} { + t.Run(fmt.Sprintf("test stack.sackEnabled: %v", sackEnabled), func(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + setStackSACKPermitted(t, c, sackEnabled) + + rep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS, SACKPermitted: tc.sackPermitted}) + // Now verify no SACK blocks are + // received when sack is disabled. + data := []byte{1, 2, 3} + rep.SendPacket(data, nil) + rep.VerifyACKNoSACK() + + savedSeqNum := rep.NextSeqNum + + // Make an out of order packet and send + // it. 
+ rep.NextSeqNum += 3 + sackBlocks := []header.SACKBlock{ + {rep.NextSeqNum, rep.NextSeqNum.Add(seqnum.Size(len(data)))}, + } + rep.SendPacket(data, nil) + + // The ACK should contain the older + // sequence number. + rep.NextSeqNum = savedSeqNum + if sackEnabled && tc.sackPermitted { + rep.VerifyACKHasSACK(sackBlocks) + } else { + rep.VerifyACKNoSACK() + } + + // Send the missing segment. + rep.SendPacket(data, nil) + // The ACK should contain the cumulative + // ACK for all 9 bytes sent and no SACK + // blocks. + rep.NextSeqNum += 3 + // Check that no SACK block is returned + // in the ACK. + rep.VerifyACKNoSACK() + }) + } + }) + } +} + +// TestSackDisabledAccept accepts and establishes a connection with +// the SACKPermitted option disabled and verifies that no SACKs are +// sent for out of order packets. +func TestSackDisabledAccept(t *testing.T) { + type testCase struct { + cookieEnabled bool + wndScale int + wndSize uint16 + } + + testCases := []testCase{ + // When cookie is used window scaling is disabled. + {true, -1, 0xffff}, // When cookie is used window scaling is disabled. + {false, 5, 0x8000}, // 0x8000 * 2^5 = 1<<20 = 1MB window (the default). + } + savedSynCountThreshold := tcp.SynRcvdCountThreshold + defer func() { + tcp.SynRcvdCountThreshold = savedSynCountThreshold + }() + for _, tc := range testCases { + t.Run(fmt.Sprintf("test: %#v", tc), func(t *testing.T) { + if tc.cookieEnabled { + tcp.SynRcvdCountThreshold = 0 + } else { + tcp.SynRcvdCountThreshold = savedSynCountThreshold + } + for _, sackEnabled := range []bool{false, true} { + t.Run(fmt.Sprintf("test: sackEnabled: %v", sackEnabled), func(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + setStackSACKPermitted(t, c, sackEnabled) + + rep := c.AcceptWithOptions(tc.wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS}) + + // Now verify no SACK blocks are + // received when sack is disabled. 
+ data := []byte{1, 2, 3} + rep.SendPacket(data, nil) + rep.VerifyACKNoSACK() + savedSeqNum := rep.NextSeqNum + + // Make an out of order packet and send + // it. + rep.NextSeqNum += 3 + rep.SendPacket(data, nil) + + // The ACK should contain the older + // sequence number and no SACK blocks. + rep.NextSeqNum = savedSeqNum + rep.VerifyACKNoSACK() + + // Send the missing segment. + rep.SendPacket(data, nil) + // The ACK should contain the cumulative + // ACK for all 9 bytes sent and no SACK + // blocks. + rep.NextSeqNum += 3 + // Check that no SACK block is returned + // in the ACK. + rep.VerifyACKNoSACK() + }) + } + }) + } +} + +func TestUpdateSACKBlocks(t *testing.T) { + testCases := []struct { + segStart seqnum.Value + segEnd seqnum.Value + rcvNxt seqnum.Value + sackBlocks []header.SACKBlock + updated []header.SACKBlock + }{ + // Trivial cases where current SACK block list is empty and we + // have an out of order delivery. + {10, 11, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 11}}}, + {10, 12, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 12}}}, + {10, 20, 2, []header.SACKBlock{}, []header.SACKBlock{{10, 20}}}, + + // Cases where current SACK block list is not empty and we have + // an out of order delivery. Tests that the updated SACK block + // list has the first block as the one that contains the new + // SACK block representing the segment that was just delivered. + {10, 11, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 11}, {12, 20}}}, + {24, 30, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{24, 30}, {12, 20}}}, + {24, 30, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{24, 30}, {12, 20}, {32, 40}}}, + + // Ensure that we only retain header.MaxSACKBlocks and drop the + // oldest one if adding a new block exceeds + // header.MaxSACKBlocks. 
+ {24, 30, 9, + []header.SACKBlock{{12, 20}, {32, 40}, {42, 50}, {52, 60}, {62, 70}, {72, 80}}, + []header.SACKBlock{{24, 30}, {12, 20}, {32, 40}, {42, 50}, {52, 60}, {62, 70}}}, + + // Cases where segment extends an existing SACK block. + {10, 12, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 20}}}, + {10, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 22}}}, + {10, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{10, 22}}}, + {15, 22, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{12, 22}}}, + {15, 25, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{12, 25}}}, + {11, 25, 9, []header.SACKBlock{{12, 20}}, []header.SACKBlock{{11, 25}}}, + {10, 12, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 20}, {32, 40}}}, + {10, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 22}, {32, 40}}}, + {10, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{10, 22}, {32, 40}}}, + {15, 22, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{12, 22}, {32, 40}}}, + {15, 25, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{12, 25}, {32, 40}}}, + {11, 25, 9, []header.SACKBlock{{12, 20}, {32, 40}}, []header.SACKBlock{{11, 25}, {32, 40}}}, + + // Cases where segment contains rcvNxt. 
+ {10, 20, 15, []header.SACKBlock{{20, 30}, {40, 50}}, []header.SACKBlock{{40, 50}}}, + } + + for _, tc := range testCases { + var sack tcp.SACKInfo + copy(sack.Blocks[:], tc.sackBlocks) + sack.NumBlocks = len(tc.sackBlocks) + tcp.UpdateSACKBlocks(&sack, tc.segStart, tc.segEnd, tc.rcvNxt) + if got, want := sack.Blocks[:sack.NumBlocks], tc.updated; !reflect.DeepEqual(got, want) { + t.Errorf("UpdateSACKBlocks(%v, %v, %v, %v), got: %v, want: %v", tc.sackBlocks, tc.segStart, tc.segEnd, tc.rcvNxt, got, want) + } + + } +} + +func TestTrimSackBlockList(t *testing.T) { + testCases := []struct { + rcvNxt seqnum.Value + sackBlocks []header.SACKBlock + trimmed []header.SACKBlock + }{ + // Simple cases where we trim whole entries. + {2, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}}, + {21, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{22, 30}, {32, 40}}}, + {31, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{32, 40}}}, + {40, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{}}, + // Cases where we need to update a block. 
+ {12, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{12, 20}, {22, 30}, {32, 40}}}, + {23, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{23, 30}, {32, 40}}}, + {33, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{{33, 40}}}, + {41, []header.SACKBlock{{10, 20}, {22, 30}, {32, 40}}, []header.SACKBlock{}}, + } + for _, tc := range testCases { + var sack tcp.SACKInfo + copy(sack.Blocks[:], tc.sackBlocks) + sack.NumBlocks = len(tc.sackBlocks) + tcp.TrimSACKBlockList(&sack, tc.rcvNxt) + if got, want := sack.Blocks[:sack.NumBlocks], tc.trimmed; !reflect.DeepEqual(got, want) { + t.Errorf("TrimSackBlockList(%v, %v), got: %v, want: %v", tc.sackBlocks, tc.rcvNxt, got, want) + } + } +} diff --git a/pkg/tcpip/transport/tcp/tcp_test.go b/pkg/tcpip/transport/tcp/tcp_test.go new file mode 100644 index 000000000..118d861ba --- /dev/null +++ b/pkg/tcpip/transport/tcp/tcp_test.go @@ -0,0 +1,2759 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tcp_test + +import ( + "bytes" + "fmt" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/checker" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/loopback" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sniffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp/testing/context" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + // defaultMTU is the MTU, in bytes, used throughout the tests, except + // where another value is explicitly used. 
It is chosen to match the MTU + // of loopback interfaces on linux systems. + defaultMTU = 65535 + + // defaultIPv4MSS is the MSS sent by the network stack in SYN/SYN-ACK for an + // IPv4 endpoint when the MTU is set to defaultMTU in the test. + defaultIPv4MSS = defaultMTU - header.IPv4MinimumSize - header.TCPMinimumSize +) + +func TestGiveUpConnect(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + var wq waiter.Queue + ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + // Register for notification, then start connection attempt. + waitEntry, notifyCh := waiter.NewChannelEntry(nil) + wq.EventRegister(&waitEntry, waiter.EventOut) + defer wq.EventUnregister(&waitEntry) + + if err := ep.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort}); err != tcpip.ErrConnectStarted { + t.Fatalf("Unexpected return value from Connect: %v", err) + } + + // Close the connection, wait for completion. + ep.Close() + + // Wait for ep to become writable. + <-notifyCh + if err := ep.GetSockOpt(tcpip.ErrorOption{}); err != tcpip.ErrAborted { + t.Fatalf("got ep.GetSockOpt(tcpip.ErrorOption{}) = %v, want = %v", err, tcpip.ErrAborted) + } +} + +func TestActiveHandshake(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) +} + +func TestNonBlockingClose(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + ep := c.EP + c.EP = nil + + // Close the endpoint and measure how long it takes. 
+ t0 := time.Now() + ep.Close() + if diff := time.Now().Sub(t0); diff > 3*time.Second { + t.Fatalf("Took too long to close: %v", diff) + } +} + +func TestConnectResetAfterClose(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + ep := c.EP + c.EP = nil + + // Close the endpoint, make sure we get a FIN segment, then acknowledge + // to complete closure of sender, but don't send our own FIN. + ep.Close() + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin), + ), + ) + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Wait for the ep to give up waiting for a FIN, and send a RST. + time.Sleep(3 * time.Second) + for { + b := c.GetPacket() + tcp := header.TCP(header.IPv4(b).Payload()) + if tcp.Flags() == header.TCPFlagAck|header.TCPFlagFin { + // This is a retransmit of the FIN, ignore it. + continue + } + + checker.IPv4(t, b, + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagRst), + ), + ) + break + } +} + +func TestSimpleReceive(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + if _, err := c.EP.Read(nil); err != tcpip.ErrWouldBlock { + t.Fatalf("Unexpected error from Read: %v", err) + } + + data := []byte{1, 2, 3} + c.SendPacket(data, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Wait for receive to be notified. 
+ select { + case <-ch: + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for data to arrive") + } + + // Receive data. + v, err := c.EP.Read(nil) + if err != nil { + t.Fatalf("Unexpected error from Read: %v", err) + } + + if bytes.Compare(data, v) != 0 { + t.Fatalf("Data is different: expected %v, got %v", data, v) + } + + // Check that ACK is received. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+len(data))), + checker.TCPFlags(header.TCPFlagAck), + ), + ) +} + +func TestOutOfOrderReceive(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + if _, err := c.EP.Read(nil); err != tcpip.ErrWouldBlock { + t.Fatalf("Unexpected error from Read: %v", err) + } + + // Send second half of data first, with seqnum 3 ahead of expected. + data := []byte{1, 2, 3, 4, 5, 6} + c.SendPacket(data[3:], &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 793, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Check that we get an ACK specifying which seqnum is expected. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck), + ), + ) + + // Wait 200ms and check that no data has been received. + time.Sleep(200 * time.Millisecond) + if _, err := c.EP.Read(nil); err != tcpip.ErrWouldBlock { + t.Fatalf("Unexpected error from Read: %v", err) + } + + // Send the first 3 bytes now. + c.SendPacket(data[:3], &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Receive data. 
+ read := make([]byte, 0, 6) + for len(read) < len(data) { + v, err := c.EP.Read(nil) + if err != nil { + if err == tcpip.ErrWouldBlock { + // Wait for receive to be notified. + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Fatalf("Timed out waiting for data to arrive") + } + continue + } + t.Fatalf("Unexpected error from Read: %v", err) + } + + read = append(read, v...) + } + + // Check that we received the data in proper order. + if bytes.Compare(data, read) != 0 { + t.Fatalf("Data is different: expected %v, got %v", data, read) + } + + // Check that the whole data is acknowledged. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+len(data))), + checker.TCPFlags(header.TCPFlagAck), + ), + ) +} + +func TestOutOfOrderFlood(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Create a new connection with initial window size of 10. + opt := tcpip.ReceiveBufferSizeOption(10) + c.CreateConnected(789, 30000, &opt) + + if _, err := c.EP.Read(nil); err != tcpip.ErrWouldBlock { + t.Fatalf("Unexpected error from Read: %v", err) + } + + // Send 100 packets before the actual one that is expected. + data := []byte{1, 2, 3, 4, 5, 6} + for i := 0; i < 100; i++ { + c.SendPacket(data[3:], &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 796, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck), + ), + ) + } + + // Send packet with seqnum 793. It must be discarded because the + // out-of-order buffer was filled by the previous packets. 
+ c.SendPacket(data[3:], &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 793, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck), + ), + ) + + // Now send the expected packet, seqnum 790. + c.SendPacket(data[:3], &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Check that only packet 790 is acknowledged. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(793), + checker.TCPFlags(header.TCPFlagAck), + ), + ) +} + +func TestRstOnCloseWithUnreadData(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + if _, err := c.EP.Read(nil); err != tcpip.ErrWouldBlock { + t.Fatalf("Unexpected error from Read: %v", err) + } + + data := []byte{1, 2, 3} + c.SendPacket(data, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Wait for receive to be notified. + select { + case <-ch: + case <-time.After(3 * time.Second): + t.Fatalf("Timed out waiting for data to arrive") + } + + // Check that ACK is received, this happens regardless of the read. 
+ checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+len(data))), + checker.TCPFlags(header.TCPFlagAck), + ), + ) + + // Now that we know we have unread data, let's just close the connection + // and verify that netstack sends an RST rather than a FIN. + c.EP.Close() + + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagRst), + )) +} + +func TestFullWindowReceive(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + opt := tcpip.ReceiveBufferSizeOption(10) + c.CreateConnected(789, 30000, &opt) + + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + _, err := c.EP.Read(nil) + if err != tcpip.ErrWouldBlock { + t.Fatalf("Unexpected error from Read: %v", err) + } + + // Fill up the window. + data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + c.SendPacket(data, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Wait for receive to be notified. + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Fatalf("Timed out waiting for data to arrive") + } + + // Check that data is acknowledged, and window goes to zero. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+len(data))), + checker.TCPFlags(header.TCPFlagAck), + checker.Window(0), + ), + ) + + // Receive data and check it. + v, err := c.EP.Read(nil) + if err != nil { + t.Fatalf("Unexpected error from Read: %v", err) + } + + if bytes.Compare(data, v) != 0 { + t.Fatalf("Data is different: expected %v, got %v", data, v) + } + + // Check that we get an ACK for the newly non-zero window. 
+ checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+len(data))), + checker.TCPFlags(header.TCPFlagAck), + checker.Window(10), + ), + ) +} + +func TestNoWindowShrinking(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Start off with a window size of 10, then shrink it to 5. + opt := tcpip.ReceiveBufferSizeOption(10) + c.CreateConnected(789, 30000, &opt) + + opt = 5 + if err := c.EP.SetSockOpt(opt); err != nil { + t.Fatalf("SetSockOpt failed: %v", err) + } + + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + _, err := c.EP.Read(nil) + if err != tcpip.ErrWouldBlock { + t.Fatalf("Unexpected error from Read: %v", err) + } + + // Send 3 bytes, check that the peer acknowledges them. + data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + c.SendPacket(data[:3], &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Wait for receive to be notified. + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Fatalf("Timed out waiting for data to arrive") + } + + // Check that data is acknowledged, and that window doesn't go to zero + // just yet because it was previously set to 10. It must go to 7 now. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(793), + checker.TCPFlags(header.TCPFlagAck), + checker.Window(7), + ), + ) + + // Send 7 more bytes, check that the window fills up. 
+ c.SendPacket(data[3:], &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 793, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Fatalf("Timed out waiting for data to arrive") + } + + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+len(data))), + checker.TCPFlags(header.TCPFlagAck), + checker.Window(0), + ), + ) + + // Receive data and check it. + read := make([]byte, 0, 10) + for len(read) < len(data) { + v, err := c.EP.Read(nil) + if err != nil { + t.Fatalf("Unexpected error from Read: %v", err) + } + + read = append(read, v...) + } + + if bytes.Compare(data, read) != 0 { + t.Fatalf("Data is different: expected %v, got %v", data, read) + } + + // Check that we get an ACK for the newly non-zero window, which is the + // new size. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+len(data))), + checker.TCPFlags(header.TCPFlagAck), + checker.Window(5), + ), + ) +} + +func TestSimpleSend(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + data := []byte{1, 2, 3} + view := buffer.NewView(len(data)) + copy(view, data) + + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Check that data is received. 
+ b := c.GetPacket() + checker.IPv4(t, b, + checker.PayloadLen(len(data)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + + if p := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; bytes.Compare(data, p) != 0 { + t.Fatalf("Data is different: expected %v, got %v", data, p) + } + + // Acknowledge the data. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1 + seqnum.Size(len(data))), + RcvWnd: 30000, + }) +} + +func TestZeroWindowSend(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 0, nil) + + data := []byte{1, 2, 3} + view := buffer.NewView(len(data)) + copy(view, data) + + _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}) + if err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Since the window is currently zero, check that no packet is received. + c.CheckNoPacket("Packet received when window is zero") + + // Open up the window. Data should be received now. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Check that data is received. + b := c.GetPacket() + checker.IPv4(t, b, + checker.PayloadLen(len(data)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + + if p := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; bytes.Compare(data, p) != 0 { + t.Fatalf("Data is different: expected %v, got %v", data, p) + } + + // Acknowledge the data. 
+ c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1 + seqnum.Size(len(data))), + RcvWnd: 30000, + }) +} + +func TestScaledWindowConnect(t *testing.T) { + // This test ensures that window scaling is used when the peer + // does advertise it and connection is established with Connect(). + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Set the window size greater than the maximum non-scaled window. + opt := tcpip.ReceiveBufferSizeOption(65535 * 3) + c.CreateConnectedWithRawOptions(789, 30000, &opt, []byte{ + header.TCPOptionWS, 3, 0, header.TCPOptionNOP, + }) + + data := []byte{1, 2, 3} + view := buffer.NewView(len(data)) + copy(view, data) + + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Check that data is received, and that advertised window is 0xbfff, + // that is, that it is scaled. + b := c.GetPacket() + checker.IPv4(t, b, + checker.PayloadLen(len(data)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.Window(0xbfff), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) +} + +func TestNonScaledWindowConnect(t *testing.T) { + // This test ensures that window scaling is not used when the peer + // doesn't advertise it and connection is established with Connect(). + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Set the window size greater than the maximum non-scaled window. 
+ opt := tcpip.ReceiveBufferSizeOption(65535 * 3) + c.CreateConnected(789, 30000, &opt) + + data := []byte{1, 2, 3} + view := buffer.NewView(len(data)) + copy(view, data) + + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Check that data is received, and that advertised window is 0xffff, + // that is, that it's not scaled. + b := c.GetPacket() + checker.IPv4(t, b, + checker.PayloadLen(len(data)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.Window(0xffff), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) +} + +func TestScaledWindowAccept(t *testing.T) { + // This test ensures that window scaling is used when the peer + // does advertise it and connection is established with Accept(). + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Create EP and start listening. + wq := &waiter.Queue{} + ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + defer ep.Close() + + // Set the window size greater than the maximum non-scaled window. + if err := ep.SetSockOpt(tcpip.ReceiveBufferSizeOption(65535 * 3)); err != nil { + t.Fatalf("SetSockOpt failed failed: %v", err) + } + + if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + if err := ep.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Do 3-way handshake. + c.PassiveConnectWithOptions(100, 2, header.TCPSynOptions{MSS: defaultIPv4MSS}) + + // Try to accept the connection. + we, ch := waiter.NewChannelEntry(nil) + wq.EventRegister(&we, waiter.EventIn) + defer wq.EventUnregister(&we) + + c.EP, _, err = ep.Accept() + if err == tcpip.ErrWouldBlock { + // Wait for connection to be established. 
+ select { + case <-ch: + c.EP, _, err = ep.Accept() + if err != nil { + t.Fatalf("Accept failed: %v", err) + } + + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for accept") + } + } + + data := []byte{1, 2, 3} + view := buffer.NewView(len(data)) + copy(view, data) + + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Check that data is received, and that advertised window is 0xbfff, + // that is, that it is scaled. + b := c.GetPacket() + checker.IPv4(t, b, + checker.PayloadLen(len(data)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.Window(0xbfff), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) +} + +func TestNonScaledWindowAccept(t *testing.T) { + // This test ensures that window scaling is not used when the peer + // doesn't advertise it and connection is established with Accept(). + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Create EP and start listening. + wq := &waiter.Queue{} + ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + defer ep.Close() + + // Set the window size greater than the maximum non-scaled window. + if err := ep.SetSockOpt(tcpip.ReceiveBufferSizeOption(65535 * 3)); err != nil { + t.Fatalf("SetSockOpt failed failed: %v", err) + } + + if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + if err := ep.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Do 3-way handshake. + c.PassiveConnect(100, 2, header.TCPSynOptions{MSS: defaultIPv4MSS}) + + // Try to accept the connection. 
+ we, ch := waiter.NewChannelEntry(nil) + wq.EventRegister(&we, waiter.EventIn) + defer wq.EventUnregister(&we) + + c.EP, _, err = ep.Accept() + if err == tcpip.ErrWouldBlock { + // Wait for connection to be established. + select { + case <-ch: + c.EP, _, err = ep.Accept() + if err != nil { + t.Fatalf("Accept failed: %v", err) + } + + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for accept") + } + } + + data := []byte{1, 2, 3} + view := buffer.NewView(len(data)) + copy(view, data) + + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Check that data is received, and that advertised window is 0xffff, + // that is, that it's not scaled. + b := c.GetPacket() + checker.IPv4(t, b, + checker.PayloadLen(len(data)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.Window(0xffff), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) +} + +func TestZeroScaledWindowReceive(t *testing.T) { + // This test ensures that the endpoint sends a non-zero window size + // advertisement when the scaled window transitions from 0 to non-zero, + // but the actual window (not scaled) hasn't gotten to zero. + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Set the window size such that a window scale of 4 will be used. + const wnd = 65535 * 10 + const ws = uint32(4) + opt := tcpip.ReceiveBufferSizeOption(wnd) + c.CreateConnectedWithRawOptions(789, 30000, &opt, []byte{ + header.TCPOptionWS, 3, 0, header.TCPOptionNOP, + }) + + // Write chunks of 50000 bytes. 
+ remain := wnd + sent := 0 + data := make([]byte, 50000) + for remain > len(data) { + c.SendPacket(data, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: seqnum.Value(790 + sent), + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + sent += len(data) + remain -= len(data) + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+sent)), + checker.Window(uint16(remain>>ws)), + checker.TCPFlags(header.TCPFlagAck), + ), + ) + } + + // Make the window non-zero, but the scaled window zero. + if remain >= 16 { + data = data[:remain-15] + c.SendPacket(data, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: seqnum.Value(790 + sent), + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + sent += len(data) + remain -= len(data) + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+sent)), + checker.Window(0), + checker.TCPFlags(header.TCPFlagAck), + ), + ) + } + + // Read some data. An ack should be sent in response to that. 
+ v, err := c.EP.Read(nil) + if err != nil { + t.Fatalf("Unexpected error from Read: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(790+sent)), + checker.Window(uint16(len(v)>>ws)), + checker.TCPFlags(header.TCPFlagAck), + ), + ) +} + +func testBrokenUpWrite(t *testing.T, c *context.Context, maxPayload int) { + payloadMultiplier := 10 + dataLen := payloadMultiplier * maxPayload + data := make([]byte, dataLen) + for i := range data { + data[i] = byte(i) + } + + view := buffer.NewView(len(data)) + copy(view, data) + + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Check that data is received in chunks. + bytesReceived := 0 + numPackets := 0 + for bytesReceived != dataLen { + b := c.GetPacket() + numPackets++ + tcp := header.TCP(header.IPv4(b).Payload()) + payloadLen := len(tcp.Payload()) + checker.IPv4(t, b, + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1+uint32(bytesReceived)), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + + pdata := data[bytesReceived : bytesReceived+payloadLen] + if p := tcp.Payload(); bytes.Compare(pdata, p) != 0 { + t.Fatalf("Data is different: expected %v, got %v", pdata, p) + } + bytesReceived += payloadLen + var options []byte + if c.TimeStampEnabled { + // If timestamp option is enabled, echo back the timestamp and increment + // the TSEcr value included in the packet and send that back as the TSVal. + parsedOpts := tcp.ParsedOptions() + tsOpt := [12]byte{header.TCPOptionNOP, header.TCPOptionNOP} + header.EncodeTSOption(parsedOpts.TSEcr+1, parsedOpts.TSVal, tsOpt[2:]) + options = tsOpt[:] + } + // Acknowledge the data. 
+ c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1 + seqnum.Size(bytesReceived)), + RcvWnd: 30000, + TCPOpts: options, + }) + } + if numPackets == 1 { + t.Fatalf("expected write to be broken up into multiple packets, but got 1 packet") + } +} + +func TestSendGreaterThanMTU(t *testing.T) { + const maxPayload = 100 + c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload)) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + testBrokenUpWrite(t, c, maxPayload) +} + +func TestActiveSendMSSLessThanMTU(t *testing.T) { + const maxPayload = 100 + c := context.New(t, 65535) + defer c.Cleanup() + + c.CreateConnectedWithRawOptions(789, 30000, nil, []byte{ + header.TCPOptionMSS, 4, byte(maxPayload / 256), byte(maxPayload % 256), + }) + testBrokenUpWrite(t, c, maxPayload) +} + +func TestPassiveSendMSSLessThanMTU(t *testing.T) { + const maxPayload = 100 + const mtu = 1200 + c := context.New(t, mtu) + defer c.Cleanup() + + // Create EP and start listening. + wq := &waiter.Queue{} + ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + defer ep.Close() + + // Set the buffer size to a deterministic size so that we can check the + // window scaling option. + const rcvBufferSize = 0x20000 + const wndScale = 2 + if err := ep.SetSockOpt(tcpip.ReceiveBufferSizeOption(rcvBufferSize)); err != nil { + t.Fatalf("SetSockOpt failed failed: %v", err) + } + + if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + if err := ep.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Do 3-way handshake. + c.PassiveConnect(maxPayload, wndScale, header.TCPSynOptions{MSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize}) + + // Try to accept the connection. 
+ we, ch := waiter.NewChannelEntry(nil) + wq.EventRegister(&we, waiter.EventIn) + defer wq.EventUnregister(&we) + + c.EP, _, err = ep.Accept() + if err == tcpip.ErrWouldBlock { + // Wait for connection to be established. + select { + case <-ch: + c.EP, _, err = ep.Accept() + if err != nil { + t.Fatalf("Accept failed: %v", err) + } + + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for accept") + } + } + + // Check that data gets properly segmented. + testBrokenUpWrite(t, c, maxPayload) +} + +func TestSynCookiePassiveSendMSSLessThanMTU(t *testing.T) { + const maxPayload = 536 + const mtu = 2000 + c := context.New(t, mtu) + defer c.Cleanup() + + // Set the SynRcvd threshold to zero to force a syn cookie based accept + // to happen. + saved := tcp.SynRcvdCountThreshold + defer func() { + tcp.SynRcvdCountThreshold = saved + }() + tcp.SynRcvdCountThreshold = 0 + + // Create EP and start listening. + wq := &waiter.Queue{} + ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + defer ep.Close() + + if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + if err := ep.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Do 3-way handshake. + c.PassiveConnect(maxPayload, -1, header.TCPSynOptions{MSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize}) + + // Try to accept the connection. + we, ch := waiter.NewChannelEntry(nil) + wq.EventRegister(&we, waiter.EventIn) + defer wq.EventUnregister(&we) + + c.EP, _, err = ep.Accept() + if err == tcpip.ErrWouldBlock { + // Wait for connection to be established. + select { + case <-ch: + c.EP, _, err = ep.Accept() + if err != nil { + t.Fatalf("Accept failed: %v", err) + } + + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for accept") + } + } + + // Check that data gets properly segmented. 
+ testBrokenUpWrite(t, c, maxPayload) +} + +func TestForwarderSendMSSLessThanMTU(t *testing.T) { + const maxPayload = 100 + const mtu = 1200 + c := context.New(t, mtu) + defer c.Cleanup() + + s := c.Stack() + ch := make(chan *tcpip.Error, 1) + f := tcp.NewForwarder(s, 65536, 10, func(r *tcp.ForwarderRequest) { + var err *tcpip.Error + c.EP, err = r.CreateEndpoint(&c.WQ) + ch <- err + }) + s.SetTransportProtocolHandler(tcp.ProtocolNumber, f.HandlePacket) + + // Do 3-way handshake. + c.PassiveConnect(maxPayload, 1, header.TCPSynOptions{MSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize}) + + // Wait for connection to be available. + select { + case err := <-ch: + if err != nil { + t.Fatalf("Error creating endpoint: %v", err) + } + case <-time.After(2 * time.Second): + t.Fatalf("Timed out waiting for connection") + } + + // Check that data gets properly segmented. + testBrokenUpWrite(t, c, maxPayload) +} + +func TestSynOptionsOnActiveConnect(t *testing.T) { + const mtu = 1400 + c := context.New(t, mtu) + defer c.Cleanup() + + // Create TCP endpoint. + var err *tcpip.Error + c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + // Set the buffer size to a deterministic size so that we can check the + // window scaling option. + const rcvBufferSize = 0x20000 + const wndScale = 2 + if err := c.EP.SetSockOpt(tcpip.ReceiveBufferSizeOption(rcvBufferSize)); err != nil { + t.Fatalf("SetSockOpt failed failed: %v", err) + } + + // Start connection attempt. + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventOut) + defer c.WQ.EventUnregister(&we) + + err = c.EP.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort}) + if err != tcpip.ErrConnectStarted { + t.Fatalf("Unexpected return value from Connect: %v", err) + } + + // Receive SYN packet. 
+ b := c.GetPacket() + + checker.IPv4(t, b, + checker.TCP( + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagSyn), + checker.TCPSynOptions(header.TCPSynOptions{MSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize, WS: wndScale}), + ), + ) + + tcp := header.TCP(header.IPv4(b).Payload()) + c.IRS = seqnum.Value(tcp.SequenceNumber()) + + // Wait for retransmit. + time.Sleep(1 * time.Second) + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagSyn), + checker.SrcPort(tcp.SourcePort()), + checker.SeqNum(tcp.SequenceNumber()), + checker.TCPSynOptions(header.TCPSynOptions{MSS: mtu - header.IPv4MinimumSize - header.TCPMinimumSize, WS: wndScale}), + ), + ) + + // Send SYN-ACK. + iss := seqnum.Value(789) + c.SendPacket(nil, &context.Headers{ + SrcPort: tcp.DestinationPort(), + DstPort: tcp.SourcePort(), + Flags: header.TCPFlagSyn | header.TCPFlagAck, + SeqNum: iss, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + // Receive ACK packet. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagAck), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(iss)+1), + ), + ) + + // Wait for connection to be established. + select { + case <-ch: + err = c.EP.GetSockOpt(tcpip.ErrorOption{}) + if err != nil { + t.Fatalf("Unexpected error when connecting: %v", err) + } + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for connection") + } +} + +func TestCloseListener(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Create listener. 
+ var wq waiter.Queue + ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + if err := ep.Bind(tcpip.FullAddress{}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + if err := ep.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Close the listener and measure how long it takes. + t0 := time.Now() + ep.Close() + if diff := time.Now().Sub(t0); diff > 3*time.Second { + t.Fatalf("Took too long to close: %v", diff) + } +} + +func TestReceiveOnResetConnection(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + // Send RST segment. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagRst, + SeqNum: 790, + RcvWnd: 30000, + }) + + // Try to read. + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + +loop: + for { + switch _, err := c.EP.Read(nil); err { + case nil: + t.Fatalf("Unexpected success.") + case tcpip.ErrWouldBlock: + select { + case <-ch: + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for reset to arrive") + } + case tcpip.ErrConnectionReset: + break loop + default: + t.Fatalf("Unexpected error: want %v, got %v", tcpip.ErrConnectionReset, err) + } + } +} + +func TestSendOnResetConnection(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + // Send RST segment. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagRst, + SeqNum: 790, + RcvWnd: 30000, + }) + + // Wait for the RST to be received. + time.Sleep(1 * time.Second) + + // Try to write. 
+ view := buffer.NewView(10) + _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}) + if err != tcpip.ErrConnectionReset { + t.Fatalf("Unexpected error from Write: want %v, got %v", tcpip.ErrConnectionReset, err) + } +} + +func TestFinImmediately(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + // Shutdown immediately, check that we get a FIN. + if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil { + t.Fatalf("Unexpected error from Shutdown: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin), + ), + ) + + // Ack and send FIN as well. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck | header.TCPFlagFin, + SeqNum: 790, + AckNum: c.IRS.Add(2), + RcvWnd: 30000, + }) + + // Check that the stack acks the FIN. + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+2), + checker.AckNum(791), + checker.TCPFlags(header.TCPFlagAck), + ), + ) +} + +func TestFinRetransmit(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + // Shutdown immediately, check that we get a FIN. + if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil { + t.Fatalf("Unexpected error from Shutdown: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin), + ), + ) + + // Don't acknowledge yet. We should get a retransmit of the FIN. 
+ checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin), + ), + ) + + // Ack and send FIN as well. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck | header.TCPFlagFin, + SeqNum: 790, + AckNum: c.IRS.Add(2), + RcvWnd: 30000, + }) + + // Check that the stack acks the FIN. + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+2), + checker.AckNum(791), + checker.TCPFlags(header.TCPFlagAck), + ), + ) +} + +func TestFinWithNoPendingData(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + // Write something out, and have it acknowledged. + view := buffer.NewView(10) + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + next := uint32(c.IRS) + 1 + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(len(view)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + next += uint32(len(view)) + + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: seqnum.Value(next), + RcvWnd: 30000, + }) + + // Shutdown, check that we get a FIN. 
+ if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil { + t.Fatalf("Unexpected error from Shutdown: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin), + ), + ) + next++ + + // Ack and send FIN as well. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck | header.TCPFlagFin, + SeqNum: 790, + AckNum: seqnum.Value(next), + RcvWnd: 30000, + }) + + // Check that the stack acks the FIN. + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(791), + checker.TCPFlags(header.TCPFlagAck), + ), + ) +} + +func TestFinWithPendingDataCwndFull(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + // Write enough segments to fill the congestion window before ACK'ing + // any of them. + view := buffer.NewView(10) + for i := tcp.InitialCwnd; i > 0; i-- { + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + } + + next := uint32(c.IRS) + 1 + for i := tcp.InitialCwnd; i > 0; i-- { + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(len(view)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + next += uint32(len(view)) + } + + // Shutdown the connection, check that the FIN segment isn't sent + // because the congestion window doesn't allow it. Wait until a + // retransmit is received. 
+ if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil { + t.Fatalf("Unexpected error from Shutdown: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(len(view)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + + // Send the ACK that will allow the FIN to be sent as well. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: seqnum.Value(next), + RcvWnd: 30000, + }) + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin), + ), + ) + next++ + + // Send a FIN that acknowledges everything. Get an ACK back. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck | header.TCPFlagFin, + SeqNum: 790, + AckNum: seqnum.Value(next), + RcvWnd: 30000, + }) + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(791), + checker.TCPFlags(header.TCPFlagAck), + ), + ) +} + +func TestFinWithPendingData(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + // Write something out, and acknowledge it to get cwnd to 2. 
+ view := buffer.NewView(10) + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + next := uint32(c.IRS) + 1 + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(len(view)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + next += uint32(len(view)) + + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: seqnum.Value(next), + RcvWnd: 30000, + }) + + // Write new data, but don't acknowledge it. + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(len(view)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + next += uint32(len(view)) + + // Shutdown the connection, check that we do get a FIN. + if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil { + t.Fatalf("Unexpected error from Shutdown: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin), + ), + ) + next++ + + // Send a FIN that acknowledges everything. Get an ACK back. 
+ c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck | header.TCPFlagFin, + SeqNum: 790, + AckNum: seqnum.Value(next), + RcvWnd: 30000, + }) + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(791), + checker.TCPFlags(header.TCPFlagAck), + ), + ) +} + +func TestFinWithPartialAck(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + // Write something out, and acknowledge it to get cwnd to 2. Also send + // FIN from the test side. + view := buffer.NewView(10) + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + next := uint32(c.IRS) + 1 + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(len(view)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + next += uint32(len(view)) + + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck | header.TCPFlagFin, + SeqNum: 790, + AckNum: seqnum.Value(next), + RcvWnd: 30000, + }) + + // Check that we get an ACK for the fin. + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(791), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + + // Write new data, but don't acknowledge it. 
+ if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(len(view)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(791), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + next += uint32(len(view)) + + // Shutdown the connection, check that we do get a FIN. + if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil { + t.Fatalf("Unexpected error from Shutdown: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(next), + checker.AckNum(791), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin), + ), + ) + next++ + + // Send an ACK for the data, but not for the FIN yet. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 791, + AckNum: seqnum.Value(next - 1), + RcvWnd: 30000, + }) + + // Check that we don't get a retransmit of the FIN. + c.CheckNoPacketTimeout("FIN retransmitted when data was ack'd", 100*time.Millisecond) + + // Ack the FIN. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck | header.TCPFlagFin, + SeqNum: 791, + AckNum: seqnum.Value(next), + RcvWnd: 30000, + }) +} + +func TestExponentialIncreaseDuringSlowStart(t *testing.T) { + maxPayload := 10 + c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload)) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + const iterations = 7 + data := buffer.NewView(maxPayload * (tcp.InitialCwnd << (iterations + 1))) + for i := range data { + data[i] = byte(i) + } + + // Write all the data in one shot. Packets will only be written at the + // MTU size though. 
+ if _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + expected := tcp.InitialCwnd + bytesRead := 0 + for i := 0; i < iterations; i++ { + // Read all packets expected on this iteration. Don't + // acknowledge any of them just yet, so that we can measure the + // congestion window. + for j := 0; j < expected; j++ { + c.ReceiveAndCheckPacket(data, bytesRead, maxPayload) + bytesRead += maxPayload + } + + // Check we don't receive any more packets on this iteration. + // The timeout can't be too high or we'll trigger a timeout. + c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond) + + // Acknowledge all the data received so far. + c.SendAck(790, bytesRead) + + // Double the number of expected packets for the next iteration. + expected *= 2 + } +} + +func TestCongestionAvoidance(t *testing.T) { + maxPayload := 10 + c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload)) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + const iterations = 7 + data := buffer.NewView(2 * maxPayload * (tcp.InitialCwnd << (iterations + 1))) + for i := range data { + data[i] = byte(i) + } + + // Write all the data in one shot. Packets will only be written at the + // MTU size though. + if _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Do slow start for a few iterations. + expected := tcp.InitialCwnd + bytesRead := 0 + for i := 0; i < iterations; i++ { + expected = tcp.InitialCwnd << uint(i) + if i > 0 { + // Acknowledge all the data received so far if not on + // first iteration. + c.SendAck(790, bytesRead) + } + + // Read all packets expected on this iteration. Don't + // acknowledge any of them just yet, so that we can measure the + // congestion window. 
+ for j := 0; j < expected; j++ { + c.ReceiveAndCheckPacket(data, bytesRead, maxPayload) + bytesRead += maxPayload + } + + // Check we don't receive any more packets on this iteration. + // The timeout can't be too high or we'll trigger a timeout. + c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond) + } + + // Don't acknowledge the first packet of the last packet train. Let's + // wait for them to time out, which will trigger a restart of slow + // start, and initialization of ssthresh to cwnd/2. + rtxOffset := bytesRead - maxPayload*expected + c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload) + + // Acknowledge all the data received so far. + c.SendAck(790, bytesRead) + + // This part is tricky: when the timeout happened, we had "expected" + // packets pending, cwnd reset to 1, and ssthresh set to expected/2. + // By acknowledging "expected" packets, the slow-start part will + // increase cwnd to expected/2 (which "consumes" expected/2-1 of the + // acknowledgements), then the congestion avoidance part will consume + // an extra expected/2 acks to take cwnd to expected/2 + 1. One ack + // remains in the "ack count" (which will cause cwnd to be incremented + // once it reaches cwnd acks). + // + // So we're straight into congestion avoidance with cwnd set to + // expected/2 + 1. + // + // Check that packets trains of cwnd packets are sent, and that cwnd is + // incremented by 1 after we acknowledge each packet. + expected = expected/2 + 1 + for i := 0; i < iterations; i++ { + // Read all packets expected on this iteration. Don't + // acknowledge any of them just yet, so that we can measure the + // congestion window. + for j := 0; j < expected; j++ { + c.ReceiveAndCheckPacket(data, bytesRead, maxPayload) + bytesRead += maxPayload + } + + // Check we don't receive any more packets on this iteration. + // The timeout can't be too high or we'll trigger a timeout. 
+ c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond) + + // Acknowledge all the data received so far. + c.SendAck(790, bytesRead) + + // In cogestion avoidance, the packets trains increase by 1 in + // each iteration. + expected++ + } +} + +func TestFastRecovery(t *testing.T) { + maxPayload := 10 + c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload)) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + const iterations = 7 + data := buffer.NewView(2 * maxPayload * (tcp.InitialCwnd << (iterations + 1))) + for i := range data { + data[i] = byte(i) + } + + // Write all the data in one shot. Packets will only be written at the + // MTU size though. + if _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Do slow start for a few iterations. + expected := tcp.InitialCwnd + bytesRead := 0 + for i := 0; i < iterations; i++ { + expected = tcp.InitialCwnd << uint(i) + if i > 0 { + // Acknowledge all the data received so far if not on + // first iteration. + c.SendAck(790, bytesRead) + } + + // Read all packets expected on this iteration. Don't + // acknowledge any of them just yet, so that we can measure the + // congestion window. + for j := 0; j < expected; j++ { + c.ReceiveAndCheckPacket(data, bytesRead, maxPayload) + bytesRead += maxPayload + } + + // Check we don't receive any more packets on this iteration. + // The timeout can't be too high or we'll trigger a timeout. + c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond) + } + + // Send 3 duplicate acks. This should force an immediate retransmit of + // the pending packet and put the sender into fast recovery. + rtxOffset := bytesRead - maxPayload*expected + for i := 0; i < 3; i++ { + c.SendAck(790, rtxOffset) + } + + // Receive the retransmitted packet. 
+ c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload) + + // Now send 7 mode duplicate acks. Each of these should cause a window + // inflation by 1 and cause the sender to send an extra packet. + for i := 0; i < 7; i++ { + c.SendAck(790, rtxOffset) + } + + recover := bytesRead + + // Ensure no new packets arrive. + c.CheckNoPacketTimeout("More packets received than expected during recovery after dupacks for this cwnd.", + 50*time.Millisecond) + + // Acknowledge half of the pending data. + rtxOffset = bytesRead - expected*maxPayload/2 + c.SendAck(790, rtxOffset) + + // Receive the retransmit due to partial ack. + c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload) + + // Receive the 10 extra packets that should have been released due to + // the congestion window inflation in recovery. + for i := 0; i < 10; i++ { + c.ReceiveAndCheckPacket(data, bytesRead, maxPayload) + bytesRead += maxPayload + } + + // A partial ACK during recovery should reduce congestion window by the + // number acked. Since we had "expected" packets outstanding before sending + // partial ack and we acked expected/2 , the cwnd and outstanding should + // be expected/2 + 7. Which means the sender should not send any more packets + // till we ack this one. + c.CheckNoPacketTimeout("More packets received than expected during recovery after partial ack for this cwnd.", + 50*time.Millisecond) + + // Acknowledge all pending data to recover point. + c.SendAck(790, recover) + + // At this point, the cwnd should reset to expected/2 and there are 10 + // packets outstanding. + // + // NOTE: Technically netstack is incorrect in that we adjust the cwnd on + // the same segment that takes us out of recovery. But because of that + // the actual cwnd at exit of recovery will be expected/2 + 1 as we + // acked a cwnd worth of packets which will increase the cwnd further by + // 1 in congestion avoidance. + // + // Now in the first iteration since there are 10 packets outstanding. 
+ // We would expect to get expected/2 +1 - 10 packets. But subsequent + // iterations will send us expected/2 + 1 + 1 (per iteration). + expected = expected/2 + 1 - 10 + for i := 0; i < iterations; i++ { + // Read all packets expected on this iteration. Don't + // acknowledge any of them just yet, so that we can measure the + // congestion window. + for j := 0; j < expected; j++ { + c.ReceiveAndCheckPacket(data, bytesRead, maxPayload) + bytesRead += maxPayload + } + + // Check we don't receive any more packets on this iteration. + // The timeout can't be too high or we'll trigger a timeout. + c.CheckNoPacketTimeout(fmt.Sprintf("More packets received(after deflation) than expected %d for this cwnd.", expected), 50*time.Millisecond) + + // Acknowledge all the data received so far. + c.SendAck(790, bytesRead) + + // In cogestion avoidance, the packets trains increase by 1 in + // each iteration. + if i == 0 { + // After the first iteration we expect to get the full + // congestion window worth of packets in every + // iteration. + expected += 10 + } + expected++ + } +} + +func TestRetransmit(t *testing.T) { + maxPayload := 10 + c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload)) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + const iterations = 7 + data := buffer.NewView(maxPayload * (tcp.InitialCwnd << (iterations + 1))) + for i := range data { + data[i] = byte(i) + } + + // Write all the data in two shots. Packets will only be written at the + // MTU size though. + half := data[:len(data)/2] + if _, err := c.EP.Write(tcpip.SlicePayload(half), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + half = data[len(data)/2:] + if _, err := c.EP.Write(tcpip.SlicePayload(half), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Do slow start for a few iterations. 
+ expected := tcp.InitialCwnd + bytesRead := 0 + for i := 0; i < iterations; i++ { + expected = tcp.InitialCwnd << uint(i) + if i > 0 { + // Acknowledge all the data received so far if not on + // first iteration. + c.SendAck(790, bytesRead) + } + + // Read all packets expected on this iteration. Don't + // acknowledge any of them just yet, so that we can measure the + // congestion window. + for j := 0; j < expected; j++ { + c.ReceiveAndCheckPacket(data, bytesRead, maxPayload) + bytesRead += maxPayload + } + + // Check we don't receive any more packets on this iteration. + // The timeout can't be too high or we'll trigger a timeout. + c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond) + } + + // Wait for a timeout and retransmit. + rtxOffset := bytesRead - maxPayload*expected + c.ReceiveAndCheckPacket(data, rtxOffset, maxPayload) + + // Acknowledge half of the pending data. + rtxOffset = bytesRead - expected*maxPayload/2 + c.SendAck(790, rtxOffset) + + // Receive the remaining data, making sure that acknowledged data is not + // retransmitted. + for offset := rtxOffset; offset < len(data); offset += maxPayload { + c.ReceiveAndCheckPacket(data, offset, maxPayload) + c.SendAck(790, offset+maxPayload) + } + + c.CheckNoPacketTimeout("More packets received than expected for this cwnd.", 50*time.Millisecond) +} + +func TestUpdateListenBacklog(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // Create listener. + var wq waiter.Queue + ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + if err := ep.Bind(tcpip.FullAddress{}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + if err := ep.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + // Update the backlog with another Listen() on the same endpoint. 
+ if err := ep.Listen(20); err != nil { + t.Fatalf("Listen failed to update backlog: %v", err) + } + + ep.Close() +} + +func scaledSendWindow(t *testing.T, scale uint8) { + // This test ensures that the endpoint is using the right scaling by + // sending a buffer that is larger than the window size, and ensuring + // that the endpoint doesn't send more than allowed. + c := context.New(t, defaultMTU) + defer c.Cleanup() + + maxPayload := defaultMTU - header.IPv4MinimumSize - header.TCPMinimumSize + c.CreateConnectedWithRawOptions(789, 0, nil, []byte{ + header.TCPOptionMSS, 4, byte(maxPayload / 256), byte(maxPayload % 256), + header.TCPOptionWS, 3, scale, header.TCPOptionNOP, + }) + + // Open up the window with a scaled value. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 1, + }) + + // Send some data. Check that it's capped by the window size. + view := buffer.NewView(65535) + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Check that only data that fits in the scaled window is sent. + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen((1<<scale)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + + // Reset the connection to free resources. 
+ c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagRst, + SeqNum: 790, + }) +} + +func TestScaledSendWindow(t *testing.T) { + for scale := uint8(0); scale <= 14; scale++ { + scaledSendWindow(t, scale) + } +} + +func TestReceivedSegmentQueuing(t *testing.T) { + // This test sends 200 segments containing a few bytes each to an + // endpoint and checks that they're all received and acknowledged by + // the endpoint, that is, that none of the segments are dropped by + // internal queues. + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + // Send 200 segments. + data := []byte{1, 2, 3} + for i := 0; i < 200; i++ { + c.SendPacket(data, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: seqnum.Value(790 + i*len(data)), + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + } + + // Receive ACKs for all segments. + last := seqnum.Value(790 + 200*len(data)) + for { + b := c.GetPacket() + checker.IPv4(t, b, + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.TCPFlags(header.TCPFlagAck), + ), + ) + tcp := header.TCP(header.IPv4(b).Payload()) + ack := seqnum.Value(tcp.AckNumber()) + if ack == last { + break + } + + if last.LessThan(ack) { + t.Fatalf("Acknowledge (%v) beyond the expected (%v)", ack, last) + } + } +} + +func TestReadAfterClosedState(t *testing.T) { + // This test ensures that calling Read() or Peek() after the endpoint + // has transitioned to closedState still works if there is pending + // data. To transition to stateClosed without calling Close(), we must + // shutdown the send path and the peer must send its own FIN. 
+ c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnected(789, 30000, nil) + + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + if _, err := c.EP.Read(nil); err != tcpip.ErrWouldBlock { + t.Fatalf("Unexpected error from Read: %v", err) + } + + // Shutdown immediately for write, check that we get a FIN. + if err := c.EP.Shutdown(tcpip.ShutdownWrite); err != nil { + t.Fatalf("Unexpected error from Shutdown: %v", err) + } + + checker.IPv4(t, c.GetPacket(), + checker.PayloadLen(header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.TCPFlags(header.TCPFlagAck|header.TCPFlagFin), + ), + ) + + // Send some data and acknowledge the FIN. + data := []byte{1, 2, 3} + c.SendPacket(data, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck | header.TCPFlagFin, + SeqNum: 790, + AckNum: c.IRS.Add(2), + RcvWnd: 30000, + }) + + // Check that ACK is received. + checker.IPv4(t, c.GetPacket(), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+2), + checker.AckNum(uint32(791+len(data))), + checker.TCPFlags(header.TCPFlagAck), + ), + ) + + // Give the stack the chance to transition to closed state. + time.Sleep(1 * time.Second) + + // Wait for receive to be notified. + select { + case <-ch: + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for data to arrive") + } + + // Check that peek works. + peekBuf := make([]byte, 10) + n, err := c.EP.Peek([][]byte{peekBuf}) + if err != nil { + t.Fatalf("Unexpected error from Peek: %v", err) + } + + peekBuf = peekBuf[:n] + if bytes.Compare(data, peekBuf) != 0 { + t.Fatalf("Data is different: expected %v, got %v", data, peekBuf) + } + + // Receive data. 
+ v, err := c.EP.Read(nil) + if err != nil { + t.Fatalf("Unexpected error from Read: %v", err) + } + + if bytes.Compare(data, v) != 0 { + t.Fatalf("Data is different: expected %v, got %v", data, v) + } + + // Now that we drained the queue, check that functions fail with the + // right error code. + if _, err := c.EP.Read(nil); err != tcpip.ErrClosedForReceive { + t.Fatalf("Unexpected return from Read: got %v, want %v", err, tcpip.ErrClosedForReceive) + } + + if _, err := c.EP.Peek([][]byte{peekBuf}); err != tcpip.ErrClosedForReceive { + t.Fatalf("Unexpected return from Peek: got %v, want %v", err, tcpip.ErrClosedForReceive) + } +} + +func TestReusePort(t *testing.T) { + // This test ensures that ports are immediately available for reuse + // after Close on the endpoints using them returns. + c := context.New(t, defaultMTU) + defer c.Cleanup() + + // First case, just an endpoint that was bound. + var err *tcpip.Error + c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + c.EP.Close() + c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + c.EP.Close() + + // Second case, an endpoint that was bound and is connecting.. 
+ c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + err = c.EP.Connect(tcpip.FullAddress{Addr: context.TestAddr, Port: context.TestPort}) + if err != tcpip.ErrConnectStarted { + t.Fatalf("Unexpected return value from Connect: %v", err) + } + c.EP.Close() + + c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + c.EP.Close() + + // Third case, an endpoint that was bound and is listening. + c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + err = c.EP.Listen(10) + if err != nil { + t.Fatalf("Listen failed: %v", err) + } + c.EP.Close() + + c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + err = c.EP.Listen(10) + if err != nil { + t.Fatalf("Listen failed: %v", err) + } +} + +func checkRecvBufferSize(t *testing.T, ep tcpip.Endpoint, v int) { + t.Helper() + + var s tcpip.ReceiveBufferSizeOption + if err := ep.GetSockOpt(&s); err != nil { + t.Fatalf("GetSockOpt failed: %v", err) + } + + if int(s) != v { + t.Fatalf("Bad receive buffer size: want=%v, got=%v", v, s) + } +} + +func checkSendBufferSize(t *testing.T, ep tcpip.Endpoint, v int) { + t.Helper() + + var s 
tcpip.SendBufferSizeOption + if err := ep.GetSockOpt(&s); err != nil { + t.Fatalf("GetSockOpt failed: %v", err) + } + + if int(s) != v { + t.Fatalf("Bad send buffer size: want=%v, got=%v", v, s) + } +} + +func TestDefaultBufferSizes(t *testing.T) { + s := stack.New([]string{ipv4.ProtocolName}, []string{tcp.ProtocolName}) + + // Check the default values. + ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + defer func() { + if ep != nil { + ep.Close() + } + }() + + checkSendBufferSize(t, ep, tcp.DefaultBufferSize) + checkRecvBufferSize(t, ep, tcp.DefaultBufferSize) + + // Change the default send buffer size. + if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SendBufferSizeOption{1, tcp.DefaultBufferSize * 2, tcp.DefaultBufferSize * 20}); err != nil { + t.Fatalf("SetTransportProtocolOption failed: %v", err) + } + + ep.Close() + ep, err = s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + + checkSendBufferSize(t, ep, tcp.DefaultBufferSize*2) + checkRecvBufferSize(t, ep, tcp.DefaultBufferSize) + + // Change the default receive buffer size. + if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.ReceiveBufferSizeOption{1, tcp.DefaultBufferSize * 3, tcp.DefaultBufferSize * 30}); err != nil { + t.Fatalf("SetTransportProtocolOption failed: %v", err) + } + + ep.Close() + ep, err = s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + + checkSendBufferSize(t, ep, tcp.DefaultBufferSize*2) + checkRecvBufferSize(t, ep, tcp.DefaultBufferSize*3) +} + +func TestMinMaxBufferSizes(t *testing.T) { + s := stack.New([]string{ipv4.ProtocolName}, []string{tcp.ProtocolName}) + + // Check the default values. 
+ ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &waiter.Queue{}) + if err != nil { + t.Fatalf("NewEndpoint failed; %v", err) + } + defer ep.Close() + + // Change the min/max values for send/receive + if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.ReceiveBufferSizeOption{200, tcp.DefaultBufferSize * 2, tcp.DefaultBufferSize * 20}); err != nil { + t.Fatalf("SetTransportProtocolOption failed: %v", err) + } + + if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SendBufferSizeOption{300, tcp.DefaultBufferSize * 3, tcp.DefaultBufferSize * 30}); err != nil { + t.Fatalf("SetTransportProtocolOption failed: %v", err) + } + + // Set values below the min. + if err := ep.SetSockOpt(tcpip.ReceiveBufferSizeOption(199)); err != nil { + t.Fatalf("GetSockOpt failed: %v", err) + } + + checkRecvBufferSize(t, ep, 200) + + if err := ep.SetSockOpt(tcpip.SendBufferSizeOption(299)); err != nil { + t.Fatalf("GetSockOpt failed: %v", err) + } + + checkSendBufferSize(t, ep, 300) + + // Set values above the max. + if err := ep.SetSockOpt(tcpip.ReceiveBufferSizeOption(1 + tcp.DefaultBufferSize*20)); err != nil { + t.Fatalf("GetSockOpt failed: %v", err) + } + + checkRecvBufferSize(t, ep, tcp.DefaultBufferSize*20) + + if err := ep.SetSockOpt(tcpip.SendBufferSizeOption(1 + tcp.DefaultBufferSize*30)); err != nil { + t.Fatalf("GetSockOpt failed: %v", err) + } + + checkSendBufferSize(t, ep, tcp.DefaultBufferSize*30) +} + +func TestSelfConnect(t *testing.T) { + // This test ensures that intentional self-connects work. In particular, + // it checks that if an endpoint binds to say 127.0.0.1:1000 then + // connects to 127.0.0.1:1000, then it will be connected to itself, and + // is able to send and receive data through the same endpoint. 
+ s := stack.New([]string{ipv4.ProtocolName}, []string{tcp.ProtocolName}) + + id := loopback.New() + if testing.Verbose() { + id = sniffer.New(id) + } + + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, ipv4.ProtocolNumber, context.StackAddr); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{ + { + Destination: "\x00\x00\x00\x00", + Mask: "\x00\x00\x00\x00", + Gateway: "", + NIC: 1, + }, + }) + + var wq waiter.Queue + ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + defer ep.Close() + + if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}, nil); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Register for notification, then start connection attempt. + waitEntry, notifyCh := waiter.NewChannelEntry(nil) + wq.EventRegister(&waitEntry, waiter.EventOut) + defer wq.EventUnregister(&waitEntry) + + err = ep.Connect(tcpip.FullAddress{Addr: context.StackAddr, Port: context.StackPort}) + if err != tcpip.ErrConnectStarted { + t.Fatalf("Unexpected return value from Connect: %v", err) + } + + <-notifyCh + err = ep.GetSockOpt(tcpip.ErrorOption{}) + if err != nil { + t.Fatalf("Connect failed: %v", err) + } + + // Write something. + data := []byte{1, 2, 3} + view := buffer.NewView(len(data)) + copy(view, data) + if _, err = ep.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Write failed: %v", err) + } + + // Read back what was written. 
+ wq.EventUnregister(&waitEntry) + wq.EventRegister(&waitEntry, waiter.EventIn) + rd, err := ep.Read(nil) + if err != nil { + if err != tcpip.ErrWouldBlock { + t.Fatalf("Read failed: %v", err) + } + <-notifyCh + rd, err = ep.Read(nil) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + } + + if bytes.Compare(data, rd) != 0 { + t.Fatalf("Data is different: want=%v, got=%v", data, rd) + } +} + +func TestPathMTUDiscovery(t *testing.T) { + // This test verifies the stack retransmits packets after it receives an + // ICMP packet indicating that the path MTU has been exceeded. + c := context.New(t, 1500) + defer c.Cleanup() + + // Create new connection with MSS of 1460. + const maxPayload = 1500 - header.TCPMinimumSize - header.IPv4MinimumSize + c.CreateConnectedWithRawOptions(789, 30000, nil, []byte{ + header.TCPOptionMSS, 4, byte(maxPayload / 256), byte(maxPayload % 256), + }) + + // Send 3200 bytes of data. + const writeSize = 3200 + data := buffer.NewView(writeSize) + for i := range data { + data[i] = byte(i) + } + + if _, err := c.EP.Write(tcpip.SlicePayload(data), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Write failed: %v", err) + } + + receivePackets := func(c *context.Context, sizes []int, which int, seqNum uint32) []byte { + var ret []byte + for i, size := range sizes { + p := c.GetPacket() + if i == which { + ret = p + } + checker.IPv4(t, p, + checker.PayloadLen(size+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(seqNum), + checker.AckNum(790), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + seqNum += uint32(size) + } + return ret + } + + // Receive three packets. + sizes := []int{maxPayload, maxPayload, writeSize - 2*maxPayload} + first := receivePackets(c, sizes, 0, uint32(c.IRS)+1) + + // Send "packet too big" messages back to netstack. 
+ const newMTU = 1200 + const newMaxPayload = newMTU - header.IPv4MinimumSize - header.TCPMinimumSize + mtu := []byte{0, 0, newMTU / 256, newMTU % 256} + c.SendICMPPacket(header.ICMPv4DstUnreachable, header.ICMPv4FragmentationNeeded, mtu, first, newMTU) + + // See retransmitted packets. None exceeding the new max. + sizes = []int{newMaxPayload, maxPayload - newMaxPayload, newMaxPayload, maxPayload - newMaxPayload, writeSize - 2*maxPayload} + receivePackets(c, sizes, -1, uint32(c.IRS)+1) +} + +func TestTCPEndpointProbe(t *testing.T) { + c := context.New(t, 1500) + defer c.Cleanup() + + invoked := make(chan struct{}) + c.Stack().AddTCPProbe(func(state stack.TCPEndpointState) { + // Validate that the endpoint ID is what we expect. + // + // We don't do an extensive validation of every field but a + // basic sanity test. + if got, want := state.ID.LocalAddress, tcpip.Address(context.StackAddr); got != want { + t.Fatalf("unexpected LocalAddress got: %d, want: %d", got, want) + } + if got, want := state.ID.LocalPort, c.Port; got != want { + t.Fatalf("unexpected LocalPort got: %d, want: %d", got, want) + } + if got, want := state.ID.RemoteAddress, tcpip.Address(context.TestAddr); got != want { + t.Fatalf("unexpected RemoteAddress got: %d, want: %d", got, want) + } + if got, want := state.ID.RemotePort, uint16(context.TestPort); got != want { + t.Fatalf("unexpected RemotePort got: %d, want: %d", got, want) + } + + invoked <- struct{}{} + }) + + c.CreateConnected(789, 30000, nil) + + data := []byte{1, 2, 3} + c.SendPacket(data, &context.Headers{ + SrcPort: context.TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: 790, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + }) + + select { + case <-invoked: + case <-time.After(100 * time.Millisecond): + t.Fatalf("TCP Probe function was not called") + } +} diff --git a/pkg/tcpip/transport/tcp/tcp_timestamp_test.go b/pkg/tcpip/transport/tcp/tcp_timestamp_test.go new file mode 100644 index 000000000..ae1b578c5 --- /dev/null 
+++ b/pkg/tcpip/transport/tcp/tcp_timestamp_test.go @@ -0,0 +1,302 @@ +package tcp_test + +import ( + "bytes" + "math/rand" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/checker" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp/testing/context" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// createConnectedWithTimestampOption creates and connects c.ep with the +// timestamp option enabled. +func createConnectedWithTimestampOption(c *context.Context) *context.RawEndpoint { + return c.CreateConnectedWithOptions(header.TCPSynOptions{TS: true, TSVal: 1}) +} + +// TestTimeStampEnabledConnect tests that netstack sends the timestamp option on +// an active connect and sets the TS Echo Reply fields correctly when the +// SYN-ACK also indicates support for the TS option and provides a TSVal. +func TestTimeStampEnabledConnect(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + rep := createConnectedWithTimestampOption(c) + + // Register for read and validate that we have data to read. + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + // The following tests ensure that TS option once enabled behaves + // correctly as described in + // https://tools.ietf.org/html/rfc7323#section-4.3. + // + // We are not testing delayed ACKs here, but we do test out of order + // packet delivery and filling the sequence number hole created due to + // the out of order packet. + // + // The test also verifies that the sequence numbers and timestamps are + // as expected. + data := []byte{1, 2, 3} + + // First we increment tsVal by a small amount. 
+ tsVal := rep.TSVal + 100 + rep.SendPacketWithTS(data, tsVal) + rep.VerifyACKWithTS(tsVal) + + // Next we send an out of order packet. + rep.NextSeqNum += 3 + tsVal += 200 + rep.SendPacketWithTS(data, tsVal) + + // The ACK should contain the original sequenceNumber and an older TS. + rep.NextSeqNum -= 6 + rep.VerifyACKWithTS(tsVal - 200) + + // Next we fill the hole and the returned ACK should contain the + // cumulative sequence number acking all data sent till now and have the + // latest timestamp sent below in its TSEcr field. + tsVal -= 100 + rep.SendPacketWithTS(data, tsVal) + rep.NextSeqNum += 3 + rep.VerifyACKWithTS(tsVal) + + // Increment tsVal by a large value that doesn't result in a wrap around. + tsVal += 0x7fffffff + rep.SendPacketWithTS(data, tsVal) + rep.VerifyACKWithTS(tsVal) + + // Increment tsVal again by a large value which should cause the + // timestamp value to wrap around. The returned ACK should contain the + // wrapped around timestamp in its tsEcr field and not the tsVal from + // the previous packet sent above. + tsVal += 0x7fffffff + rep.SendPacketWithTS(data, tsVal) + rep.VerifyACKWithTS(tsVal) + + select { + case <-ch: + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for data to arrive") + } + + // There should be 5 views to read and each of them should + // contain the same data. + for i := 0; i < 5; i++ { + got, err := c.EP.Read(nil) + if err != nil { + t.Fatalf("Unexpected error from Read: %v", err) + } + if want := data; bytes.Compare(got, want) != 0 { + t.Fatalf("Data is different: got: %v, want: %v", got, want) + } + } +} + +// TestTimeStampDisabledConnect tests that netstack sends timestamp option on an +// active connect but if the SYN-ACK doesn't specify the TS option then +// timestamp option is not enabled and future packets do not contain a +// timestamp. 
+func TestTimeStampDisabledConnect(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + + c.CreateConnectedWithOptions(header.TCPSynOptions{}) +} + +func timeStampEnabledAccept(t *testing.T, cookieEnabled bool, wndScale int, wndSize uint16) { + savedSynCountThreshold := tcp.SynRcvdCountThreshold + defer func() { + tcp.SynRcvdCountThreshold = savedSynCountThreshold + }() + + if cookieEnabled { + tcp.SynRcvdCountThreshold = 0 + } + c := context.New(t, defaultMTU) + defer c.Cleanup() + + t.Logf("Test w/ CookieEnabled = %v", cookieEnabled) + tsVal := rand.Uint32() + c.AcceptWithOptions(wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS, TS: true, TSVal: tsVal}) + + // Now send some data and validate that timestamp is echoed correctly in the ACK. + data := []byte{1, 2, 3} + view := buffer.NewView(len(data)) + copy(view, data) + + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Check that data is received and that the timestamp option TSEcr field + // matches the expected value. + b := c.GetPacket() + checker.IPv4(t, b, + // Add 12 bytes for the timestamp option + 2 NOPs to align at 4 + // byte boundary. + checker.PayloadLen(len(data)+header.TCPMinimumSize+12), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.Window(wndSize), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + checker.TCPTimestampChecker(true, 0, tsVal+1), + ), + ) +} + +// TestTimeStampEnabledAccept tests that if the SYN on a passive connect +// specifies the Timestamp option then the Timestamp option is sent on a SYN-ACK +// and echoes the tsVal field of the original SYN in the tcEcr field of the +// SYN-ACK. We cover the cases where SYN cookies are enabled/disabled and verify +// that Timestamp option is enabled in both cases if requested in the original +// SYN. 
+func TestTimeStampEnabledAccept(t *testing.T) { + testCases := []struct { + cookieEnabled bool + wndScale int + wndSize uint16 + }{ + {true, -1, 0xffff}, // When cookie is used window scaling is disabled. + {false, 5, 0x8000}, // 0x8000 * 2^5 = 1<<20 = 1MB window (the default). + } + for _, tc := range testCases { + timeStampEnabledAccept(t, tc.cookieEnabled, tc.wndScale, tc.wndSize) + } +} + +func timeStampDisabledAccept(t *testing.T, cookieEnabled bool, wndScale int, wndSize uint16) { + savedSynCountThreshold := tcp.SynRcvdCountThreshold + defer func() { + tcp.SynRcvdCountThreshold = savedSynCountThreshold + }() + if cookieEnabled { + tcp.SynRcvdCountThreshold = 0 + } + + c := context.New(t, defaultMTU) + defer c.Cleanup() + + t.Logf("Test w/ CookieEnabled = %v", cookieEnabled) + c.AcceptWithOptions(wndScale, header.TCPSynOptions{MSS: defaultIPv4MSS}) + + // Now send some data with the accepted connection endpoint and validate + // that no timestamp option is sent in the TCP segment. + data := []byte{1, 2, 3} + view := buffer.NewView(len(data)) + copy(view, data) + + if _, err := c.EP.Write(tcpip.SlicePayload(view), tcpip.WriteOptions{}); err != nil { + t.Fatalf("Unexpected error from Write: %v", err) + } + + // Check that data is received and that the timestamp option is disabled + // when SYN cookies are enabled/disabled. + b := c.GetPacket() + checker.IPv4(t, b, + checker.PayloadLen(len(data)+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(context.TestPort), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(790), + checker.Window(wndSize), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + checker.TCPTimestampChecker(false, 0, 0), + ), + ) +} + +// TestTimeStampDisabledAccept tests that Timestamp option is not used when the +// peer doesn't advertise it and connection is established with Accept(). 
+func TestTimeStampDisabledAccept(t *testing.T) { + testCases := []struct { + cookieEnabled bool + wndScale int + wndSize uint16 + }{ + {true, -1, 0xffff}, // When cookie is used window scaling is disabled. + {false, 5, 0x8000}, // 0x8000 * 2^5 = 1<<20 = 1MB window (the default). + } + for _, tc := range testCases { + timeStampDisabledAccept(t, tc.cookieEnabled, tc.wndScale, tc.wndSize) + } +} + +func TestSendGreaterThanMTUWithOptions(t *testing.T) { + const maxPayload = 100 + c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload)) + defer c.Cleanup() + + createConnectedWithTimestampOption(c) + testBrokenUpWrite(t, c, maxPayload) +} + +func TestSegmentDropWhenTimestampMissing(t *testing.T) { + const maxPayload = 100 + c := context.New(t, uint32(header.TCPMinimumSize+header.IPv4MinimumSize+maxPayload)) + defer c.Cleanup() + + rep := createConnectedWithTimestampOption(c) + + // Register for read. + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) + + stk := c.Stack() + droppedPackets := stk.Stats().DroppedPackets + data := []byte{1, 2, 3} + // Save the sequence number as we will reset it later down + // in the test. + savedSeqNum := rep.NextSeqNum + rep.SendPacket(data, nil) + + select { + case <-ch: + t.Fatalf("Got data to read when we expect packet to be dropped") + case <-time.After(1 * time.Second): + // We expect that no data will be available to read. + } + + // Assert that DroppedPackets was incremented by 1. + if got, want := stk.Stats().DroppedPackets, droppedPackets+1; got != want { + t.Fatalf("incorrect number of dropped packets, got: %v, want: %v", got, want) + } + + droppedPackets = stk.Stats().DroppedPackets + // Reset the sequence number so that the other endpoint accepts + // this segment and does not treat it like an out of order delivery. + rep.NextSeqNum = savedSeqNum + // Now send a packet with timestamp and we should get the data. 
+ rep.SendPacketWithTS(data, rep.TSVal+1) + + select { + case <-ch: + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for data to arrive") + } + + // Assert that DroppedPackets was not incremented by 1. + if got, want := stk.Stats().DroppedPackets, droppedPackets; got != want { + t.Fatalf("incorrect number of dropped packets, got: %v, want: %v", got, want) + } + + // Issue a read and we should data. + got, err := c.EP.Read(nil) + if err != nil { + t.Fatalf("Unexpected error from Read: %v", err) + } + if want := data; bytes.Compare(got, want) != 0 { + t.Fatalf("Data is different: got: %v, want: %v", got, want) + } +} diff --git a/pkg/tcpip/transport/tcp/testing/context/BUILD b/pkg/tcpip/transport/tcp/testing/context/BUILD new file mode 100644 index 000000000..40850c3e7 --- /dev/null +++ b/pkg/tcpip/transport/tcp/testing/context/BUILD @@ -0,0 +1,27 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "context", + testonly = 1, + srcs = ["context.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp/testing/context", + visibility = [ + "//:sandbox", + ], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/checker", + "//pkg/tcpip/header", + "//pkg/tcpip/link/channel", + "//pkg/tcpip/link/sniffer", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/network/ipv6", + "//pkg/tcpip/seqnum", + "//pkg/tcpip/stack", + "//pkg/tcpip/transport/tcp", + "//pkg/waiter", + ], +) diff --git a/pkg/tcpip/transport/tcp/testing/context/context.go b/pkg/tcpip/transport/tcp/testing/context/context.go new file mode 100644 index 000000000..6a402d150 --- /dev/null +++ b/pkg/tcpip/transport/tcp/testing/context/context.go @@ -0,0 +1,900 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context provides a test context for use in tcp tests. 
It also +// provides helper methods to assert/check certain behaviours. +package context + +import ( + "bytes" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/checker" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/channel" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sniffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv6" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + // StackAddr is the IPv4 address assigned to the stack. + StackAddr = "\x0a\x00\x00\x01" + + // StackPort is used as the listening port in tests for passive + // connects. + StackPort = 1234 + + // TestAddr is the source address for packets sent to the stack via the + // link layer endpoint. + TestAddr = "\x0a\x00\x00\x02" + + // TestPort is the TCP port used for packets sent to the stack + // via the link layer endpoint. + TestPort = 4096 + + // StackV6Addr is the IPv6 address assigned to the stack. + StackV6Addr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01" + + // TestV6Addr is the source address for packets sent to the stack via + // the link layer endpoint. + TestV6Addr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02" + + // StackV4MappedAddr is StackAddr as a mapped v6 address. + StackV4MappedAddr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff" + StackAddr + + // TestV4MappedAddr is TestAddr as a mapped v6 address. + TestV4MappedAddr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff" + TestAddr + + // V4MappedWildcardAddr is the mapped v6 representation of 0.0.0.0. 
+ V4MappedWildcardAddr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00" + + // testInitialSequenceNumber is the initial sequence number sent in packets that + // are sent in response to a SYN or in the initial SYN sent to the stack. + testInitialSequenceNumber = 789 +) + +// defaultWindowScale value specified here depends on the tcp.DefaultBufferSize +// constant defined in the tcp/endpoint.go because the tcp.DefaultBufferSize is +// used in tcp.newHandshake to determine the window scale to use when sending a +// SYN/SYN-ACK. +var defaultWindowScale = tcp.FindWndScale(tcp.DefaultBufferSize) + +// Headers is used to represent the TCP header fields when building a +// new packet. +type Headers struct { + // SrcPort holds the src port value to be used in the packet. + SrcPort uint16 + + // DstPort holds the destination port value to be used in the packet. + DstPort uint16 + + // SeqNum is the value of the sequence number field in the TCP header. + SeqNum seqnum.Value + + // AckNum represents the acknowledgement number field in the TCP header. + AckNum seqnum.Value + + // Flags are the TCP flags in the TCP header. + Flags int + + // RcvWnd is the window to be advertised in the ReceiveWindow field of + // the TCP header. + RcvWnd seqnum.Size + + // TCPOpts holds the options to be sent in the option field of the TCP + // header. + TCPOpts []byte +} + +// Context provides an initialized Network stack and a link layer endpoint +// for use in TCP tests. +type Context struct { + t *testing.T + linkEP *channel.Endpoint + s *stack.Stack + + // IRS holds the initial sequence number in the SYN sent by endpoint in + // case of an active connect or the sequence number sent by the endpoint + // in the SYN-ACK sent in response to a SYN when listening in passive + // mode. + IRS seqnum.Value + + // Port holds the port bound by EP below in case of an active connect or + // the listening port number in case of a passive connect. 
+ Port uint16 + + // EP is the test endpoint in the stack owned by this context. This endpoint + // is used in various tests to either initiate an active connect or is used + // as a passive listening endpoint to accept inbound connections. + EP tcpip.Endpoint + + // Wq is the wait queue associated with EP and is used to block for events + // on EP. + WQ waiter.Queue + + // TimeStampEnabled is true if ep is connected with the timestamp option + // enabled. + TimeStampEnabled bool +} + +// New allocates and initializes a test context containing a new +// stack and a link-layer endpoint. +func New(t *testing.T, mtu uint32) *Context { + s := stack.New([]string{ipv4.ProtocolName, ipv6.ProtocolName}, []string{tcp.ProtocolName}) + + // Allow minimum send/receive buffer sizes to be 1 during tests. + if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SendBufferSizeOption{1, tcp.DefaultBufferSize, tcp.DefaultBufferSize * 10}); err != nil { + t.Fatalf("SetTransportProtocolOption failed: %v", err) + } + + if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.ReceiveBufferSizeOption{1, tcp.DefaultBufferSize, tcp.DefaultBufferSize * 10}); err != nil { + t.Fatalf("SetTransportProtocolOption failed: %v", err) + } + + // Some of the congestion control tests send up to 640 packets, we so + // set the channel size to 1000. 
+ id, linkEP := channel.New(1000, mtu, "") + if testing.Verbose() { + id = sniffer.New(id) + } + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, ipv4.ProtocolNumber, StackAddr); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + if err := s.AddAddress(1, ipv6.ProtocolNumber, StackV6Addr); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{ + { + Destination: "\x00\x00\x00\x00", + Mask: "\x00\x00\x00\x00", + Gateway: "", + NIC: 1, + }, + { + Destination: "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + Mask: "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + Gateway: "", + NIC: 1, + }, + }) + + return &Context{ + t: t, + s: s, + linkEP: linkEP, + } +} + +// Cleanup closes the context endpoint if required. +func (c *Context) Cleanup() { + if c.EP != nil { + c.EP.Close() + } +} + +// Stack returns a reference to the stack in the Context. +func (c *Context) Stack() *stack.Stack { + return c.s +} + +// CheckNoPacketTimeout verifies that no packet is received during the time +// specified by wait. +func (c *Context) CheckNoPacketTimeout(errMsg string, wait time.Duration) { + select { + case <-c.linkEP.C: + c.t.Fatalf(errMsg) + + case <-time.After(wait): + } +} + +// CheckNoPacket verifies that no packet is received for 1 second. +func (c *Context) CheckNoPacket(errMsg string) { + c.CheckNoPacketTimeout(errMsg, 1*time.Second) +} + +// GetPacket reads a packet from the link layer endpoint and verifies +// that it is an IPv4 packet with the expected source and destination +// addresses. It will fail with an error if no packet is received for +// 2 seconds. 
+func (c *Context) GetPacket() []byte { + select { + case p := <-c.linkEP.C: + if p.Proto != ipv4.ProtocolNumber { + c.t.Fatalf("Bad network protocol: got %v, wanted %v", p.Proto, ipv4.ProtocolNumber) + } + b := make([]byte, len(p.Header)+len(p.Payload)) + copy(b, p.Header) + copy(b[len(p.Header):], p.Payload) + + checker.IPv4(c.t, b, checker.SrcAddr(StackAddr), checker.DstAddr(TestAddr)) + return b + + case <-time.After(2 * time.Second): + c.t.Fatalf("Packet wasn't written out") + } + + return nil +} + +// SendICMPPacket builds and sends an ICMPv4 packet via the link layer endpoint. +func (c *Context) SendICMPPacket(typ header.ICMPv4Type, code uint8, p1, p2 []byte, maxTotalSize int) { + // Allocate a buffer data and headers. + buf := buffer.NewView(header.IPv4MinimumSize + header.ICMPv4MinimumSize + len(p1) + len(p2)) + if len(buf) > maxTotalSize { + buf = buf[:maxTotalSize] + } + + ip := header.IPv4(buf) + ip.Encode(&header.IPv4Fields{ + IHL: header.IPv4MinimumSize, + TotalLength: uint16(len(buf)), + TTL: 65, + Protocol: uint8(header.ICMPv4ProtocolNumber), + SrcAddr: TestAddr, + DstAddr: StackAddr, + }) + ip.SetChecksum(^ip.CalculateChecksum()) + + icmp := header.ICMPv4(buf[header.IPv4MinimumSize:]) + icmp.SetType(typ) + icmp.SetCode(code) + + copy(icmp[header.ICMPv4MinimumSize:], p1) + copy(icmp[header.ICMPv4MinimumSize+len(p1):], p2) + + // Inject packet. + var views [1]buffer.View + vv := buf.ToVectorisedView(views) + c.linkEP.Inject(ipv4.ProtocolNumber, &vv) +} + +// SendPacket builds and sends a TCP segment(with the provided payload & TCP +// headers) in an IPv4 packet via the link layer endpoint. +func (c *Context) SendPacket(payload []byte, h *Headers) { + // Allocate a buffer for data and headers. + buf := buffer.NewView(header.TCPMinimumSize + header.IPv4MinimumSize + len(h.TCPOpts) + len(payload)) + copy(buf[len(buf)-len(payload):], payload) + copy(buf[len(buf)-len(payload)-len(h.TCPOpts):], h.TCPOpts) + + // Initialize the IP header. 
+ ip := header.IPv4(buf) + ip.Encode(&header.IPv4Fields{ + IHL: header.IPv4MinimumSize, + TotalLength: uint16(len(buf)), + TTL: 65, + Protocol: uint8(tcp.ProtocolNumber), + SrcAddr: TestAddr, + DstAddr: StackAddr, + }) + ip.SetChecksum(^ip.CalculateChecksum()) + + // Initialize the TCP header. + t := header.TCP(buf[header.IPv4MinimumSize:]) + t.Encode(&header.TCPFields{ + SrcPort: h.SrcPort, + DstPort: h.DstPort, + SeqNum: uint32(h.SeqNum), + AckNum: uint32(h.AckNum), + DataOffset: uint8(header.TCPMinimumSize + len(h.TCPOpts)), + Flags: uint8(h.Flags), + WindowSize: uint16(h.RcvWnd), + }) + + // Calculate the TCP pseudo-header checksum. + xsum := header.Checksum([]byte(TestAddr), 0) + xsum = header.Checksum([]byte(StackAddr), xsum) + xsum = header.Checksum([]byte{0, uint8(tcp.ProtocolNumber)}, xsum) + + // Calculate the TCP checksum and set it. + length := uint16(header.TCPMinimumSize + len(h.TCPOpts) + len(payload)) + xsum = header.Checksum(payload, xsum) + t.SetChecksum(^t.CalculateChecksum(xsum, length)) + + // Inject packet. + var views [1]buffer.View + vv := buf.ToVectorisedView(views) + c.linkEP.Inject(ipv4.ProtocolNumber, &vv) +} + +// SendAck sends an ACK packet. +func (c *Context) SendAck(seq seqnum.Value, bytesReceived int) { + c.SendPacket(nil, &Headers{ + SrcPort: TestPort, + DstPort: c.Port, + Flags: header.TCPFlagAck, + SeqNum: seqnum.Value(testInitialSequenceNumber).Add(1), + AckNum: c.IRS.Add(1 + seqnum.Size(bytesReceived)), + RcvWnd: 30000, + }) +} + +// ReceiveAndCheckPacket reads a packet from the link layer endpoint and +// verifies that the packet packet payload of packet matches the slice +// of data indicated by offset & size. 
+func (c *Context) ReceiveAndCheckPacket(data []byte, offset, size int) { + b := c.GetPacket() + checker.IPv4(c.t, b, + checker.PayloadLen(size+header.TCPMinimumSize), + checker.TCP( + checker.DstPort(TestPort), + checker.SeqNum(uint32(c.IRS.Add(seqnum.Size(1+offset)))), + checker.AckNum(uint32(seqnum.Value(testInitialSequenceNumber).Add(1))), + checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)), + ), + ) + + pdata := data[offset:][:size] + if p := b[header.IPv4MinimumSize+header.TCPMinimumSize:]; bytes.Compare(pdata, p) != 0 { + c.t.Fatalf("Data is different: expected %v, got %v", pdata, p) + } +} + +// CreateV6Endpoint creates and initializes c.ep as a IPv6 Endpoint. If v6Only +// is true then it sets the IP_V6ONLY option on the socket to make it a IPv6 +// only endpoint instead of a default dual stack socket. +func (c *Context) CreateV6Endpoint(v6only bool) { + var err *tcpip.Error + c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &c.WQ) + if err != nil { + c.t.Fatalf("NewEndpoint failed: %v", err) + } + + var v tcpip.V6OnlyOption + if v6only { + v = 1 + } + if err := c.EP.SetSockOpt(v); err != nil { + c.t.Fatalf("SetSockOpt failed failed: %v", err) + } +} + +// GetV6Packet reads a single packet from the link layer endpoint of the context +// and asserts that it is an IPv6 Packet with the expected src/dest addresses. +func (c *Context) GetV6Packet() []byte { + select { + case p := <-c.linkEP.C: + if p.Proto != ipv6.ProtocolNumber { + c.t.Fatalf("Bad network protocol: got %v, wanted %v", p.Proto, ipv6.ProtocolNumber) + } + b := make([]byte, len(p.Header)+len(p.Payload)) + copy(b, p.Header) + copy(b[len(p.Header):], p.Payload) + + checker.IPv6(c.t, b, checker.SrcAddr(StackV6Addr), checker.DstAddr(TestV6Addr)) + return b + + case <-time.After(2 * time.Second): + c.t.Fatalf("Packet wasn't written out") + } + + return nil +} + +// SendV6Packet builds and sends an IPv6 Packet via the link layer endpoint of +// the context. 
+func (c *Context) SendV6Packet(payload []byte, h *Headers) { + // Allocate a buffer for data and headers. + buf := buffer.NewView(header.TCPMinimumSize + header.IPv6MinimumSize + len(payload)) + copy(buf[len(buf)-len(payload):], payload) + + // Initialize the IP header. + ip := header.IPv6(buf) + ip.Encode(&header.IPv6Fields{ + PayloadLength: uint16(header.TCPMinimumSize + len(payload)), + NextHeader: uint8(tcp.ProtocolNumber), + HopLimit: 65, + SrcAddr: TestV6Addr, + DstAddr: StackV6Addr, + }) + + // Initialize the TCP header. + t := header.TCP(buf[header.IPv6MinimumSize:]) + t.Encode(&header.TCPFields{ + SrcPort: h.SrcPort, + DstPort: h.DstPort, + SeqNum: uint32(h.SeqNum), + AckNum: uint32(h.AckNum), + DataOffset: header.TCPMinimumSize, + Flags: uint8(h.Flags), + WindowSize: uint16(h.RcvWnd), + }) + + // Calculate the TCP pseudo-header checksum. + xsum := header.Checksum([]byte(TestV6Addr), 0) + xsum = header.Checksum([]byte(StackV6Addr), xsum) + xsum = header.Checksum([]byte{0, uint8(tcp.ProtocolNumber)}, xsum) + + // Calculate the TCP checksum and set it. + length := uint16(header.TCPMinimumSize + len(payload)) + xsum = header.Checksum(payload, xsum) + t.SetChecksum(^t.CalculateChecksum(xsum, length)) + + // Inject packet. + var views [1]buffer.View + vv := buf.ToVectorisedView(views) + c.linkEP.Inject(ipv6.ProtocolNumber, &vv) +} + +// CreateConnected creates a connected TCP endpoint. +func (c *Context) CreateConnected(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf *tcpip.ReceiveBufferSizeOption) { + c.CreateConnectedWithRawOptions(iss, rcvWnd, epRcvBuf, nil) +} + +// CreateConnectedWithRawOptions creates a connected TCP endpoint and sends +// the specified option bytes as the Option field in the initial SYN packet. +// +// It also sets the receive buffer for the endpoint to the specified +// value in epRcvBuf. 
+func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf *tcpip.ReceiveBufferSizeOption, options []byte) { + // Create TCP endpoint. + var err *tcpip.Error + c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ) + if err != nil { + c.t.Fatalf("NewEndpoint failed: %v", err) + } + + if epRcvBuf != nil { + if err := c.EP.SetSockOpt(*epRcvBuf); err != nil { + c.t.Fatalf("SetSockOpt failed failed: %v", err) + } + } + + // Start connection attempt. + waitEntry, notifyCh := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&waitEntry, waiter.EventOut) + defer c.WQ.EventUnregister(&waitEntry) + + err = c.EP.Connect(tcpip.FullAddress{Addr: TestAddr, Port: TestPort}) + if err != tcpip.ErrConnectStarted { + c.t.Fatalf("Unexpected return value from Connect: %v", err) + } + + // Receive SYN packet. + b := c.GetPacket() + checker.IPv4(c.t, b, + checker.TCP( + checker.DstPort(TestPort), + checker.TCPFlags(header.TCPFlagSyn), + ), + ) + + tcp := header.TCP(header.IPv4(b).Payload()) + c.IRS = seqnum.Value(tcp.SequenceNumber()) + + c.SendPacket(nil, &Headers{ + SrcPort: tcp.DestinationPort(), + DstPort: tcp.SourcePort(), + Flags: header.TCPFlagSyn | header.TCPFlagAck, + SeqNum: iss, + AckNum: c.IRS.Add(1), + RcvWnd: rcvWnd, + TCPOpts: options, + }) + + // Receive ACK packet. + checker.IPv4(c.t, c.GetPacket(), + checker.TCP( + checker.DstPort(TestPort), + checker.TCPFlags(header.TCPFlagAck), + checker.SeqNum(uint32(c.IRS)+1), + checker.AckNum(uint32(iss)+1), + ), + ) + + // Wait for connection to be established. 
+ select { + case <-notifyCh: + err = c.EP.GetSockOpt(tcpip.ErrorOption{}) + if err != nil { + c.t.Fatalf("Unexpected error when connecting: %v", err) + } + case <-time.After(1 * time.Second): + c.t.Fatalf("Timed out waiting for connection") + } + + c.Port = tcp.SourcePort() +} + +// RawEndpoint is just a small wrapper around a TCP endpoint's state to make +// sending data and ACK packets easy while being able to manipulate the sequence +// numbers and timestamp values as needed. +type RawEndpoint struct { + C *Context + SrcPort uint16 + DstPort uint16 + Flags int + NextSeqNum seqnum.Value + AckNum seqnum.Value + WndSize seqnum.Size + RecentTS uint32 // Stores the latest timestamp to echo back. + TSVal uint32 // TSVal stores the last timestamp sent by this endpoint. + + // SackPermitted is true if SACKPermitted option was negotiated for this endpoint. + SACKPermitted bool +} + +// SendPacketWithTS embeds the provided tsVal in the Timestamp option +// for the packet to be sent out. +func (r *RawEndpoint) SendPacketWithTS(payload []byte, tsVal uint32) { + r.TSVal = tsVal + tsOpt := [12]byte{header.TCPOptionNOP, header.TCPOptionNOP} + header.EncodeTSOption(r.TSVal, r.RecentTS, tsOpt[2:]) + r.SendPacket(payload, tsOpt[:]) +} + +// SendPacket is a small wrapper function to build and send packets. +func (r *RawEndpoint) SendPacket(payload []byte, opts []byte) { + packetHeaders := &Headers{ + SrcPort: r.SrcPort, + DstPort: r.DstPort, + Flags: r.Flags, + SeqNum: r.NextSeqNum, + AckNum: r.AckNum, + RcvWnd: r.WndSize, + TCPOpts: opts, + } + r.C.SendPacket(payload, packetHeaders) + r.NextSeqNum = r.NextSeqNum.Add(seqnum.Size(len(payload))) +} + +// VerifyACKWithTS verifies that the tsEcr field in the ack matches the provided +// tsVal. 
+func (r *RawEndpoint) VerifyACKWithTS(tsVal uint32) { + // Read ACK and verify that tsEcr of ACK packet is [1,2,3,4] + ackPacket := r.C.GetPacket() + checker.IPv4(r.C.t, ackPacket, + checker.TCP( + checker.DstPort(r.SrcPort), + checker.TCPFlags(header.TCPFlagAck), + checker.SeqNum(uint32(r.AckNum)), + checker.AckNum(uint32(r.NextSeqNum)), + checker.TCPTimestampChecker(true, 0, tsVal), + ), + ) + // Store the parsed TSVal from the ack as recentTS. + tcpSeg := header.TCP(header.IPv4(ackPacket).Payload()) + opts := tcpSeg.ParsedOptions() + r.RecentTS = opts.TSVal +} + +// VerifyACKNoSACK verifies that the ACK does not contain a SACK block. +func (r *RawEndpoint) VerifyACKNoSACK() { + r.VerifyACKHasSACK(nil) +} + +// VerifyACKHasSACK verifies that the ACK contains the specified SACKBlocks. +func (r *RawEndpoint) VerifyACKHasSACK(sackBlocks []header.SACKBlock) { + // Read ACK and verify that the TCP options in the segment do + // not contain a SACK block. + ackPacket := r.C.GetPacket() + checker.IPv4(r.C.t, ackPacket, + checker.TCP( + checker.DstPort(r.SrcPort), + checker.TCPFlags(header.TCPFlagAck), + checker.SeqNum(uint32(r.AckNum)), + checker.AckNum(uint32(r.NextSeqNum)), + checker.TCPSACKBlockChecker(sackBlocks), + ), + ) +} + +// CreateConnectedWithOptions creates and connects c.ep with the specified TCP +// options enabled and returns a RawEndpoint which represents the other end of +// the connection. +// +// It also verifies where required(eg.Timestamp) that the ACK to the SYN-ACK +// does not carry an option that was not requested. +func (c *Context) CreateConnectedWithOptions(wantOptions header.TCPSynOptions) *RawEndpoint { + var err *tcpip.Error + c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ) + if err != nil { + c.t.Fatalf("c.s.NewEndpoint(tcp, ipv4...) = %v", err) + } + + // Start connection attempt. 
+ waitEntry, notifyCh := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&waitEntry, waiter.EventOut) + defer c.WQ.EventUnregister(&waitEntry) + + testFullAddr := tcpip.FullAddress{Addr: TestAddr, Port: TestPort} + err = c.EP.Connect(testFullAddr) + if err != tcpip.ErrConnectStarted { + c.t.Fatalf("c.ep.Connect(%v) = %v", testFullAddr, err) + } + // Receive SYN packet. + b := c.GetPacket() + // Validate that the syn has the timestamp option and a valid + // TS value. + checker.IPv4(c.t, b, + checker.TCP( + checker.DstPort(TestPort), + checker.TCPFlags(header.TCPFlagSyn), + checker.TCPSynOptions(header.TCPSynOptions{ + MSS: uint16(c.linkEP.MTU() - header.IPv4MinimumSize - header.TCPMinimumSize), + TS: true, + WS: defaultWindowScale, + SACKPermitted: c.SACKEnabled(), + }), + ), + ) + tcpSeg := header.TCP(header.IPv4(b).Payload()) + synOptions := header.ParseSynOptions(tcpSeg.Options(), false) + + // Build options w/ tsVal to be sent in the SYN-ACK. + synAckOptions := make([]byte, 40) + offset := 0 + if wantOptions.TS { + offset += header.EncodeTSOption(wantOptions.TSVal, synOptions.TSVal, synAckOptions[offset:]) + } + if wantOptions.SACKPermitted { + offset += header.EncodeSACKPermittedOption(synAckOptions[offset:]) + } + + offset += header.AddTCPOptionPadding(synAckOptions, offset) + + // Build SYN-ACK. + c.IRS = seqnum.Value(tcpSeg.SequenceNumber()) + iss := seqnum.Value(testInitialSequenceNumber) + c.SendPacket(nil, &Headers{ + SrcPort: tcpSeg.DestinationPort(), + DstPort: tcpSeg.SourcePort(), + Flags: header.TCPFlagSyn | header.TCPFlagAck, + SeqNum: iss, + AckNum: c.IRS.Add(1), + RcvWnd: 30000, + TCPOpts: synAckOptions[:offset], + }) + + // Read ACK. + ackPacket := c.GetPacket() + + // Verify TCP header fields. 
+ tcpCheckers := []checker.TransportChecker{ + checker.DstPort(TestPort), + checker.TCPFlags(header.TCPFlagAck), + checker.SeqNum(uint32(c.IRS) + 1), + checker.AckNum(uint32(iss) + 1), + } + + // Verify that tsEcr of ACK packet is wantOptions.TSVal if the + // timestamp option was enabled, if not then we verify that + // there is no timestamp in the ACK packet. + if wantOptions.TS { + tcpCheckers = append(tcpCheckers, checker.TCPTimestampChecker(true, 0, wantOptions.TSVal)) + } else { + tcpCheckers = append(tcpCheckers, checker.TCPTimestampChecker(false, 0, 0)) + } + + checker.IPv4(c.t, ackPacket, checker.TCP(tcpCheckers...)) + + ackSeg := header.TCP(header.IPv4(ackPacket).Payload()) + ackOptions := ackSeg.ParsedOptions() + + // Wait for connection to be established. + select { + case <-notifyCh: + err = c.EP.GetSockOpt(tcpip.ErrorOption{}) + if err != nil { + c.t.Fatalf("Unexpected error when connecting: %v", err) + } + case <-time.After(1 * time.Second): + c.t.Fatalf("Timed out waiting for connection") + } + + // Store the source port in use by the endpoint. + c.Port = tcpSeg.SourcePort() + + // Mark in context that timestamp option is enabled for this endpoint. + c.TimeStampEnabled = true + + return &RawEndpoint{ + C: c, + SrcPort: tcpSeg.DestinationPort(), + DstPort: tcpSeg.SourcePort(), + Flags: header.TCPFlagAck | header.TCPFlagPsh, + NextSeqNum: iss + 1, + AckNum: c.IRS.Add(1), + WndSize: 30000, + RecentTS: ackOptions.TSVal, + TSVal: wantOptions.TSVal, + SACKPermitted: wantOptions.SACKPermitted, + } +} + +// AcceptWithOptions initializes a listening endpoint and connects to it with the +// provided options enabled. It also verifies that the SYN-ACK has the expected +// values for the provided options. +// +// The function returns a RawEndpoint representing the other end of the accepted +// endpoint. +func (c *Context) AcceptWithOptions(wndScale int, synOptions header.TCPSynOptions) *RawEndpoint { + // Create EP and start listening. 
+ wq := &waiter.Queue{} + ep, err := c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq) + if err != nil { + c.t.Fatalf("NewEndpoint failed: %v", err) + } + defer ep.Close() + + if err := ep.Bind(tcpip.FullAddress{Port: StackPort}, nil); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + if err := ep.Listen(10); err != nil { + c.t.Fatalf("Listen failed: %v", err) + } + + rep := c.PassiveConnectWithOptions(100, wndScale, synOptions) + + // Try to accept the connection. + we, ch := waiter.NewChannelEntry(nil) + wq.EventRegister(&we, waiter.EventIn) + defer wq.EventUnregister(&we) + + c.EP, _, err = ep.Accept() + if err == tcpip.ErrWouldBlock { + // Wait for connection to be established. + select { + case <-ch: + c.EP, _, err = ep.Accept() + if err != nil { + c.t.Fatalf("Accept failed: %v", err) + } + + case <-time.After(1 * time.Second): + c.t.Fatalf("Timed out waiting for accept") + } + } + return rep +} + +// PassiveConnect just disables WindowScaling and delegates the call to +// PassiveConnectWithOptions. +func (c *Context) PassiveConnect(maxPayload, wndScale int, synOptions header.TCPSynOptions) { + synOptions.WS = -1 + c.PassiveConnectWithOptions(maxPayload, wndScale, synOptions) +} + +// PassiveConnectWithOptions initiates a new connection (with the specified TCP +// options enabled) to the port on which the Context.ep is listening for new +// connections. It also validates that the SYN-ACK has the expected values for +// the enabled options. +// +// NOTE: MSS is not a negotiated option and it can be asymmetric +// in each direction. This function uses the maxPayload to set the MSS to be +// sent to the peer on a connect and validates that the MSS in the SYN-ACK +// response is equal to the MTU - (tcphdr len + iphdr len). +// +// wndScale is the expected window scale in the SYN-ACK and synOptions.WS is the +// value of the window scaling option to be sent in the SYN. If synOptions.WS > +// 0 then we send the WindowScale option. 
+func (c *Context) PassiveConnectWithOptions(maxPayload, wndScale int, synOptions header.TCPSynOptions) *RawEndpoint { + opts := make([]byte, 40) + offset := 0 + offset += header.EncodeMSSOption(uint32(maxPayload), opts) + + if synOptions.WS >= 0 { + offset += header.EncodeWSOption(3, opts[offset:]) + } + if synOptions.TS { + offset += header.EncodeTSOption(synOptions.TSVal, synOptions.TSEcr, opts[offset:]) + } + + if synOptions.SACKPermitted { + offset += header.EncodeSACKPermittedOption(opts[offset:]) + } + + paddingToAdd := 4 - offset%4 + // Now add any padding bytes that might be required to quad align the + // options. + for i := offset; i < offset+paddingToAdd; i++ { + opts[i] = header.TCPOptionNOP + } + offset += paddingToAdd + + // Send a SYN request. + iss := seqnum.Value(testInitialSequenceNumber) + c.SendPacket(nil, &Headers{ + SrcPort: TestPort, + DstPort: StackPort, + Flags: header.TCPFlagSyn, + SeqNum: iss, + RcvWnd: 30000, + TCPOpts: opts[:offset], + }) + + // Receive the SYN-ACK reply. Make sure MSS and other expected options + // are present. + b := c.GetPacket() + tcp := header.TCP(header.IPv4(b).Payload()) + c.IRS = seqnum.Value(tcp.SequenceNumber()) + + tcpCheckers := []checker.TransportChecker{ + checker.SrcPort(StackPort), + checker.DstPort(TestPort), + checker.TCPFlags(header.TCPFlagAck | header.TCPFlagSyn), + checker.AckNum(uint32(iss) + 1), + checker.TCPSynOptions(header.TCPSynOptions{MSS: synOptions.MSS, WS: wndScale, SACKPermitted: synOptions.SACKPermitted && c.SACKEnabled()}), + } + + // If TS option was enabled in the original SYN then add a checker to + // validate the Timestamp option in the SYN-ACK. 
+ if synOptions.TS { + tcpCheckers = append(tcpCheckers, checker.TCPTimestampChecker(synOptions.TS, 0, synOptions.TSVal)) + } else { + tcpCheckers = append(tcpCheckers, checker.TCPTimestampChecker(false, 0, 0)) + } + + checker.IPv4(c.t, b, checker.TCP(tcpCheckers...)) + rcvWnd := seqnum.Size(30000) + ackHeaders := &Headers{ + SrcPort: TestPort, + DstPort: StackPort, + Flags: header.TCPFlagAck, + SeqNum: iss + 1, + AckNum: c.IRS + 1, + RcvWnd: rcvWnd, + } + + // If WS was expected to be in effect then scale the advertised window + // correspondingly. + if synOptions.WS > 0 { + ackHeaders.RcvWnd = rcvWnd >> byte(synOptions.WS) + } + + parsedOpts := tcp.ParsedOptions() + if synOptions.TS { + // Echo the tsVal back to the peer in the tsEcr field of the + // timestamp option. + // Increment TSVal by 1 from the value sent in the SYN and echo + // the TSVal in the SYN-ACK in the TSEcr field. + opts := [12]byte{header.TCPOptionNOP, header.TCPOptionNOP} + header.EncodeTSOption(synOptions.TSVal+1, parsedOpts.TSVal, opts[2:]) + ackHeaders.TCPOpts = opts[:] + } + + // Send ACK. + c.SendPacket(nil, ackHeaders) + + c.Port = StackPort + + return &RawEndpoint{ + C: c, + SrcPort: TestPort, + DstPort: StackPort, + Flags: header.TCPFlagPsh | header.TCPFlagAck, + NextSeqNum: iss + 1, + AckNum: c.IRS + 1, + WndSize: rcvWnd, + SACKPermitted: synOptions.SACKPermitted && c.SACKEnabled(), + RecentTS: parsedOpts.TSVal, + TSVal: synOptions.TSVal + 1, + } +} + +// SACKEnabled returns true if the TCP Protocol option SACKEnabled is set to true +// for the Stack in the context. +func (c *Context) SACKEnabled() bool { + var v tcp.SACKEnabled + if err := c.Stack().TransportProtocolOption(tcp.ProtocolNumber, &v); err != nil { + // Stack doesn't support SACK. So just return. 
+ return false + } + return bool(v) +} diff --git a/pkg/tcpip/transport/tcp/timer.go b/pkg/tcpip/transport/tcp/timer.go new file mode 100644 index 000000000..7aa824d8f --- /dev/null +++ b/pkg/tcpip/transport/tcp/timer.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tcp + +import ( + "time" + + "gvisor.googlesource.com/gvisor/pkg/sleep" +) + +type timerState int + +const ( + timerStateDisabled timerState = iota + timerStateEnabled + timerStateOrphaned +) + +// timer is a timer implementation that reduces the interactions with the +// runtime timer infrastructure by letting timers run (and potentially +// eventually expire) even if they are stopped. It makes it cheaper to +// disable/reenable timers at the expense of spurious wakes. This is useful for +// cases when the same timer is disabled/reenabled repeatedly with relatively +// long timeouts farther into the future. +// +// TCP retransmit timers benefit from this because they the timeouts are long +// (currently at least 200ms), and get disabled when acks are received, and +// reenabled when new pending segments are sent. +// +// It is advantageous to avoid interacting with the runtime because it acquires +// a global mutex and performs O(log n) operations, where n is the global number +// of timers, whenever a timer is enabled or disabled, and may make a syscall. +// +// This struct is thread-compatible. +type timer struct { + // state is the current state of the timer, it can be one of the + // following values: + // disabled - the timer is disabled. + // orphaned - the timer is disabled, but the runtime timer is + // enabled, which means that it will evetually cause a + // spurious wake (unless it gets enabled again before + // then). 
+ // enabled - the timer is enabled, but the runtime timer may be set + // to an earlier expiration time due to a previous + // orphaned state. + state timerState + + // target is the expiration time of the current timer. It is only + // meaningful in the enabled state. + target time.Time + + // runtimeTarget is the expiration time of the runtime timer. It is + // meaningful in the enabled and orphaned states. + runtimeTarget time.Time + + // timer is the runtime timer used to wait on. + timer *time.Timer +} + +// init initializes the timer. Once it expires, it the given waker will be +// asserted. +func (t *timer) init(w *sleep.Waker) { + t.state = timerStateDisabled + + // Initialize a runtime timer that will assert the waker, then + // immediately stop it. + t.timer = time.AfterFunc(time.Hour, func() { + w.Assert() + }) + t.timer.Stop() +} + +// cleanup frees all resources associated with the timer. +func (t *timer) cleanup() { + t.timer.Stop() +} + +// checkExpiration checks if the given timer has actually expired, it should be +// called whenever a sleeper wakes up due to the waker being asserted, and is +// used to check if it's a supurious wake (due to a previously orphaned timer) +// or a legitimate one. +func (t *timer) checkExpiration() bool { + // Transition to fully disabled state if we're just consuming an + // orphaned timer. + if t.state == timerStateOrphaned { + t.state = timerStateDisabled + return false + } + + // The timer is enabled, but it may have expired early. Check if that's + // the case, and if so, reset the runtime timer to the correct time. + now := time.Now() + if now.Before(t.target) { + t.runtimeTarget = t.target + t.timer.Reset(t.target.Sub(now)) + return false + } + + // The timer has actually expired, disable it for now and inform the + // caller. + t.state = timerStateDisabled + return true +} + +// disable disables the timer, leaving it in an orphaned state if it wasn't +// already disabled. 
+func (t *timer) disable() { + if t.state != timerStateDisabled { + t.state = timerStateOrphaned + } +} + +// enabled returns true if the timer is currently enabled, false otherwise. +func (t *timer) enabled() bool { + return t.state == timerStateEnabled +} + +// enable enables the timer, programming the runtime timer if necessary. +func (t *timer) enable(d time.Duration) { + t.target = time.Now().Add(d) + + // Check if we need to set the runtime timer. + if t.state == timerStateDisabled || t.target.Before(t.runtimeTarget) { + t.runtimeTarget = t.target + t.timer.Reset(d) + } + + t.state = timerStateEnabled +} diff --git a/pkg/tcpip/transport/tcpconntrack/BUILD b/pkg/tcpip/transport/tcpconntrack/BUILD new file mode 100644 index 000000000..cf83ca134 --- /dev/null +++ b/pkg/tcpip/transport/tcpconntrack/BUILD @@ -0,0 +1,24 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "tcpconntrack", + srcs = ["tcp_conntrack.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcpconntrack", + visibility = ["//visibility:public"], + deps = [ + "//pkg/tcpip/header", + "//pkg/tcpip/seqnum", + ], +) + +go_test( + name = "tcpconntrack_test", + size = "small", + srcs = ["tcp_conntrack_test.go"], + deps = [ + ":tcpconntrack", + "//pkg/tcpip/header", + ], +) diff --git a/pkg/tcpip/transport/tcpconntrack/tcp_conntrack.go b/pkg/tcpip/transport/tcpconntrack/tcp_conntrack.go new file mode 100644 index 000000000..487f2572d --- /dev/null +++ b/pkg/tcpip/transport/tcpconntrack/tcp_conntrack.go @@ -0,0 +1,333 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tcpconntrack implements a TCP connection tracking object. 
It allows +// users with access to a segment stream to figure out when a connection is +// established, reset, and closed (and in the last case, who closed first). +package tcpconntrack + +import ( + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/seqnum" +) + +// Result is returned when the state of a TCB is updated in response to an +// inbound or outbound segment. +type Result int + +const ( + // ResultDrop indicates that the segment should be dropped. + ResultDrop Result = iota + + // ResultConnecting indicates that the connection remains in a + // connecting state. + ResultConnecting + + // ResultAlive indicates that the connection remains alive (connected). + ResultAlive + + // ResultReset indicates that the connection was reset. + ResultReset + + // ResultClosedByPeer indicates that the connection was gracefully + // closed, and the inbound stream was closed first. + ResultClosedByPeer + + // ResultClosedBySelf indicates that the connection was gracefully + // closed, and the outbound stream was closed first. + ResultClosedBySelf +) + +// TCB is a TCP Control Block. It holds state necessary to keep track of a TCP +// connection and inform the caller when the connection has been closed. +type TCB struct { + inbound stream + outbound stream + + // State handlers. + handlerInbound func(*TCB, header.TCP) Result + handlerOutbound func(*TCB, header.TCP) Result + + // firstFin holds a pointer to the first stream to send a FIN. + firstFin *stream + + // state is the current state of the stream. + state Result +} + +// Init initializes the state of the TCB according to the initial SYN. 
+func (t *TCB) Init(initialSyn header.TCP) { + t.handlerInbound = synSentStateInbound + t.handlerOutbound = synSentStateOutbound + + iss := seqnum.Value(initialSyn.SequenceNumber()) + t.outbound.una = iss + t.outbound.nxt = iss.Add(logicalLen(initialSyn)) + t.outbound.end = t.outbound.nxt + + // Even though "end" is a sequence number, we don't know the initial + // receive sequence number yet, so we store the window size until we get + // a SYN from the peer. + t.inbound.una = 0 + t.inbound.nxt = 0 + t.inbound.end = seqnum.Value(initialSyn.WindowSize()) + t.state = ResultConnecting +} + +// UpdateStateInbound updates the state of the TCB based on the supplied inbound +// segment. +func (t *TCB) UpdateStateInbound(tcp header.TCP) Result { + st := t.handlerInbound(t, tcp) + if st != ResultDrop { + t.state = st + } + return st +} + +// UpdateStateOutbound updates the state of the TCB based on the supplied +// outbound segment. +func (t *TCB) UpdateStateOutbound(tcp header.TCP) Result { + st := t.handlerOutbound(t, tcp) + if st != ResultDrop { + t.state = st + } + return st +} + +// IsAlive returns true as long as the connection is established(Alive) +// or connecting state. +func (t *TCB) IsAlive() bool { + return !t.inbound.rstSeen && !t.outbound.rstSeen && (!t.inbound.closed() || !t.outbound.closed()) +} + +// OutboundSendSequenceNumber returns the snd.NXT for the outbound stream. +func (t *TCB) OutboundSendSequenceNumber() seqnum.Value { + return t.outbound.nxt +} + +// adapResult modifies the supplied "Result" according to the state of the TCB; +// if r is anything other than "Alive", or if one of the streams isn't closed +// yet, it is returned unmodified. Otherwise it's converted to either +// ClosedBySelf or ClosedByPeer depending on which stream was closed first. +func (t *TCB) adaptResult(r Result) Result { + // Check the unmodified case. 
+ if r != ResultAlive || !t.inbound.closed() || !t.outbound.closed() { + return r + } + + // Find out which was closed first. + if t.firstFin == &t.outbound { + return ResultClosedBySelf + } + + return ResultClosedByPeer +} + +// synSentStateInbound is the state handler for inbound segments when the +// connection is in SYN-SENT state. +func synSentStateInbound(t *TCB, tcp header.TCP) Result { + flags := tcp.Flags() + ackPresent := flags&header.TCPFlagAck != 0 + ack := seqnum.Value(tcp.AckNumber()) + + // Ignore segment if ack is present but not acceptable. + if ackPresent && !(ack-1).InRange(t.outbound.una, t.outbound.nxt) { + return ResultConnecting + } + + // If reset is specified, we will let the packet through no matter what + // but we will also destroy the connection if the ACK is present (and + // implicitly acceptable). + if flags&header.TCPFlagRst != 0 { + if ackPresent { + t.inbound.rstSeen = true + return ResultReset + } + return ResultConnecting + } + + // Ignore segment if SYN is not set. + if flags&header.TCPFlagSyn == 0 { + return ResultConnecting + } + + // Update state informed by this SYN. + irs := seqnum.Value(tcp.SequenceNumber()) + t.inbound.una = irs + t.inbound.nxt = irs.Add(logicalLen(tcp)) + t.inbound.end += irs + + t.outbound.end = t.outbound.una.Add(seqnum.Size(tcp.WindowSize())) + + // If the ACK was set (it is acceptable), update our unacknowledgement + // tracking. + if ackPresent { + // Advance the "una" and "end" indices of the outbound stream. + if t.outbound.una.LessThan(ack) { + t.outbound.una = ack + } + + if end := ack.Add(seqnum.Size(tcp.WindowSize())); t.outbound.end.LessThan(end) { + t.outbound.end = end + } + } + + // Update handlers so that new calls will be handled by new state. + t.handlerInbound = allOtherInbound + t.handlerOutbound = allOtherOutbound + + return ResultAlive +} + +// synSentStateOutbound is the state handler for outbound segments when the +// connection is in SYN-SENT state. 
+func synSentStateOutbound(t *TCB, tcp header.TCP) Result { + // Drop outbound segments that aren't retransmits of the original one. + if tcp.Flags() != header.TCPFlagSyn || + tcp.SequenceNumber() != uint32(t.outbound.una) { + return ResultDrop + } + + // Update the receive window. We only remember the largest value seen. + if wnd := seqnum.Value(tcp.WindowSize()); wnd > t.inbound.end { + t.inbound.end = wnd + } + + return ResultConnecting +} + +// update updates the state of inbound and outbound streams, given the supplied +// inbound segment. For outbound segments, this same function can be called with +// swapped inbound/outbound streams. +func update(tcp header.TCP, inbound, outbound *stream, firstFin **stream) Result { + // Ignore segments out of the window. + s := seqnum.Value(tcp.SequenceNumber()) + if !inbound.acceptable(s, dataLen(tcp)) { + return ResultAlive + } + + flags := tcp.Flags() + if flags&header.TCPFlagRst != 0 { + inbound.rstSeen = true + return ResultReset + } + + // Ignore segments that don't have the ACK flag, and those with the SYN + // flag. + if flags&header.TCPFlagAck == 0 || flags&header.TCPFlagSyn != 0 { + return ResultAlive + } + + // Ignore segments that acknowledge not yet sent data. + ack := seqnum.Value(tcp.AckNumber()) + if outbound.nxt.LessThan(ack) { + return ResultAlive + } + + // Advance the "una" and "end" indices of the outbound stream. + if outbound.una.LessThan(ack) { + outbound.una = ack + } + + if end := ack.Add(seqnum.Size(tcp.WindowSize())); outbound.end.LessThan(end) { + outbound.end = end + } + + // Advance the "nxt" index of the inbound stream. + end := s.Add(logicalLen(tcp)) + if inbound.nxt.LessThan(end) { + inbound.nxt = end + } + + // Note the index of the FIN segment. And stash away a pointer to the + // first stream to see a FIN. 
+ if flags&header.TCPFlagFin != 0 && !inbound.finSeen { + inbound.finSeen = true + inbound.fin = end - 1 + + if *firstFin == nil { + *firstFin = inbound + } + } + + return ResultAlive +} + +// allOtherInbound is the state handler for inbound segments in all states +// except SYN-SENT. +func allOtherInbound(t *TCB, tcp header.TCP) Result { + return t.adaptResult(update(tcp, &t.inbound, &t.outbound, &t.firstFin)) +} + +// allOtherOutbound is the state handler for outbound segments in all states +// except SYN-SENT. +func allOtherOutbound(t *TCB, tcp header.TCP) Result { + return t.adaptResult(update(tcp, &t.outbound, &t.inbound, &t.firstFin)) +} + +// streams holds the state of a TCP unidirectional stream. +type stream struct { + // The interval [una, end) is the allowed interval as defined by the + // receiver, i.e., anything less than una has already been acknowledged + // and anything greater than or equal to end is beyond the receiver + // window. The interval [una, nxt) is the acknowledgable range, whose + // right edge indicates the sequence number of the next byte to be sent + // by the sender, i.e., anything greater than or equal to nxt hasn't + // been sent yet. + una seqnum.Value + nxt seqnum.Value + end seqnum.Value + + // finSeen indicates if a FIN has already been sent on this stream. + finSeen bool + + // fin is the sequence number of the FIN. It is only valid after finSeen + // is set to true. + fin seqnum.Value + + // rstSeen indicates if a RST has already been sent on this stream. + rstSeen bool +} + +// acceptable determines if the segment with the given sequence number and data +// length is acceptable, i.e., if it's within the [una, end) window or, in case +// the window is zero, if it's a packet with no payload and sequence number +// equal to una. 
+func (s *stream) acceptable(segSeq seqnum.Value, segLen seqnum.Size) bool { + wnd := s.una.Size(s.end) + if wnd == 0 { + return segLen == 0 && segSeq == s.una + } + + // Make sure [segSeq, seqSeq+segLen) is non-empty. + if segLen == 0 { + segLen = 1 + } + + return seqnum.Overlap(s.una, wnd, segSeq, segLen) +} + +// closed determines if the stream has already been closed. This happens when +// a FIN has been set by the sender and acknowledged by the receiver. +func (s *stream) closed() bool { + return s.finSeen && s.fin.LessThan(s.una) +} + +// dataLen returns the length of the TCP segment payload. +func dataLen(tcp header.TCP) seqnum.Size { + return seqnum.Size(len(tcp) - int(tcp.DataOffset())) +} + +// logicalLen calculates the logical length of the TCP segment. +func logicalLen(tcp header.TCP) seqnum.Size { + l := dataLen(tcp) + flags := tcp.Flags() + if flags&header.TCPFlagSyn != 0 { + l++ + } + if flags&header.TCPFlagFin != 0 { + l++ + } + return l +} diff --git a/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go b/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go new file mode 100644 index 000000000..50cab3132 --- /dev/null +++ b/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go @@ -0,0 +1,501 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tcpconntrack_test + +import ( + "testing" + + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcpconntrack" +) + +// connected creates a connection tracker TCB and sets it to a connected state +// by performing a 3-way handshake. +func connected(t *testing.T, iss, irs uint32, isw, irw uint16) *tcpconntrack.TCB { + // Send SYN. 
+ tcp := make(header.TCP, header.TCPMinimumSize) + tcp.Encode(&header.TCPFields{ + SeqNum: iss, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: irw, + }) + + tcb := tcpconntrack.TCB{} + tcb.Init(tcp) + + // Receive SYN-ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: irs, + AckNum: iss + 1, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn | header.TCPFlagAck, + WindowSize: isw, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Send ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: iss + 1, + AckNum: irs + 1, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck, + WindowSize: irw, + }) + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + return &tcb +} + +func TestConnectionRefused(t *testing.T) { + // Send SYN. + tcp := make(header.TCP, header.TCPMinimumSize) + tcp.Encode(&header.TCPFields{ + SeqNum: 1234, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 30000, + }) + + tcb := tcpconntrack.TCB{} + tcb.Init(tcp) + + // Receive RST. + tcp.Encode(&header.TCPFields{ + SeqNum: 789, + AckNum: 1235, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagRst | header.TCPFlagAck, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultReset { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultReset) + } +} + +func TestConnectionRefusedInSynRcvd(t *testing.T) { + // Send SYN. + tcp := make(header.TCP, header.TCPMinimumSize) + tcp.Encode(&header.TCPFields{ + SeqNum: 1234, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 30000, + }) + + tcb := tcpconntrack.TCB{} + tcb.Init(tcp) + + // Receive SYN. 
+ tcp.Encode(&header.TCPFields{ + SeqNum: 789, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Receive RST with no ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: 790, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagRst, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultReset { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultReset) + } +} + +func TestConnectionResetInSynRcvd(t *testing.T) { + // Send SYN. + tcp := make(header.TCP, header.TCPMinimumSize) + tcp.Encode(&header.TCPFields{ + SeqNum: 1234, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 30000, + }) + + tcb := tcpconntrack.TCB{} + tcb.Init(tcp) + + // Receive SYN. + tcp.Encode(&header.TCPFields{ + SeqNum: 789, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Send RST with no ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: 1235, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagRst, + }) + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultReset { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultReset) + } +} + +func TestRetransmitOnSynSent(t *testing.T) { + // Send initial SYN. + tcp := make(header.TCP, header.TCPMinimumSize) + tcp.Encode(&header.TCPFields{ + SeqNum: 1234, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 30000, + }) + + tcb := tcpconntrack.TCB{} + tcb.Init(tcp) + + // Retransmit the same SYN. 
+ if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultConnecting { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultConnecting) + } +} + +func TestRetransmitOnSynRcvd(t *testing.T) { + // Send initial SYN. + tcp := make(header.TCP, header.TCPMinimumSize) + tcp.Encode(&header.TCPFields{ + SeqNum: 1234, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 30000, + }) + + tcb := tcpconntrack.TCB{} + tcb.Init(tcp) + + // Receive SYN. This will cause the state to go to SYN-RCVD. + tcp.Encode(&header.TCPFields{ + SeqNum: 789, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Retransmit the original SYN. + tcp.Encode(&header.TCPFields{ + SeqNum: 1234, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 30000, + }) + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Transmit a SYN-ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: 1234, + AckNum: 790, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn | header.TCPFlagAck, + WindowSize: 30000, + }) + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } +} + +func TestClosedBySelf(t *testing.T) { + tcb := connected(t, 1234, 789, 30000, 50000) + + // Send FIN. 
+ tcp := make(header.TCP, header.TCPMinimumSize) + tcp.Encode(&header.TCPFields{ + SeqNum: 1235, + AckNum: 790, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck | header.TCPFlagFin, + WindowSize: 30000, + }) + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Receive FIN/ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: 790, + AckNum: 1236, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck | header.TCPFlagFin, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Send ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: 1236, + AckNum: 791, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck, + WindowSize: 30000, + }) + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultClosedBySelf { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultClosedBySelf) + } +} + +func TestClosedByPeer(t *testing.T) { + tcb := connected(t, 1234, 789, 30000, 50000) + + // Receive FIN. + tcp := make(header.TCP, header.TCPMinimumSize) + tcp.Encode(&header.TCPFields{ + SeqNum: 790, + AckNum: 1235, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck | header.TCPFlagFin, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Send FIN/ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: 1235, + AckNum: 791, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck | header.TCPFlagFin, + WindowSize: 30000, + }) + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Receive ACK. 
+ tcp.Encode(&header.TCPFields{ + SeqNum: 791, + AckNum: 1236, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultClosedByPeer { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultClosedByPeer) + } +} + +func TestSendAndReceiveDataClosedBySelf(t *testing.T) { + sseq := uint32(1234) + rseq := uint32(789) + tcb := connected(t, sseq, rseq, 30000, 50000) + sseq++ + rseq++ + + // Send some data. + tcp := make(header.TCP, header.TCPMinimumSize+1024) + + for i := uint32(0); i < 10; i++ { + // Send some data. + tcp.Encode(&header.TCPFields{ + SeqNum: sseq, + AckNum: rseq, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck, + WindowSize: 30000, + }) + sseq += uint32(len(tcp)) - header.TCPMinimumSize + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Receive ack for data. + tcp.Encode(&header.TCPFields{ + SeqNum: rseq, + AckNum: sseq, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp[:header.TCPMinimumSize]); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + } + + for i := uint32(0); i < 10; i++ { + // Receive some data. + tcp.Encode(&header.TCPFields{ + SeqNum: rseq, + AckNum: sseq, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck, + WindowSize: 50000, + }) + rseq += uint32(len(tcp)) - header.TCPMinimumSize + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Send ack for data. 
+ tcp.Encode(&header.TCPFields{ + SeqNum: sseq, + AckNum: rseq, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck, + WindowSize: 30000, + }) + + if r := tcb.UpdateStateOutbound(tcp[:header.TCPMinimumSize]); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + } + + // Send FIN. + tcp = tcp[:header.TCPMinimumSize] + tcp.Encode(&header.TCPFields{ + SeqNum: sseq, + AckNum: rseq, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck | header.TCPFlagFin, + WindowSize: 30000, + }) + sseq++ + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Receive FIN/ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: rseq, + AckNum: sseq, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck | header.TCPFlagFin, + WindowSize: 50000, + }) + rseq++ + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Send ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: sseq, + AckNum: rseq, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck, + WindowSize: 30000, + }) + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultClosedBySelf { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultClosedBySelf) + } +} + +func TestIgnoreBadResetOnSynSent(t *testing.T) { + // Send SYN. + tcp := make(header.TCP, header.TCPMinimumSize) + tcp.Encode(&header.TCPFields{ + SeqNum: 1234, + AckNum: 0, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn, + WindowSize: 30000, + }) + + tcb := tcpconntrack.TCB{} + tcb.Init(tcp) + + // Receive a RST with a bad ACK, it should not cause the connection to + // be reset. 
+ acks := []uint32{1234, 1236, 1000, 5000} + flags := []uint8{header.TCPFlagRst, header.TCPFlagRst | header.TCPFlagAck} + for _, a := range acks { + for _, f := range flags { + tcp.Encode(&header.TCPFields{ + SeqNum: 789, + AckNum: a, + DataOffset: header.TCPMinimumSize, + Flags: f, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultConnecting { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + } + } + + // Complete the handshake. + // Receive SYN-ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: 789, + AckNum: 1235, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagSyn | header.TCPFlagAck, + WindowSize: 50000, + }) + + if r := tcb.UpdateStateInbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } + + // Send ACK. + tcp.Encode(&header.TCPFields{ + SeqNum: 1235, + AckNum: 790, + DataOffset: header.TCPMinimumSize, + Flags: header.TCPFlagAck, + WindowSize: 30000, + }) + + if r := tcb.UpdateStateOutbound(tcp); r != tcpconntrack.ResultAlive { + t.Fatalf("Bad result: got %v, want %v", r, tcpconntrack.ResultAlive) + } +} diff --git a/pkg/tcpip/transport/udp/BUILD b/pkg/tcpip/transport/udp/BUILD new file mode 100644 index 000000000..ac34a932e --- /dev/null +++ b/pkg/tcpip/transport/udp/BUILD @@ -0,0 +1,77 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_generics:defs.bzl", "go_template_instance") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "udp_state", + srcs = [ + "endpoint.go", + "endpoint_state.go", + "udp_packet_list.go", + ], + out = "udp_state.go", + imports = ["gvisor.googlesource.com/gvisor/pkg/tcpip/buffer"], + package = "udp", +) + +go_template_instance( + name = "udp_packet_list", + out = "udp_packet_list.go", + package = "udp", + prefix = "udpPacket", + template = "//pkg/ilist:generic_list", + types = { + "Linker": 
"*udpPacket", + }, +) + +go_library( + name = "udp", + srcs = [ + "endpoint.go", + "endpoint_state.go", + "protocol.go", + "udp_packet_list.go", + "udp_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/udp", + visibility = ["//visibility:public"], + deps = [ + "//pkg/sleep", + "//pkg/state", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/header", + "//pkg/tcpip/stack", + "//pkg/waiter", + ], +) + +go_test( + name = "udp_x_test", + size = "small", + srcs = ["udp_test.go"], + deps = [ + ":udp", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/checker", + "//pkg/tcpip/header", + "//pkg/tcpip/link/channel", + "//pkg/tcpip/link/sniffer", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/network/ipv6", + "//pkg/tcpip/stack", + "//pkg/waiter", + ], +) + +filegroup( + name = "autogen", + srcs = [ + "udp_packet_list.go", + ], + visibility = ["//:sandbox"], +) diff --git a/pkg/tcpip/transport/udp/endpoint.go b/pkg/tcpip/transport/udp/endpoint.go new file mode 100644 index 000000000..80fa88c4c --- /dev/null +++ b/pkg/tcpip/transport/udp/endpoint.go @@ -0,0 +1,746 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package udp + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/sleep" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +type udpPacket struct { + udpPacketEntry + senderAddress tcpip.FullAddress + data buffer.VectorisedView `state:".(buffer.VectorisedView)"` + // views is used as buffer for data when its length is large + // enough to store a VectorisedView. 
+ views [8]buffer.View `state:"nosave"` +} + +type endpointState int + +const ( + stateInitial endpointState = iota + stateBound + stateConnected + stateClosed +) + +// endpoint represents a UDP endpoint. This struct serves as the interface +// between users of the endpoint and the protocol implementation; it is legal to +// have concurrent goroutines make calls into the endpoint, they are properly +// synchronized. +type endpoint struct { + // The following fields are initialized at creation time and do not + // change throughout the lifetime of the endpoint. + stack *stack.Stack `state:"manual"` + netProto tcpip.NetworkProtocolNumber + waiterQueue *waiter.Queue + + // The following fields are used to manage the receive queue, and are + // protected by rcvMu. + rcvMu sync.Mutex `state:"nosave"` + rcvReady bool + rcvList udpPacketList + rcvBufSizeMax int `state:".(int)"` + rcvBufSize int + rcvClosed bool + + // The following fields are protected by the mu mutex. + mu sync.RWMutex `state:"nosave"` + sndBufSize int + id stack.TransportEndpointID + state endpointState + bindNICID tcpip.NICID + bindAddr tcpip.Address + regNICID tcpip.NICID + route stack.Route `state:"manual"` + dstPort uint16 + v6only bool + + // effectiveNetProtos contains the network protocols actually in use. In + // most cases it will only contain "netProto", but in cases like IPv6 + // endpoints with v6only set to false, this could include multiple + // protocols (e.g., IPv6 and IPv4) or a single different protocol (e.g., + // IPv4 when IPv6 endpoint is bound or connected to an IPv4 mapped + // address). 
+ effectiveNetProtos []tcpip.NetworkProtocolNumber +} + +func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) *endpoint { + return &endpoint{ + stack: stack, + netProto: netProto, + waiterQueue: waiterQueue, + rcvBufSizeMax: 32 * 1024, + sndBufSize: 32 * 1024, + } +} + +// NewConnectedEndpoint creates a new endpoint in the connected state using the +// provided route. +func NewConnectedEndpoint(stack *stack.Stack, r *stack.Route, id stack.TransportEndpointID, waiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) { + ep := newEndpoint(stack, r.NetProto, waiterQueue) + + // Register new endpoint so that packets are routed to it. + if err := stack.RegisterTransportEndpoint(r.NICID(), []tcpip.NetworkProtocolNumber{r.NetProto}, ProtocolNumber, id, ep); err != nil { + ep.Close() + return nil, err + } + + ep.id = id + ep.route = r.Clone() + ep.dstPort = id.RemotePort + ep.regNICID = r.NICID() + + ep.state = stateConnected + + return ep, nil +} + +// Close puts the endpoint in a closed state and frees all resources +// associated with it. +func (e *endpoint) Close() { + e.mu.Lock() + defer e.mu.Unlock() + + switch e.state { + case stateBound, stateConnected: + e.stack.UnregisterTransportEndpoint(e.regNICID, e.effectiveNetProtos, ProtocolNumber, e.id) + } + + // Close the receive list and drain it. + e.rcvMu.Lock() + e.rcvClosed = true + e.rcvBufSize = 0 + for !e.rcvList.Empty() { + p := e.rcvList.Front() + e.rcvList.Remove(p) + } + e.rcvMu.Unlock() + + e.route.Release() + + // Update the state. + e.state = stateClosed +} + +// Read reads data from the endpoint. This method does not block if +// there is no data pending. 
+func (e *endpoint) Read(addr *tcpip.FullAddress) (buffer.View, *tcpip.Error) { + e.rcvMu.Lock() + + if e.rcvList.Empty() { + err := tcpip.ErrWouldBlock + if e.rcvClosed { + err = tcpip.ErrClosedForReceive + } + e.rcvMu.Unlock() + return buffer.View{}, err + } + + p := e.rcvList.Front() + e.rcvList.Remove(p) + e.rcvBufSize -= p.data.Size() + + e.rcvMu.Unlock() + + if addr != nil { + *addr = p.senderAddress + } + + return p.data.ToView(), nil +} + +// prepareForWrite prepares the endpoint for sending data. In particular, it +// binds it if it's still in the initial state. To do so, it must first +// reacquire the mutex in exclusive mode. +// +// Returns true for retry if preparation should be retried. +func (e *endpoint) prepareForWrite(to *tcpip.FullAddress) (retry bool, err *tcpip.Error) { + switch e.state { + case stateInitial: + case stateConnected: + return false, nil + + case stateBound: + if to == nil { + return false, tcpip.ErrDestinationRequired + } + return false, nil + default: + return false, tcpip.ErrInvalidEndpointState + } + + e.mu.RUnlock() + defer e.mu.RLock() + + e.mu.Lock() + defer e.mu.Unlock() + + // The state changed when we released the shared locked and re-acquired + // it in exclusive mode. Try again. + if e.state != stateInitial { + return true, nil + } + + // The state is still 'initial', so try to bind the endpoint. + if err := e.bindLocked(tcpip.FullAddress{}, nil); err != nil { + return false, err + } + + return true, nil +} + +// Write writes data to the endpoint's peer. This method does not block +// if the data cannot be written. +func (e *endpoint) Write(p tcpip.Payload, opts tcpip.WriteOptions) (uintptr, *tcpip.Error) { + // MSG_MORE is unimplemented. (This also means that MSG_EOR is a no-op.) + if opts.More { + return 0, tcpip.ErrInvalidOptionValue + } + + to := opts.To + + e.mu.RLock() + defer e.mu.RUnlock() + + // Prepare for write. 
+ for { + retry, err := e.prepareForWrite(to) + if err != nil { + return 0, err + } + + if !retry { + break + } + } + + var route *stack.Route + var dstPort uint16 + if to == nil { + route = &e.route + dstPort = e.dstPort + + if route.IsResolutionRequired() { + // Promote lock to exclusive if using a shared route, given that it may need to + // change in Route.Resolve() call below. + e.mu.RUnlock() + defer e.mu.RLock() + + e.mu.Lock() + defer e.mu.Unlock() + + // Recheck state after lock was re-acquired. + if e.state != stateConnected { + return 0, tcpip.ErrInvalidEndpointState + } + } + } else { + // Reject destination address if it goes through a different + // NIC than the endpoint was bound to. + nicid := to.NIC + if e.bindNICID != 0 { + if nicid != 0 && nicid != e.bindNICID { + return 0, tcpip.ErrNoRoute + } + + nicid = e.bindNICID + } + + toCopy := *to + to = &toCopy + netProto, err := e.checkV4Mapped(to, true) + if err != nil { + return 0, err + } + + // Find the enpoint. + r, err := e.stack.FindRoute(nicid, e.bindAddr, to.Addr, netProto) + if err != nil { + return 0, err + } + defer r.Release() + + route = &r + dstPort = to.Port + } + + if route.IsResolutionRequired() { + waker := &sleep.Waker{} + if err := route.Resolve(waker); err != nil { + if err == tcpip.ErrWouldBlock { + // Link address needs to be resolved. Resolution was triggered the background. + // Better luck next time. + // + // TODO: queue up the request and send after link address + // is resolved. + route.RemoveWaker(waker) + return 0, tcpip.ErrNoLinkAddress + } + return 0, err + } + } + + v, err := p.Get(p.Size()) + if err != nil { + return 0, err + } + sendUDP(route, v, e.id.LocalPort, dstPort) + return uintptr(len(v)), nil +} + +// Peek only returns data from a single datagram, so do nothing here. +func (e *endpoint) Peek([][]byte) (uintptr, *tcpip.Error) { + return 0, nil +} + +// SetSockOpt sets a socket option. Currently not supported. 
+func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error { + // TODO: Actually implement this. + switch v := opt.(type) { + case tcpip.V6OnlyOption: + // We only recognize this option on v6 endpoints. + if e.netProto != header.IPv6ProtocolNumber { + return tcpip.ErrInvalidEndpointState + } + + e.mu.Lock() + defer e.mu.Unlock() + + // We only allow this to be set when we're in the initial state. + if e.state != stateInitial { + return tcpip.ErrInvalidEndpointState + } + + e.v6only = v != 0 + } + return nil +} + +// GetSockOpt implements tcpip.Endpoint.GetSockOpt. +func (e *endpoint) GetSockOpt(opt interface{}) *tcpip.Error { + switch o := opt.(type) { + case tcpip.ErrorOption: + return nil + + case *tcpip.SendBufferSizeOption: + e.mu.Lock() + *o = tcpip.SendBufferSizeOption(e.sndBufSize) + e.mu.Unlock() + return nil + + case *tcpip.ReceiveBufferSizeOption: + e.rcvMu.Lock() + *o = tcpip.ReceiveBufferSizeOption(e.rcvBufSizeMax) + e.rcvMu.Unlock() + return nil + + case *tcpip.V6OnlyOption: + // We only recognize this option on v6 endpoints. + if e.netProto != header.IPv6ProtocolNumber { + return tcpip.ErrUnknownProtocolOption + } + + e.mu.Lock() + v := e.v6only + e.mu.Unlock() + + *o = 0 + if v { + *o = 1 + } + return nil + + case *tcpip.ReceiveQueueSizeOption: + e.rcvMu.Lock() + if e.rcvList.Empty() { + *o = 0 + } else { + p := e.rcvList.Front() + *o = tcpip.ReceiveQueueSizeOption(p.data.Size()) + } + e.rcvMu.Unlock() + return nil + } + + return tcpip.ErrUnknownProtocolOption +} + +// sendUDP sends a UDP segment via the provided network endpoint and under the +// provided identity. +func sendUDP(r *stack.Route, data buffer.View, localPort, remotePort uint16) *tcpip.Error { + // Allocate a buffer for the UDP header. + hdr := buffer.NewPrependable(header.UDPMinimumSize + int(r.MaxHeaderLength())) + + // Initialize the header. 
+ udp := header.UDP(hdr.Prepend(header.UDPMinimumSize)) + + length := uint16(hdr.UsedLength()) + uint16(len(data)) + udp.Encode(&header.UDPFields{ + SrcPort: localPort, + DstPort: remotePort, + Length: length, + }) + + // Only calculate the checksum if offloading isn't supported. + if r.Capabilities()&stack.CapabilityChecksumOffload == 0 { + xsum := r.PseudoHeaderChecksum(ProtocolNumber) + if data != nil { + xsum = header.Checksum(data, xsum) + } + + udp.SetChecksum(^udp.CalculateChecksum(xsum, length)) + } + + return r.WritePacket(&hdr, data, ProtocolNumber) +} + +func (e *endpoint) checkV4Mapped(addr *tcpip.FullAddress, allowMismatch bool) (tcpip.NetworkProtocolNumber, *tcpip.Error) { + netProto := e.netProto + if header.IsV4MappedAddress(addr.Addr) { + // Fail if using a v4 mapped address on a v6only endpoint. + if e.v6only { + return 0, tcpip.ErrNoRoute + } + + netProto = header.IPv4ProtocolNumber + addr.Addr = addr.Addr[header.IPv6AddressSize-header.IPv4AddressSize:] + if addr.Addr == "\x00\x00\x00\x00" { + addr.Addr = "" + } + } + + // Fail if we're bound to an address length different from the one we're + // checking. + if l := len(e.id.LocalAddress); !allowMismatch && l != 0 && l != len(addr.Addr) { + return 0, tcpip.ErrInvalidEndpointState + } + + return netProto, nil +} + +// Connect connects the endpoint to its peer. Specifying a NIC is optional. +func (e *endpoint) Connect(addr tcpip.FullAddress) *tcpip.Error { + if addr.Port == 0 { + // We don't support connecting to port zero. 
+ return tcpip.ErrInvalidEndpointState + } + + e.mu.Lock() + defer e.mu.Unlock() + + nicid := addr.NIC + localPort := uint16(0) + switch e.state { + case stateInitial: + case stateBound, stateConnected: + localPort = e.id.LocalPort + if e.bindNICID == 0 { + break + } + + if nicid != 0 && nicid != e.bindNICID { + return tcpip.ErrInvalidEndpointState + } + + nicid = e.bindNICID + default: + return tcpip.ErrInvalidEndpointState + } + + netProto, err := e.checkV4Mapped(&addr, false) + if err != nil { + return err + } + + // Find a route to the desired destination. + r, err := e.stack.FindRoute(nicid, e.bindAddr, addr.Addr, netProto) + if err != nil { + return err + } + defer r.Release() + + id := stack.TransportEndpointID{ + LocalAddress: r.LocalAddress, + LocalPort: localPort, + RemotePort: addr.Port, + RemoteAddress: r.RemoteAddress, + } + + // Even if we're connected, this endpoint can still be used to send + // packets on a different network protocol, so we register both even if + // v6only is set to false and this is an ipv6 endpoint. + netProtos := []tcpip.NetworkProtocolNumber{netProto} + if e.netProto == header.IPv6ProtocolNumber && !e.v6only { + netProtos = []tcpip.NetworkProtocolNumber{ + header.IPv4ProtocolNumber, + header.IPv6ProtocolNumber, + } + } + + id, err = e.registerWithStack(nicid, netProtos, id) + if err != nil { + return err + } + + // Remove the old registration. + if e.id.LocalPort != 0 { + e.stack.UnregisterTransportEndpoint(e.regNICID, e.effectiveNetProtos, ProtocolNumber, e.id) + } + + e.id = id + e.route = r.Clone() + e.dstPort = addr.Port + e.regNICID = nicid + e.effectiveNetProtos = netProtos + + e.state = stateConnected + + e.rcvMu.Lock() + e.rcvReady = true + e.rcvMu.Unlock() + + return nil +} + +// ConnectEndpoint is not supported. +func (*endpoint) ConnectEndpoint(tcpip.Endpoint) *tcpip.Error { + return tcpip.ErrInvalidEndpointState +} + +// Shutdown closes the read and/or write end of the endpoint connection +// to its peer. 
+func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error { + e.mu.RLock() + defer e.mu.RUnlock() + + if e.state != stateConnected { + return tcpip.ErrNotConnected + } + + if flags&tcpip.ShutdownRead != 0 { + e.rcvMu.Lock() + wasClosed := e.rcvClosed + e.rcvClosed = true + e.rcvMu.Unlock() + + if !wasClosed { + e.waiterQueue.Notify(waiter.EventIn) + } + } + + return nil +} + +// Listen is not supported by UDP, it just fails. +func (*endpoint) Listen(int) *tcpip.Error { + return tcpip.ErrNotSupported +} + +// Accept is not supported by UDP, it just fails. +func (*endpoint) Accept() (tcpip.Endpoint, *waiter.Queue, *tcpip.Error) { + return nil, nil, tcpip.ErrNotSupported +} + +func (e *endpoint) registerWithStack(nicid tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, id stack.TransportEndpointID) (stack.TransportEndpointID, *tcpip.Error) { + if id.LocalPort != 0 { + // The endpoint already has a local port, just attempt to + // register it. + err := e.stack.RegisterTransportEndpoint(nicid, netProtos, ProtocolNumber, id, e) + return id, err + } + + // We need to find a port for the endpoint. + _, err := e.stack.PickEphemeralPort(func(p uint16) (bool, *tcpip.Error) { + id.LocalPort = p + err := e.stack.RegisterTransportEndpoint(nicid, netProtos, ProtocolNumber, id, e) + switch err { + case nil: + return true, nil + case tcpip.ErrPortInUse: + return false, nil + default: + return false, err + } + }) + + return id, err +} + +func (e *endpoint) bindLocked(addr tcpip.FullAddress, commit func() *tcpip.Error) *tcpip.Error { + // Don't allow binding once endpoint is not in the initial state + // anymore. + if e.state != stateInitial { + return tcpip.ErrInvalidEndpointState + } + + netProto, err := e.checkV4Mapped(&addr, false) + if err != nil { + return err + } + + // Expand netProtos to include v4 and v6 if the caller is binding to a + // wildcard (empty) address, and this is an IPv6 endpoint with v6only + // set to false. 
+ netProtos := []tcpip.NetworkProtocolNumber{netProto} + if netProto == header.IPv6ProtocolNumber && !e.v6only && addr.Addr == "" { + netProtos = []tcpip.NetworkProtocolNumber{ + header.IPv6ProtocolNumber, + header.IPv4ProtocolNumber, + } + } + + if len(addr.Addr) != 0 { + // A local address was specified, verify that it's valid. + if e.stack.CheckLocalAddress(addr.NIC, netProto, addr.Addr) == 0 { + return tcpip.ErrBadLocalAddress + } + } + + id := stack.TransportEndpointID{ + LocalPort: addr.Port, + LocalAddress: addr.Addr, + } + id, err = e.registerWithStack(addr.NIC, netProtos, id) + if err != nil { + return err + } + if commit != nil { + if err := commit(); err != nil { + // Unregister, the commit failed. + e.stack.UnregisterTransportEndpoint(addr.NIC, netProtos, ProtocolNumber, id) + return err + } + } + + e.id = id + e.regNICID = addr.NIC + e.effectiveNetProtos = netProtos + + // Mark endpoint as bound. + e.state = stateBound + + e.rcvMu.Lock() + e.rcvReady = true + e.rcvMu.Unlock() + + return nil +} + +// Bind binds the endpoint to a specific local address and port. +// Specifying a NIC is optional. +func (e *endpoint) Bind(addr tcpip.FullAddress, commit func() *tcpip.Error) *tcpip.Error { + e.mu.Lock() + defer e.mu.Unlock() + + err := e.bindLocked(addr, commit) + if err != nil { + return err + } + + e.bindNICID = addr.NIC + e.bindAddr = addr.Addr + + return nil +} + +// GetLocalAddress returns the address to which the endpoint is bound. +func (e *endpoint) GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) { + e.mu.RLock() + defer e.mu.RUnlock() + + return tcpip.FullAddress{ + NIC: e.regNICID, + Addr: e.id.LocalAddress, + Port: e.id.LocalPort, + }, nil +} + +// GetRemoteAddress returns the address to which the endpoint is connected. 
+func (e *endpoint) GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) { + e.mu.RLock() + defer e.mu.RUnlock() + + if e.state != stateConnected { + return tcpip.FullAddress{}, tcpip.ErrNotConnected + } + + return tcpip.FullAddress{ + NIC: e.regNICID, + Addr: e.id.RemoteAddress, + Port: e.id.RemotePort, + }, nil +} + +// Readiness returns the current readiness of the endpoint. For example, if +// waiter.EventIn is set, the endpoint is immediately readable. +func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { + // The endpoint is always writable. + result := waiter.EventOut & mask + + // Determine if the endpoint is readable if requested. + if (mask & waiter.EventIn) != 0 { + e.rcvMu.Lock() + if !e.rcvList.Empty() || e.rcvClosed { + result |= waiter.EventIn + } + e.rcvMu.Unlock() + } + + return result +} + +// HandlePacket is called by the stack when new packets arrive to this transport +// endpoint. +func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv *buffer.VectorisedView) { + // Get the header then trim it from the view. + hdr := header.UDP(vv.First()) + if int(hdr.Length()) > vv.Size() { + // Malformed packet. + return + } + + vv.TrimFront(header.UDPMinimumSize) + + e.rcvMu.Lock() + + // Drop the packet if our buffer is currently full. + if !e.rcvReady || e.rcvClosed || e.rcvBufSize >= e.rcvBufSizeMax { + e.rcvMu.Unlock() + return + } + + wasEmpty := e.rcvBufSize == 0 + + // Push new packet into receive list and increment the buffer size. + pkt := &udpPacket{ + senderAddress: tcpip.FullAddress{ + NIC: r.NICID(), + Addr: id.RemoteAddress, + Port: hdr.SourcePort(), + }, + } + pkt.data = vv.Clone(pkt.views[:]) + e.rcvList.PushBack(pkt) + e.rcvBufSize += vv.Size() + + e.rcvMu.Unlock() + + // Notify any waiters that there's data to be read now. + if wasEmpty { + e.waiterQueue.Notify(waiter.EventIn) + } +} + +// HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket. 
// saveData saves udpPacket.data field.
func (u *udpPacket) saveData() buffer.VectorisedView {
	// We cannot save u.data directly as u.data.views may alias to u.views,
	// which is not allowed by the state framework (in-struct pointer).
	// Clone into freshly allocated views instead.
	return u.data.Clone(nil)
}
+ e.rcvBufSizeMax = 0 + // Release the lock acquired in beforeSave() so regular endpoint closing + // logic can proceed after save. + e.rcvMu.Unlock() + return max +} + +// loadRcvBufSizeMax is invoked by stateify. +func (e *endpoint) loadRcvBufSizeMax(max int) { + e.rcvBufSizeMax = max +} + +// afterLoad is invoked by stateify. +func (e *endpoint) afterLoad() { + e.stack = stack.StackFromEnv + + if e.state != stateBound && e.state != stateConnected { + return + } + + netProto := e.effectiveNetProtos[0] + // Connect() and bindLocked() both assert + // + // netProto == header.IPv6ProtocolNumber + // + // before creating a multi-entry effectiveNetProtos. + if len(e.effectiveNetProtos) > 1 { + netProto = header.IPv6ProtocolNumber + } + + var err *tcpip.Error + if e.state == stateConnected { + e.route, err = e.stack.FindRoute(e.regNICID, e.bindAddr, e.id.RemoteAddress, netProto) + if err != nil { + panic(*err) + } + + e.id.LocalAddress = e.route.LocalAddress + } else if len(e.id.LocalAddress) != 0 { // stateBound + if e.stack.CheckLocalAddress(e.regNICID, netProto, e.id.LocalAddress) == 0 { + panic(tcpip.ErrBadLocalAddress) + } + } + + e.id, err = e.registerWithStack(e.regNICID, e.effectiveNetProtos, e.id) + if err != nil { + panic(*err) + } +} diff --git a/pkg/tcpip/transport/udp/protocol.go b/pkg/tcpip/transport/udp/protocol.go new file mode 100644 index 000000000..fa30e7201 --- /dev/null +++ b/pkg/tcpip/transport/udp/protocol.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package udp contains the implementation of the UDP transport protocol. To use +// it in the networking stack, this package must be added to the project, and +// activated on the stack by passing udp.ProtocolName (or "udp") as one of the +// transport protocols when calling stack.New(). 
Then endpoints can be created +// by passing udp.ProtocolNumber as the transport protocol number when calling +// Stack.NewEndpoint(). +package udp + +import ( + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + // ProtocolName is the string representation of the udp protocol name. + ProtocolName = "udp" + + // ProtocolNumber is the udp protocol number. + ProtocolNumber = header.UDPProtocolNumber +) + +type protocol struct{} + +// Number returns the udp protocol number. +func (*protocol) Number() tcpip.TransportProtocolNumber { + return ProtocolNumber +} + +// NewEndpoint creates a new udp endpoint. +func (*protocol) NewEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) { + return newEndpoint(stack, netProto, waiterQueue), nil +} + +// MinimumPacketSize returns the minimum valid udp packet size. +func (*protocol) MinimumPacketSize() int { + return header.UDPMinimumSize +} + +// ParsePorts returns the source and destination ports stored in the given udp +// packet. +func (*protocol) ParsePorts(v buffer.View) (src, dst uint16, err *tcpip.Error) { + h := header.UDP(v) + return h.SourcePort(), h.DestinationPort(), nil +} + +// HandleUnknownDestinationPacket handles packets targeted at this protocol but +// that don't match any existing endpoint. +func (p *protocol) HandleUnknownDestinationPacket(*stack.Route, stack.TransportEndpointID, *buffer.VectorisedView) bool { + return true +} + +// SetOption implements TransportProtocol.SetOption. +func (p *protocol) SetOption(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +// Option implements TransportProtocol.Option. 
+func (p *protocol) Option(option interface{}) *tcpip.Error { + return tcpip.ErrUnknownProtocolOption +} + +func init() { + stack.RegisterTransportProtocolFactory(ProtocolName, func() stack.TransportProtocol { + return &protocol{} + }) +} diff --git a/pkg/tcpip/transport/udp/udp_test.go b/pkg/tcpip/transport/udp/udp_test.go new file mode 100644 index 000000000..65c567952 --- /dev/null +++ b/pkg/tcpip/transport/udp/udp_test.go @@ -0,0 +1,625 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package udp_test + +import ( + "bytes" + "math/rand" + "testing" + "time" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/checker" + "gvisor.googlesource.com/gvisor/pkg/tcpip/header" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/channel" + "gvisor.googlesource.com/gvisor/pkg/tcpip/link/sniffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv4" + "gvisor.googlesource.com/gvisor/pkg/tcpip/network/ipv6" + "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/udp" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +const ( + stackV6Addr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01" + testV6Addr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02" + stackV4MappedAddr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff" + stackAddr + testV4MappedAddr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff" + testAddr + V4MappedWildcardAddr = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00" + + stackAddr = "\x0a\x00\x00\x01" + stackPort = 1234 + testAddr = "\x0a\x00\x00\x02" + testPort = 4096 + + // defaultMTU is the MTU, in bytes, used throughout the tests, except + // where another value is explicitly used. 
It is chosen to match the MTU + // of loopback interfaces on linux systems. + defaultMTU = 65536 +) + +type testContext struct { + t *testing.T + linkEP *channel.Endpoint + s *stack.Stack + + ep tcpip.Endpoint + wq waiter.Queue +} + +type headers struct { + srcPort uint16 + dstPort uint16 +} + +func newDualTestContext(t *testing.T, mtu uint32) *testContext { + s := stack.New([]string{ipv4.ProtocolName, ipv6.ProtocolName}, []string{udp.ProtocolName}) + + id, linkEP := channel.New(256, mtu, "") + if testing.Verbose() { + id = sniffer.New(id) + } + if err := s.CreateNIC(1, id); err != nil { + t.Fatalf("CreateNIC failed: %v", err) + } + + if err := s.AddAddress(1, ipv4.ProtocolNumber, stackAddr); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + if err := s.AddAddress(1, ipv6.ProtocolNumber, stackV6Addr); err != nil { + t.Fatalf("AddAddress failed: %v", err) + } + + s.SetRouteTable([]tcpip.Route{ + { + Destination: "\x00\x00\x00\x00", + Mask: "\x00\x00\x00\x00", + Gateway: "", + NIC: 1, + }, + { + Destination: "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + Mask: "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", + Gateway: "", + NIC: 1, + }, + }) + + return &testContext{ + t: t, + s: s, + linkEP: linkEP, + } +} + +func (c *testContext) cleanup() { + if c.ep != nil { + c.ep.Close() + } +} + +func (c *testContext) createV6Endpoint(v4only bool) { + var err *tcpip.Error + c.ep, err = c.s.NewEndpoint(udp.ProtocolNumber, ipv6.ProtocolNumber, &c.wq) + if err != nil { + c.t.Fatalf("NewEndpoint failed: %v", err) + } + + var v tcpip.V6OnlyOption + if v4only { + v = 1 + } + if err := c.ep.SetSockOpt(v); err != nil { + c.t.Fatalf("SetSockOpt failed failed: %v", err) + } +} + +func (c *testContext) getV6Packet() []byte { + select { + case p := <-c.linkEP.C: + if p.Proto != ipv6.ProtocolNumber { + c.t.Fatalf("Bad network protocol: got %v, wanted %v", p.Proto, ipv6.ProtocolNumber) + } + b := make([]byte, 
len(p.Header)+len(p.Payload)) + copy(b, p.Header) + copy(b[len(p.Header):], p.Payload) + + checker.IPv6(c.t, b, checker.SrcAddr(stackV6Addr), checker.DstAddr(testV6Addr)) + return b + + case <-time.After(2 * time.Second): + c.t.Fatalf("Packet wasn't written out") + } + + return nil +} + +func (c *testContext) getPacket() []byte { + select { + case p := <-c.linkEP.C: + if p.Proto != ipv4.ProtocolNumber { + c.t.Fatalf("Bad network protocol: got %v, wanted %v", p.Proto, ipv4.ProtocolNumber) + } + b := make([]byte, len(p.Header)+len(p.Payload)) + copy(b, p.Header) + copy(b[len(p.Header):], p.Payload) + + checker.IPv4(c.t, b, checker.SrcAddr(stackAddr), checker.DstAddr(testAddr)) + return b + + case <-time.After(2 * time.Second): + c.t.Fatalf("Packet wasn't written out") + } + + return nil +} + +func (c *testContext) sendV6Packet(payload []byte, h *headers) { + // Allocate a buffer for data and headers. + buf := buffer.NewView(header.UDPMinimumSize + header.IPv6MinimumSize + len(payload)) + copy(buf[len(buf)-len(payload):], payload) + + // Initialize the IP header. + ip := header.IPv6(buf) + ip.Encode(&header.IPv6Fields{ + PayloadLength: uint16(header.UDPMinimumSize + len(payload)), + NextHeader: uint8(udp.ProtocolNumber), + HopLimit: 65, + SrcAddr: testV6Addr, + DstAddr: stackV6Addr, + }) + + // Initialize the UDP header. + u := header.UDP(buf[header.IPv6MinimumSize:]) + u.Encode(&header.UDPFields{ + SrcPort: h.srcPort, + DstPort: h.dstPort, + Length: uint16(header.UDPMinimumSize + len(payload)), + }) + + // Calculate the UDP pseudo-header checksum. + xsum := header.Checksum([]byte(testV6Addr), 0) + xsum = header.Checksum([]byte(stackV6Addr), xsum) + xsum = header.Checksum([]byte{0, uint8(udp.ProtocolNumber)}, xsum) + + // Calculate the UDP checksum and set it. + length := uint16(header.UDPMinimumSize + len(payload)) + xsum = header.Checksum(payload, xsum) + u.SetChecksum(^u.CalculateChecksum(xsum, length)) + + // Inject packet. 
+ var views [1]buffer.View + vv := buf.ToVectorisedView(views) + c.linkEP.Inject(ipv6.ProtocolNumber, &vv) +} + +func (c *testContext) sendPacket(payload []byte, h *headers) { + // Allocate a buffer for data and headers. + buf := buffer.NewView(header.UDPMinimumSize + header.IPv4MinimumSize + len(payload)) + copy(buf[len(buf)-len(payload):], payload) + + // Initialize the IP header. + ip := header.IPv4(buf) + ip.Encode(&header.IPv4Fields{ + IHL: header.IPv4MinimumSize, + TotalLength: uint16(len(buf)), + TTL: 65, + Protocol: uint8(udp.ProtocolNumber), + SrcAddr: testAddr, + DstAddr: stackAddr, + }) + ip.SetChecksum(^ip.CalculateChecksum()) + + // Initialize the UDP header. + u := header.UDP(buf[header.IPv4MinimumSize:]) + u.Encode(&header.UDPFields{ + SrcPort: h.srcPort, + DstPort: h.dstPort, + Length: uint16(header.UDPMinimumSize + len(payload)), + }) + + // Calculate the UDP pseudo-header checksum. + xsum := header.Checksum([]byte(testAddr), 0) + xsum = header.Checksum([]byte(stackAddr), xsum) + xsum = header.Checksum([]byte{0, uint8(udp.ProtocolNumber)}, xsum) + + // Calculate the UDP checksum and set it. + length := uint16(header.UDPMinimumSize + len(payload)) + xsum = header.Checksum(payload, xsum) + u.SetChecksum(^u.CalculateChecksum(xsum, length)) + + // Inject packet. + var views [1]buffer.View + vv := buf.ToVectorisedView(views) + c.linkEP.Inject(ipv4.ProtocolNumber, &vv) +} + +func newPayload() []byte { + b := make([]byte, 30+rand.Intn(100)) + for i := range b { + b[i] = byte(rand.Intn(256)) + } + return b +} + +func testV4Read(c *testContext) { + // Send a packet. + payload := newPayload() + c.sendPacket(payload, &headers{ + srcPort: testPort, + dstPort: stackPort, + }) + + // Try to receive the data. + we, ch := waiter.NewChannelEntry(nil) + c.wq.EventRegister(&we, waiter.EventIn) + defer c.wq.EventUnregister(&we) + + var addr tcpip.FullAddress + v, err := c.ep.Read(&addr) + if err == tcpip.ErrWouldBlock { + // Wait for data to become available. 
+ select { + case <-ch: + v, err = c.ep.Read(&addr) + if err != nil { + c.t.Fatalf("Read failed: %v", err) + } + + case <-time.After(1 * time.Second): + c.t.Fatalf("Timed out waiting for data") + } + } + + // Check the peer address. + if addr.Addr != testAddr { + c.t.Fatalf("Unexpected remote address: got %v, want %v", addr.Addr, testAddr) + } + + // Check the payload. + if !bytes.Equal(payload, v) { + c.t.Fatalf("Bad payload: got %x, want %x", v, payload) + } +} + +func TestV4ReadOnV6(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Bind to wildcard. + if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}, nil); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + // Test acceptance. + testV4Read(c) +} + +func TestV4ReadOnBoundToV4MappedWildcard(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Bind to v4 mapped wildcard. + if err := c.ep.Bind(tcpip.FullAddress{Addr: V4MappedWildcardAddr, Port: stackPort}, nil); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + // Test acceptance. + testV4Read(c) +} + +func TestV4ReadOnBoundToV4Mapped(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Bind to local address. + if err := c.ep.Bind(tcpip.FullAddress{Addr: stackV4MappedAddr, Port: stackPort}, nil); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + // Test acceptance. + testV4Read(c) +} + +func TestV6ReadOnV6(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Bind to wildcard. + if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}, nil); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + // Send a packet. + payload := newPayload() + c.sendV6Packet(payload, &headers{ + srcPort: testPort, + dstPort: stackPort, + }) + + // Try to receive the data. 
+ we, ch := waiter.NewChannelEntry(nil) + c.wq.EventRegister(&we, waiter.EventIn) + defer c.wq.EventUnregister(&we) + + var addr tcpip.FullAddress + v, err := c.ep.Read(&addr) + if err == tcpip.ErrWouldBlock { + // Wait for data to become available. + select { + case <-ch: + v, err = c.ep.Read(&addr) + if err != nil { + c.t.Fatalf("Read failed: %v", err) + } + + case <-time.After(1 * time.Second): + c.t.Fatalf("Timed out waiting for data") + } + } + + // Check the peer address. + if addr.Addr != testV6Addr { + c.t.Fatalf("Unexpected remote address: got %v, want %v", addr.Addr, testAddr) + } + + // Check the payload. + if !bytes.Equal(payload, v) { + c.t.Fatalf("Bad payload: got %x, want %x", v, payload) + } +} + +func TestV4ReadOnV4(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + // Create v4 UDP endpoint. + var err *tcpip.Error + c.ep, err = c.s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &c.wq) + if err != nil { + c.t.Fatalf("NewEndpoint failed: %v", err) + } + + // Bind to wildcard. + if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}, nil); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + // Test acceptance. + testV4Read(c) +} + +func testDualWrite(c *testContext) uint16 { + // Write to V4 mapped address. + payload := buffer.View(newPayload()) + n, err := c.ep.Write(tcpip.SlicePayload(payload), tcpip.WriteOptions{ + To: &tcpip.FullAddress{Addr: testV4MappedAddr, Port: testPort}, + }) + if err != nil { + c.t.Fatalf("Write failed: %v", err) + } + if n != uintptr(len(payload)) { + c.t.Fatalf("Bad number of bytes written: got %v, want %v", n, len(payload)) + } + + // Check that we received the packet. + b := c.getPacket() + udp := header.UDP(header.IPv4(b).Payload()) + checker.IPv4(c.t, b, + checker.UDP( + checker.DstPort(testPort), + ), + ) + + port := udp.SourcePort() + + // Check the payload. 
+ if !bytes.Equal(payload, udp.Payload()) { + c.t.Fatalf("Bad payload: got %x, want %x", udp.Payload(), payload) + } + + // Write to v6 address. + payload = buffer.View(newPayload()) + n, err = c.ep.Write(tcpip.SlicePayload(payload), tcpip.WriteOptions{ + To: &tcpip.FullAddress{Addr: testV6Addr, Port: testPort}, + }) + if err != nil { + c.t.Fatalf("Write failed: %v", err) + } + if n != uintptr(len(payload)) { + c.t.Fatalf("Bad number of bytes written: got %v, want %v", n, len(payload)) + } + + // Check that we received the packet, and that the source port is the + // same as the one used in ipv4. + b = c.getV6Packet() + udp = header.UDP(header.IPv6(b).Payload()) + checker.IPv6(c.t, b, + checker.UDP( + checker.DstPort(testPort), + checker.SrcPort(port), + ), + ) + + // Check the payload. + if !bytes.Equal(payload, udp.Payload()) { + c.t.Fatalf("Bad payload: got %x, want %x", udp.Payload(), payload) + } + + return port +} + +func TestDualWriteUnbound(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + testDualWrite(c) +} + +func TestDualWriteBoundToWildcard(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Bind to wildcard. + if err := c.ep.Bind(tcpip.FullAddress{Port: stackPort}, nil); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + p := testDualWrite(c) + if p != stackPort { + c.t.Fatalf("Bad port: got %v, want %v", p, stackPort) + } +} + +func TestDualWriteConnectedToV6(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Connect to v6 address. 
+ if err := c.ep.Connect(tcpip.FullAddress{Addr: testV6Addr, Port: testPort}); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + testDualWrite(c) +} + +func TestDualWriteConnectedToV4Mapped(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Connect to v4 mapped address. + if err := c.ep.Connect(tcpip.FullAddress{Addr: testV4MappedAddr, Port: testPort}); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + testDualWrite(c) +} + +func TestV4WriteOnV6Only(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(true) + + // Write to V4 mapped address. + payload := buffer.View(newPayload()) + _, err := c.ep.Write(tcpip.SlicePayload(payload), tcpip.WriteOptions{ + To: &tcpip.FullAddress{Addr: testV4MappedAddr, Port: testPort}, + }) + if err != tcpip.ErrNoRoute { + c.t.Fatalf("Write returned unexpected error: got %v, want %v", err, tcpip.ErrNoRoute) + } +} + +func TestV6WriteOnBoundToV4Mapped(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Bind to v4 mapped address. + if err := c.ep.Bind(tcpip.FullAddress{Addr: stackV4MappedAddr, Port: stackPort}, nil); err != nil { + c.t.Fatalf("Bind failed: %v", err) + } + + // Write to v6 address. + payload := buffer.View(newPayload()) + _, err := c.ep.Write(tcpip.SlicePayload(payload), tcpip.WriteOptions{ + To: &tcpip.FullAddress{Addr: testV6Addr, Port: testPort}, + }) + if err != tcpip.ErrNoRoute { + c.t.Fatalf("Write returned unexpected error: got %v, want %v", err, tcpip.ErrNoRoute) + } +} + +func TestV6WriteOnConnected(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Connect to v6 address. + if err := c.ep.Connect(tcpip.FullAddress{Addr: testV6Addr, Port: testPort}); err != nil { + c.t.Fatalf("Connect failed: %v", err) + } + + // Write without destination. 
+ payload := buffer.View(newPayload()) + n, err := c.ep.Write(tcpip.SlicePayload(payload), tcpip.WriteOptions{}) + if err != nil { + c.t.Fatalf("Write failed: %v", err) + } + if n != uintptr(len(payload)) { + c.t.Fatalf("Bad number of bytes written: got %v, want %v", n, len(payload)) + } + + // Check that we received the packet. + b := c.getV6Packet() + udp := header.UDP(header.IPv6(b).Payload()) + checker.IPv6(c.t, b, + checker.UDP( + checker.DstPort(testPort), + ), + ) + + // Check the payload. + if !bytes.Equal(payload, udp.Payload()) { + c.t.Fatalf("Bad payload: got %x, want %x", udp.Payload(), payload) + } +} + +func TestV4WriteOnConnected(t *testing.T) { + c := newDualTestContext(t, defaultMTU) + defer c.cleanup() + + c.createV6Endpoint(false) + + // Connect to v4 mapped address. + if err := c.ep.Connect(tcpip.FullAddress{Addr: testV4MappedAddr, Port: testPort}); err != nil { + c.t.Fatalf("Connect failed: %v", err) + } + + // Write without destination. + payload := buffer.View(newPayload()) + n, err := c.ep.Write(tcpip.SlicePayload(payload), tcpip.WriteOptions{}) + if err != nil { + c.t.Fatalf("Write failed: %v", err) + } + if n != uintptr(len(payload)) { + c.t.Fatalf("Bad number of bytes written: got %v, want %v", n, len(payload)) + } + + // Check that we received the packet. + b := c.getPacket() + udp := header.UDP(header.IPv4(b).Payload()) + checker.IPv4(c.t, b, + checker.UDP( + checker.DstPort(testPort), + ), + ) + + // Check the payload. 
+ if !bytes.Equal(payload, udp.Payload()) { + c.t.Fatalf("Bad payload: got %x, want %x", udp.Payload(), payload) + } +} diff --git a/pkg/tcpip/transport/unix/BUILD b/pkg/tcpip/transport/unix/BUILD new file mode 100644 index 000000000..47bc7a649 --- /dev/null +++ b/pkg/tcpip/transport/unix/BUILD @@ -0,0 +1,37 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "unix_state", + srcs = [ + "connectioned.go", + "connectionless.go", + "unix.go", + ], + out = "unix_state.go", + package = "unix", +) + +go_library( + name = "unix", + srcs = [ + "connectioned.go", + "connectioned_state.go", + "connectionless.go", + "unix.go", + "unix_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/unix", + visibility = ["//:sandbox"], + deps = [ + "//pkg/ilist", + "//pkg/log", + "//pkg/state", + "//pkg/tcpip", + "//pkg/tcpip/buffer", + "//pkg/tcpip/transport/queue", + "//pkg/waiter", + ], +) diff --git a/pkg/tcpip/transport/unix/connectioned.go b/pkg/tcpip/transport/unix/connectioned.go new file mode 100644 index 000000000..def1b2c99 --- /dev/null +++ b/pkg/tcpip/transport/unix/connectioned.go @@ -0,0 +1,431 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/queue" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// UniqueIDProvider generates a sequence of unique identifiers useful for, +// among other things, lock ordering. +type UniqueIDProvider interface { + // UniqueID returns a new unique identifier. + UniqueID() uint64 +} + +// A ConnectingEndpoint is a connectioned unix endpoint that is attempting to +// establish a bidirectional connection with a BoundEndpoint. 
+type ConnectingEndpoint interface { + // ID returns the endpoint's globally unique identifier. This identifier + // must be used to determine locking order if more than one endpoint is + // to be locked in the same codepath. The endpoint with the smaller + // identifier must be locked before endpoints with larger identifiers. + ID() uint64 + + // Passcred implements socket.Credentialer.Passcred. + Passcred() bool + + // Type returns the socket type, typically either SockStream or + // SockSeqpacket. The connection attempt must be aborted if this + // value doesn't match the ConnectableEndpoint's type. + Type() SockType + + // GetLocalAddress returns the bound path. + GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) + + // Locker protects the following methods. While locked, only the holder of + // the lock can change the return value of the protected methods. + sync.Locker + + // Connected returns true iff the ConnectingEndpoint is in the connected + // state. ConnectingEndpoints can only be connected to a single endpoint, + // so the connection attempt must be aborted if this returns true. + Connected() bool + + // Listening returns true iff the ConnectingEndpoint is in the listening + // state. ConnectingEndpoints cannot make connections while listening, so + // the connection attempt must be aborted if this returns true. + Listening() bool + + // WaiterQueue returns a pointer to the endpoint's waiter queue. + WaiterQueue() *waiter.Queue +} + +// connectionedEndpoint is a Unix-domain connected or connectable endpoint and implements +// ConnectingEndpoint, ConnectableEndpoint and tcpip.Endpoint. +// +// connectionedEndpoints must be in connected state in order to transfer data. +// +// This implementation includes STREAM and SEQPACKET Unix sockets created with +// socket(2), accept(2) or socketpair(2) and dgram unix sockets created with +// socketpair(2). See unix_connectionless.go for the implementation of DGRAM +// Unix sockets created with socket(2). 
+// +// The state is much simpler than a TCP endpoint, so it is not encoded +// explicitly. Instead we enforce the following invariants: +// +// receiver != nil, connected != nil => connected. +// path != "" && acceptedChan == nil => bound, not listening. +// path != "" && acceptedChan != nil => bound and listening. +// +// Only one of these will be true at any moment. +type connectionedEndpoint struct { + baseEndpoint + + // id is the unique endpoint identifier. This is used exclusively for + // lock ordering within connect. + id uint64 + + // idGenerator is used to generate new unique endpoint identifiers. + idGenerator UniqueIDProvider + + // stype is used by connecting sockets to ensure that they are the + // same type. The value is typically either tcpip.SockSeqpacket or + // tcpip.SockStream. + stype SockType + + // acceptedChan is per the TCP endpoint implementation. Note that the + // sockets in this channel are _already in the connected state_, and + // have another associated connectionedEndpoint. + // + // If nil, then no listen call has been made. + acceptedChan chan *connectionedEndpoint `state:".([]*connectionedEndpoint)"` +} + +// NewConnectioned creates a new unbound connectionedEndpoint. +func NewConnectioned(stype SockType, uid UniqueIDProvider) Endpoint { + return &connectionedEndpoint{ + baseEndpoint: baseEndpoint{Queue: &waiter.Queue{}}, + id: uid.UniqueID(), + idGenerator: uid, + stype: stype, + } +} + +// NewPair allocates a new pair of connected unix-domain connectionedEndpoints. 
+func NewPair(stype SockType, uid UniqueIDProvider) (Endpoint, Endpoint) { + a := &connectionedEndpoint{ + baseEndpoint: baseEndpoint{Queue: &waiter.Queue{}}, + id: uid.UniqueID(), + idGenerator: uid, + stype: stype, + } + b := &connectionedEndpoint{ + baseEndpoint: baseEndpoint{Queue: &waiter.Queue{}}, + id: uid.UniqueID(), + idGenerator: uid, + stype: stype, + } + + q1 := queue.New(a.Queue, b.Queue, initialLimit) + q2 := queue.New(b.Queue, a.Queue, initialLimit) + + if stype == SockStream { + a.receiver = &streamQueueReceiver{queueReceiver: queueReceiver{q1}} + b.receiver = &streamQueueReceiver{queueReceiver: queueReceiver{q2}} + } else { + a.receiver = &queueReceiver{q1} + b.receiver = &queueReceiver{q2} + } + + a.connected = &connectedEndpoint{ + endpoint: b, + writeQueue: q2, + } + b.connected = &connectedEndpoint{ + endpoint: a, + writeQueue: q1, + } + + return a, b +} + +// ID implements ConnectingEndpoint.ID. +func (e *connectionedEndpoint) ID() uint64 { + return e.id +} + +// Type implements ConnectingEndpoint.Type and Endpoint.Type. +func (e *connectionedEndpoint) Type() SockType { + return e.stype +} + +// WaiterQueue implements ConnectingEndpoint.WaiterQueue. +func (e *connectionedEndpoint) WaiterQueue() *waiter.Queue { + return e.Queue +} + +// isBound returns true iff the connectionedEndpoint is bound (but not +// listening). +func (e *connectionedEndpoint) isBound() bool { + return e.path != "" && e.acceptedChan == nil +} + +// Listening implements ConnectingEndpoint.Listening. +func (e *connectionedEndpoint) Listening() bool { + return e.acceptedChan != nil +} + +// Close puts the connectionedEndpoint in a closed state and frees all +// resources associated with it. +// +// The socket will be a fresh state after a call to close and may be reused. +// That is, close may be used to "unbind" or "disconnect" the socket in error +// paths. 
+func (e *connectionedEndpoint) Close() { + e.Lock() + var c ConnectedEndpoint + var r Receiver + switch { + case e.Connected(): + e.connected.CloseSend() + e.receiver.CloseRecv() + c = e.connected + r = e.receiver + e.connected = nil + e.receiver = nil + case e.isBound(): + e.path = "" + case e.Listening(): + close(e.acceptedChan) + for n := range e.acceptedChan { + n.Close() + } + e.acceptedChan = nil + e.path = "" + } + e.Unlock() + if c != nil { + c.CloseNotify() + c.Release() + } + if r != nil { + r.CloseNotify() + r.Release() + } +} + +// BidirectionalConnect implements BoundEndpoint.BidirectionalConnect. +func (e *connectionedEndpoint) BidirectionalConnect(ce ConnectingEndpoint, returnConnect func(Receiver, ConnectedEndpoint)) *tcpip.Error { + if ce.Type() != e.stype { + return tcpip.ErrConnectionRefused + } + + // Check if ce is e to avoid a deadlock. + if ce, ok := ce.(*connectionedEndpoint); ok && ce == e { + return tcpip.ErrInvalidEndpointState + } + + // Do a dance to safely acquire locks on both endpoints. + if e.id < ce.ID() { + e.Lock() + ce.Lock() + } else { + ce.Lock() + e.Lock() + } + + // Check connecting state. + if ce.Connected() { + e.Unlock() + ce.Unlock() + return tcpip.ErrAlreadyConnected + } + if ce.Listening() { + e.Unlock() + ce.Unlock() + return tcpip.ErrInvalidEndpointState + } + + // Check bound state. + if !e.Listening() { + e.Unlock() + ce.Unlock() + return tcpip.ErrConnectionRefused + } + + // Create a newly bound connectionedEndpoint. 
+ ne := &connectionedEndpoint{ + baseEndpoint: baseEndpoint{ + path: e.path, + Queue: &waiter.Queue{}, + }, + id: e.idGenerator.UniqueID(), + idGenerator: e.idGenerator, + stype: e.stype, + } + readQueue := queue.New(ce.WaiterQueue(), ne.Queue, initialLimit) + writeQueue := queue.New(ne.Queue, ce.WaiterQueue(), initialLimit) + ne.connected = &connectedEndpoint{ + endpoint: ce, + writeQueue: readQueue, + } + if e.stype == SockStream { + ne.receiver = &streamQueueReceiver{queueReceiver: queueReceiver{readQueue: writeQueue}} + } else { + ne.receiver = &queueReceiver{readQueue: writeQueue} + } + + select { + case e.acceptedChan <- ne: + // Commit state. + connected := &connectedEndpoint{ + endpoint: ne, + writeQueue: writeQueue, + } + if e.stype == SockStream { + returnConnect(&streamQueueReceiver{queueReceiver: queueReceiver{readQueue: readQueue}}, connected) + } else { + returnConnect(&queueReceiver{readQueue: readQueue}, connected) + } + + // Notify can deadlock if we are holding these locks. + e.Unlock() + ce.Unlock() + + // Notify on both ends. + e.Notify(waiter.EventIn) + ce.WaiterQueue().Notify(waiter.EventOut) + + return nil + default: + // Busy; return ECONNREFUSED per spec. + ne.Close() + e.Unlock() + ce.Unlock() + return tcpip.ErrConnectionRefused + } +} + +// UnidirectionalConnect implements BoundEndpoint.UnidirectionalConnect. +func (e *connectionedEndpoint) UnidirectionalConnect() (ConnectedEndpoint, *tcpip.Error) { + return nil, tcpip.ErrConnectionRefused +} + +// Connect attempts to directly connect to another Endpoint. +// Implements Endpoint.Connect. +func (e *connectionedEndpoint) Connect(server BoundEndpoint) *tcpip.Error { + returnConnect := func(r Receiver, ce ConnectedEndpoint) { + e.receiver = r + e.connected = ce + } + + return server.BidirectionalConnect(e, returnConnect) +} + +// Listen starts listening on the connection. 
+func (e *connectionedEndpoint) Listen(backlog int) *tcpip.Error { + e.Lock() + defer e.Unlock() + if e.Listening() { + // Adjust the size of the channel iff we can fix existing + // pending connections into the new one. + if len(e.acceptedChan) > backlog { + return tcpip.ErrInvalidEndpointState + } + origChan := e.acceptedChan + e.acceptedChan = make(chan *connectionedEndpoint, backlog) + close(origChan) + for ep := range origChan { + e.acceptedChan <- ep + } + return nil + } + if !e.isBound() { + return tcpip.ErrInvalidEndpointState + } + + // Normal case. + e.acceptedChan = make(chan *connectionedEndpoint, backlog) + return nil +} + +// Accept accepts a new connection. +func (e *connectionedEndpoint) Accept() (Endpoint, *tcpip.Error) { + e.Lock() + defer e.Unlock() + + if !e.Listening() { + return nil, tcpip.ErrInvalidEndpointState + } + + select { + case ne := <-e.acceptedChan: + return ne, nil + + default: + // Nothing left. + return nil, tcpip.ErrWouldBlock + } +} + +// Bind binds the connection. +// +// For Unix connectionedEndpoints, this _only sets the address associated with +// the socket_. Work associated with sockets in the filesystem or finding those +// sockets must be done by a higher level. +// +// Bind will fail only if the socket is connected, bound or the passed address +// is invalid (the empty string). +func (e *connectionedEndpoint) Bind(addr tcpip.FullAddress, commit func() *tcpip.Error) *tcpip.Error { + e.Lock() + defer e.Unlock() + if e.isBound() || e.Listening() { + return tcpip.ErrAlreadyBound + } + if addr.Addr == "" { + // The empty string is not permitted. + return tcpip.ErrBadLocalAddress + } + if commit != nil { + if err := commit(); err != nil { + return err + } + } + + // Save the bound address. + e.path = string(addr.Addr) + return nil +} + +// SendMsg writes data and a control message to the endpoint's peer. +// This method does not block if the data cannot be written. 
+func (e *connectionedEndpoint) SendMsg(data [][]byte, c ControlMessages, to BoundEndpoint) (uintptr, *tcpip.Error) { + // Stream sockets do not support specifying the endpoint. Seqpacket + // sockets ignore the passed endpoint. + if e.stype == SockStream && to != nil { + return 0, tcpip.ErrNotSupported + } + return e.baseEndpoint.SendMsg(data, c, to) +} + +// Readiness returns the current readiness of the connectionedEndpoint. For +// example, if waiter.EventIn is set, the connectionedEndpoint is immediately +// readable. +func (e *connectionedEndpoint) Readiness(mask waiter.EventMask) waiter.EventMask { + e.Lock() + defer e.Unlock() + + ready := waiter.EventMask(0) + switch { + case e.Connected(): + if mask&waiter.EventIn != 0 && e.receiver.Readable() { + ready |= waiter.EventIn + } + if mask&waiter.EventOut != 0 && e.connected.Writable() { + ready |= waiter.EventOut + } + case e.Listening(): + if mask&waiter.EventIn != 0 && len(e.acceptedChan) > 0 { + ready |= waiter.EventIn + } + } + + return ready +} diff --git a/pkg/tcpip/transport/unix/connectioned_state.go b/pkg/tcpip/transport/unix/connectioned_state.go new file mode 100644 index 000000000..5d835c8b2 --- /dev/null +++ b/pkg/tcpip/transport/unix/connectioned_state.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +// saveAcceptedChan is invoked by stateify. +func (e *connectionedEndpoint) saveAcceptedChan() []*connectionedEndpoint { + // If acceptedChan is nil (i.e. we are not listening) then we will save nil. + // Otherwise we create a (possibly empty) slice of the values in acceptedChan and + // save that. + var acceptedSlice []*connectionedEndpoint + if e.acceptedChan != nil { + // Swap out acceptedChan with a new empty channel of the same capacity. 
+ saveChan := e.acceptedChan
+ e.acceptedChan = make(chan *connectionedEndpoint, cap(saveChan))
+
+ // Create a new slice with the same len and capacity as the channel.
+ acceptedSlice = make([]*connectionedEndpoint, len(saveChan), cap(saveChan))
+ // Drain saveChan into acceptedSlice, and fill up the new acceptedChan at the
+ // same time.
+ for i := range acceptedSlice {
+ ep := <-saveChan
+ acceptedSlice[i] = ep
+ e.acceptedChan <- ep
+ }
+ close(saveChan)
+ }
+ return acceptedSlice
+}
+
+// loadAcceptedChan is invoked by stateify.
+func (e *connectionedEndpoint) loadAcceptedChan(acceptedSlice []*connectionedEndpoint) {
+ // If acceptedSlice is nil, then acceptedChan should also be nil.
+ if acceptedSlice != nil {
+ // Otherwise, create a new channel with the same capacity as acceptedSlice.
+ e.acceptedChan = make(chan *connectionedEndpoint, cap(acceptedSlice))
+ // Seed the channel with values from acceptedSlice.
+ for _, ep := range acceptedSlice {
+ e.acceptedChan <- ep
+ }
+ }
+}
diff --git a/pkg/tcpip/transport/unix/connectionless.go b/pkg/tcpip/transport/unix/connectionless.go
new file mode 100644
index 000000000..34d34f99a
--- /dev/null
+++ b/pkg/tcpip/transport/unix/connectionless.go
@@ -0,0 +1,176 @@
+// Copyright 2016 The Netstack Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import (
+ "gvisor.googlesource.com/gvisor/pkg/tcpip"
+ "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/queue"
+ "gvisor.googlesource.com/gvisor/pkg/waiter"
+)
+
+// connectionlessEndpoint is a unix endpoint for unix sockets that support operating in
+// a connectionless fashion.
+//
+// Specifically, this means datagram unix sockets not created with
+// socketpair(2).
+type connectionlessEndpoint struct {
+ baseEndpoint
+}
+
+// NewConnectionless creates a new unbound dgram endpoint. 
+func NewConnectionless() Endpoint { + ep := &connectionlessEndpoint{baseEndpoint{Queue: &waiter.Queue{}}} + ep.receiver = &queueReceiver{readQueue: queue.New(&waiter.Queue{}, ep.Queue, initialLimit)} + return ep +} + +// isBound returns true iff the endpoint is bound. +func (e *connectionlessEndpoint) isBound() bool { + return e.path != "" +} + +// Close puts the endpoint in a closed state and frees all resources associated +// with it. +// +// The socket will be a fresh state after a call to close and may be reused. +// That is, close may be used to "unbind" or "disconnect" the socket in error +// paths. +func (e *connectionlessEndpoint) Close() { + e.Lock() + var r Receiver + if e.Connected() { + e.receiver.CloseRecv() + r = e.receiver + e.receiver = nil + + e.connected.Release() + e.connected = nil + } + if e.isBound() { + e.path = "" + } + e.Unlock() + if r != nil { + r.CloseNotify() + r.Release() + } +} + +// BidirectionalConnect implements BoundEndpoint.BidirectionalConnect. +func (e *connectionlessEndpoint) BidirectionalConnect(ce ConnectingEndpoint, returnConnect func(Receiver, ConnectedEndpoint)) *tcpip.Error { + return tcpip.ErrConnectionRefused +} + +// UnidirectionalConnect implements BoundEndpoint.UnidirectionalConnect. +func (e *connectionlessEndpoint) UnidirectionalConnect() (ConnectedEndpoint, *tcpip.Error) { + return &connectedEndpoint{ + endpoint: e, + writeQueue: e.receiver.(*queueReceiver).readQueue, + }, nil +} + +// SendMsg writes data and a control message to the specified endpoint. +// This method does not block if the data cannot be written. 
+func (e *connectionlessEndpoint) SendMsg(data [][]byte, c ControlMessages, to BoundEndpoint) (uintptr, *tcpip.Error) { + if to == nil { + return e.baseEndpoint.SendMsg(data, c, nil) + } + + connected, err := to.UnidirectionalConnect() + if err != nil { + return 0, tcpip.ErrInvalidEndpointState + } + defer connected.Release() + + e.Lock() + n, notify, err := connected.Send(data, c, tcpip.FullAddress{Addr: tcpip.Address(e.path)}) + e.Unlock() + if err != nil { + return 0, err + } + if notify { + connected.SendNotify() + } + + return n, nil +} + +// Type implements Endpoint.Type. +func (e *connectionlessEndpoint) Type() SockType { + return SockDgram +} + +// Connect attempts to connect directly to server. +func (e *connectionlessEndpoint) Connect(server BoundEndpoint) *tcpip.Error { + connected, err := server.UnidirectionalConnect() + if err != nil { + return err + } + + e.Lock() + e.connected = connected + e.Unlock() + + return nil +} + +// Listen starts listening on the connection. +func (e *connectionlessEndpoint) Listen(int) *tcpip.Error { + return tcpip.ErrNotSupported +} + +// Accept accepts a new connection. +func (e *connectionlessEndpoint) Accept() (Endpoint, *tcpip.Error) { + return nil, tcpip.ErrNotSupported +} + +// Bind binds the connection. +// +// For Unix endpoints, this _only sets the address associated with the socket_. +// Work associated with sockets in the filesystem or finding those sockets must +// be done by a higher level. +// +// Bind will fail only if the socket is connected, bound or the passed address +// is invalid (the empty string). +func (e *connectionlessEndpoint) Bind(addr tcpip.FullAddress, commit func() *tcpip.Error) *tcpip.Error { + e.Lock() + defer e.Unlock() + if e.isBound() { + return tcpip.ErrAlreadyBound + } + if addr.Addr == "" { + // The empty string is not permitted. + return tcpip.ErrBadLocalAddress + } + if commit != nil { + if err := commit(); err != nil { + return err + } + } + + // Save the bound address. 
+ e.path = string(addr.Addr) + return nil +} + +// Readiness returns the current readiness of the endpoint. For example, if +// waiter.EventIn is set, the endpoint is immediately readable. +func (e *connectionlessEndpoint) Readiness(mask waiter.EventMask) waiter.EventMask { + e.Lock() + defer e.Unlock() + + ready := waiter.EventMask(0) + if mask&waiter.EventIn != 0 && e.receiver.Readable() { + ready |= waiter.EventIn + } + + if e.Connected() { + if mask&waiter.EventOut != 0 && e.connected.Writable() { + ready |= waiter.EventOut + } + } + + return ready +} diff --git a/pkg/tcpip/transport/unix/unix.go b/pkg/tcpip/transport/unix/unix.go new file mode 100644 index 000000000..5fe37eb71 --- /dev/null +++ b/pkg/tcpip/transport/unix/unix.go @@ -0,0 +1,902 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unix contains the implementation of Unix endpoints. +package unix + +import ( + "sync" + "sync/atomic" + + "gvisor.googlesource.com/gvisor/pkg/ilist" + "gvisor.googlesource.com/gvisor/pkg/tcpip" + "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/queue" + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// initialLimit is the starting limit for the socket buffers. +const initialLimit = 16 * 1024 + +// A SockType is a type (as opposed to family) of sockets. These are enumerated +// in the syscall package as syscall.SOCK_* constants. +type SockType int + +const ( + // SockStream corresponds to syscall.SOCK_STREAM. + SockStream SockType = 1 + // SockDgram corresponds to syscall.SOCK_DGRAM. + SockDgram SockType = 2 + // SockRaw corresponds to syscall.SOCK_RAW. + SockRaw SockType = 3 + // SockSeqpacket corresponds to syscall.SOCK_SEQPACKET. + SockSeqpacket SockType = 5 +) + +// A RightsControlMessage is a control message containing FDs. 
+type RightsControlMessage interface { + // Clone returns a copy of the RightsControlMessage. + Clone() RightsControlMessage + + // Release releases any resources owned by the RightsControlMessage. + Release() +} + +// A CredentialsControlMessage is a control message containing Unix credentials. +type CredentialsControlMessage interface { + // Equals returns true iff the two messages are equal. + Equals(CredentialsControlMessage) bool +} + +// A ControlMessages represents a collection of socket control messages. +type ControlMessages struct { + // Rights is a control message containing FDs. + Rights RightsControlMessage + + // Credentials is a control message containing Unix credentials. + Credentials CredentialsControlMessage +} + +// Empty returns true iff the ControlMessages does not contain either +// credentials or rights. +func (c *ControlMessages) Empty() bool { + return c.Rights == nil && c.Credentials == nil +} + +// Clone clones both the credentials and the rights. +func (c *ControlMessages) Clone() ControlMessages { + cm := ControlMessages{} + if c.Rights != nil { + cm.Rights = c.Rights.Clone() + } + cm.Credentials = c.Credentials + return cm +} + +// Release releases both the credentials and the rights. +func (c *ControlMessages) Release() { + if c.Rights != nil { + c.Rights.Release() + } + *c = ControlMessages{} +} + +// Endpoint is the interface implemented by Unix transport protocol +// implementations that expose functionality like sendmsg, recvmsg, connect, +// etc. to Unix socket implementations. +type Endpoint interface { + Credentialer + waiter.Waitable + + // Close puts the endpoint in a closed state and frees all resources + // associated with it. + Close() + + // RecvMsg reads data and a control message from the endpoint. This method + // does not block if there is no data pending. + // + // creds indicates if credential control messages are requested by the + // caller. 
This is useful for determining if control messages can be + // coalesced. creds is a hint and can be safely ignored by the + // implementation if no coalescing is possible. It is fine to return + // credential control messages when none were requested or to not return + // credential control messages when they were requested. + // + // numRights is the number of SCM_RIGHTS FDs requested by the caller. This + // is useful if one must allocate a buffer to receive a SCM_RIGHTS message + // or determine if control messages can be coalesced. numRights is a hint + // and can be safely ignored by the implementation if the number of + // available SCM_RIGHTS FDs is known and no coalescing is possible. It is + // fine for the returned number of SCM_RIGHTS FDs to be either higher or + // lower than the requested number. + // + // If peek is true, no data should be consumed from the Endpoint. Any and + // all data returned from a peek should be available in the next call to + // RecvMsg. + // + // recvLen is the number of bytes copied into data. + // + // msgLen is the length of the read message consumed for datagram Endpoints. + // msgLen is always the same as recvLen for stream Endpoints. + RecvMsg(data [][]byte, creds bool, numRights uintptr, peek bool, addr *tcpip.FullAddress) (recvLen, msgLen uintptr, cm ControlMessages, err *tcpip.Error) + + // SendMsg writes data and a control message to the endpoint's peer. + // This method does not block if the data cannot be written. + // + // SendMsg does not take ownership of any of its arguments on error. + SendMsg([][]byte, ControlMessages, BoundEndpoint) (uintptr, *tcpip.Error) + + // Connect connects this endpoint directly to another. + // + // This should be called on the client endpoint, and the (bound) + // endpoint passed in as a parameter. + // + // The error codes are the same as Connect. 
+ Connect(server BoundEndpoint) *tcpip.Error + + // Shutdown closes the read and/or write end of the endpoint connection + // to its peer. + Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error + + // Listen puts the endpoint in "listen" mode, which allows it to accept + // new connections. + Listen(backlog int) *tcpip.Error + + // Accept returns a new endpoint if a peer has established a connection + // to an endpoint previously set to listen mode. This method does not + // block if no new connections are available. + // + // The returned Queue is the wait queue for the newly created endpoint. + Accept() (Endpoint, *tcpip.Error) + + // Bind binds the endpoint to a specific local address and port. + // Specifying a NIC is optional. + // + // An optional commit function will be executed atomically with respect + // to binding the endpoint. If this returns an error, the bind will not + // occur and the error will be propagated back to the caller. + Bind(address tcpip.FullAddress, commit func() *tcpip.Error) *tcpip.Error + + // Type return the socket type, typically either SockStream, SockDgram + // or SockSeqpacket. + Type() SockType + + // GetLocalAddress returns the address to which the endpoint is bound. + GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) + + // GetRemoteAddress returns the address to which the endpoint is + // connected. + GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) + + // SetSockOpt sets a socket option. opt should be one of the tcpip.*Option + // types. + SetSockOpt(opt interface{}) *tcpip.Error + + // GetSockOpt gets a socket option. opt should be a pointer to one of the + // tcpip.*Option types. + GetSockOpt(opt interface{}) *tcpip.Error +} + +// A Credentialer is a socket or endpoint that supports the SO_PASSCRED socket +// option. +type Credentialer interface { + // Passcred returns whether or not the SO_PASSCRED socket option is + // enabled on this end. 
+ Passcred() bool + + // ConnectedPasscred returns whether or not the SO_PASSCRED socket option + // is enabled on the connected end. + ConnectedPasscred() bool +} + +// A BoundEndpoint is a unix endpoint that can be connected to. +type BoundEndpoint interface { + // BidirectionalConnect establishes a bi-directional connection between two + // unix endpoints in an all-or-nothing manner. If an error occurs during + // connecting, the state of neither endpoint should be modified. + // + // In order for an endpoint to establish such a bidirectional connection + // with a BoundEndpoint, the endpoint calls the BidirectionalConnect method + // on the BoundEndpoint and sends a representation of itself (the + // ConnectingEndpoint) and a callback (returnConnect) to receive the + // connection information (Receiver and ConnectedEndpoint) upon a + // successful connect. The callback should only be called on a successful + // connect. + // + // For a connection attempt to be successful, the ConnectingEndpoint must + // be unconnected and not listening and the BoundEndpoint whose + // BidirectionalConnect method is being called must be listening. + // + // This method will return tcpip.ErrConnectionRefused on endpoints with a + // type that isn't SockStream or SockSeqpacket. + BidirectionalConnect(ep ConnectingEndpoint, returnConnect func(Receiver, ConnectedEndpoint)) *tcpip.Error + + // UnidirectionalConnect establishes a write-only connection to a unix endpoint. + // + // This method will return tcpip.ErrConnectionRefused on a non-SockDgram + // endpoint. + UnidirectionalConnect() (ConnectedEndpoint, *tcpip.Error) + + // Release releases any resources held by the BoundEndpoint. It must be + // called before dropping all references to a BoundEndpoint returned by a + // function. + Release() +} + +// message represents a message passed over a Unix domain socket. +type message struct { + ilist.Entry + + // Data is the Message payload. 
+ Data buffer.View + + // Control is auxiliary control message data that goes along with the + // data. + Control ControlMessages + + // Address is the bound address of the endpoint that sent the message. + // + // If the endpoint that sent the message is not bound, the Address is + // the empty string. + Address tcpip.FullAddress +} + +// Length returns number of bytes stored in the Message. +func (m *message) Length() int64 { + return int64(len(m.Data)) +} + +// Release releases any resources held by the Message. +func (m *message) Release() { + m.Control.Release() +} + +func (m *message) Peek() queue.Entry { + return &message{Data: m.Data, Control: m.Control.Clone(), Address: m.Address} +} + +// A Receiver can be used to receive Messages. +type Receiver interface { + // Recv receives a single message. This method does not block. + // + // See Endpoint.RecvMsg for documentation on shared arguments. + // + // notify indicates if RecvNotify should be called. + Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (recvLen, msgLen uintptr, cm ControlMessages, source tcpip.FullAddress, notify bool, err *tcpip.Error) + + // RecvNotify notifies the Receiver of a successful Recv. This must not be + // called while holding any endpoint locks. + RecvNotify() + + // CloseRecv prevents the receiving of additional Messages. + // + // After CloseRecv is called, CloseNotify must also be called. + CloseRecv() + + // CloseNotify notifies the Receiver of recv being closed. This must not be + // called while holding any endpoint locks. + CloseNotify() + + // Readable returns if messages should be attempted to be received. This + // includes when read has been shutdown. + Readable() bool + + // RecvQueuedSize returns the total amount of data currently receivable. + // RecvQueuedSize should return -1 if the operation isn't supported. + RecvQueuedSize() int64 + + // RecvMaxQueueSize returns maximum value for RecvQueuedSize. 
+ // RecvMaxQueueSize should return -1 if the operation isn't supported. + RecvMaxQueueSize() int64 + + // Release releases any resources owned by the Receiver. It should be + // called before droping all references to a Receiver. + Release() +} + +// queueReceiver implements Receiver for datagram sockets. +type queueReceiver struct { + readQueue *queue.Queue +} + +// Recv implements Receiver.Recv. +func (q *queueReceiver) Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (uintptr, uintptr, ControlMessages, tcpip.FullAddress, bool, *tcpip.Error) { + var m queue.Entry + var notify bool + var err *tcpip.Error + if peek { + m, err = q.readQueue.Peek() + } else { + m, notify, err = q.readQueue.Dequeue() + } + if err != nil { + return 0, 0, ControlMessages{}, tcpip.FullAddress{}, false, err + } + msg := m.(*message) + src := []byte(msg.Data) + var copied uintptr + for i := 0; i < len(data) && len(src) > 0; i++ { + n := copy(data[i], src) + copied += uintptr(n) + src = src[n:] + } + return copied, uintptr(len(msg.Data)), msg.Control, msg.Address, notify, nil +} + +// RecvNotify implements Receiver.RecvNotify. +func (q *queueReceiver) RecvNotify() { + q.readQueue.WriterQueue.Notify(waiter.EventOut) +} + +// CloseNotify implements Receiver.CloseNotify. +func (q *queueReceiver) CloseNotify() { + q.readQueue.ReaderQueue.Notify(waiter.EventIn) + q.readQueue.WriterQueue.Notify(waiter.EventOut) +} + +// CloseRecv implements Receiver.CloseRecv. +func (q *queueReceiver) CloseRecv() { + q.readQueue.Close() +} + +// Readable implements Receiver.Readable. +func (q *queueReceiver) Readable() bool { + return q.readQueue.IsReadable() +} + +// RecvQueuedSize implements Receiver.RecvQueuedSize. +func (q *queueReceiver) RecvQueuedSize() int64 { + return q.readQueue.QueuedSize() +} + +// RecvMaxQueueSize implements Receiver.RecvMaxQueueSize. +func (q *queueReceiver) RecvMaxQueueSize() int64 { + return q.readQueue.MaxQueueSize() +} + +// Release implements Receiver.Release. 
+func (*queueReceiver) Release() {} + +// streamQueueReceiver implements Receiver for stream sockets. +type streamQueueReceiver struct { + queueReceiver + + mu sync.Mutex `state:"nosave"` + buffer []byte + control ControlMessages + addr tcpip.FullAddress +} + +func vecCopy(data [][]byte, buf []byte) (uintptr, [][]byte, []byte) { + var copied uintptr + for len(data) > 0 && len(buf) > 0 { + n := copy(data[0], buf) + copied += uintptr(n) + buf = buf[n:] + data[0] = data[0][n:] + if len(data[0]) == 0 { + data = data[1:] + } + } + return copied, data, buf +} + +// Readable implements Receiver.Readable. +func (q *streamQueueReceiver) Readable() bool { + // We're readable if we have data in our buffer or if the queue receiver is + // readable. + return len(q.buffer) > 0 || q.readQueue.IsReadable() +} + +// RecvQueuedSize implements Receiver.RecvQueuedSize. +func (q *streamQueueReceiver) RecvQueuedSize() int64 { + return int64(len(q.buffer)) + q.readQueue.QueuedSize() +} + +// RecvMaxQueueSize implements Receiver.RecvMaxQueueSize. +func (q *streamQueueReceiver) RecvMaxQueueSize() int64 { + // The RecvMaxQueueSize() is the readQueue's MaxQueueSize() plus the largest + // message we can buffer which is also the largest message we can receive. + return 2 * q.readQueue.MaxQueueSize() +} + +// Recv implements Receiver.Recv. +func (q *streamQueueReceiver) Recv(data [][]byte, wantCreds bool, numRights uintptr, peek bool) (uintptr, uintptr, ControlMessages, tcpip.FullAddress, bool, *tcpip.Error) { + q.mu.Lock() + defer q.mu.Unlock() + + var notify bool + + // If we have no data in the endpoint, we need to get some. + if len(q.buffer) == 0 { + // Load the next message into a buffer, even if we are peeking. Peeking + // won't consume the message, so it will be still available to be read + // the next time Recv() is called. 
+ m, n, err := q.readQueue.Dequeue() + if err != nil { + return 0, 0, ControlMessages{}, tcpip.FullAddress{}, false, err + } + notify = n + msg := m.(*message) + q.buffer = []byte(msg.Data) + q.control = msg.Control + q.addr = msg.Address + } + + var copied uintptr + if peek { + // Don't consume control message if we are peeking. + c := q.control.Clone() + + // Don't consume data since we are peeking. + copied, data, _ = vecCopy(data, q.buffer) + + return copied, copied, c, q.addr, notify, nil + } + + // Consume data and control message since we are not peeking. + copied, data, q.buffer = vecCopy(data, q.buffer) + + // Save the original state of q.control. + c := q.control + + // Remove rights from q.control and leave behind just the creds. + q.control.Rights = nil + if !wantCreds { + c.Credentials = nil + } + + if c.Rights != nil && numRights == 0 { + c.Rights.Release() + c.Rights = nil + } + + haveRights := c.Rights != nil + + // If we have more capacity for data and haven't received any usable + // rights. + // + // Linux never coalesces rights control messages. + for !haveRights && len(data) > 0 { + // Get a message from the readQueue. + m, n, err := q.readQueue.Dequeue() + if err != nil { + // We already got some data, so ignore this error. This will + // manifest as a short read to the user, which is what Linux + // does. + break + } + notify = notify || n + msg := m.(*message) + q.buffer = []byte(msg.Data) + q.control = msg.Control + q.addr = msg.Address + + if wantCreds { + if (q.control.Credentials == nil) != (c.Credentials == nil) { + // One message has credentials, the other does not. + break + } + + if q.control.Credentials != nil && c.Credentials != nil && !q.control.Credentials.Equals(c.Credentials) { + // Both messages have credentials, but they don't match. + break + } + } + + if numRights != 0 && c.Rights != nil && q.control.Rights != nil { + // Both messages have rights. 
+ break + } + + var cpd uintptr + cpd, data, q.buffer = vecCopy(data, q.buffer) + copied += cpd + + if cpd == 0 { + // data was actually full. + break + } + + if q.control.Rights != nil { + // Consume rights. + if numRights == 0 { + q.control.Rights.Release() + } else { + c.Rights = q.control.Rights + haveRights = true + } + q.control.Rights = nil + } + } + return copied, copied, c, q.addr, notify, nil +} + +// A ConnectedEndpoint is an Endpoint that can be used to send Messages. +type ConnectedEndpoint interface { + // Passcred implements Endpoint.Passcred. + Passcred() bool + + // GetLocalAddress implements Endpoint.GetLocalAddress. + GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) + + // Send sends a single message. This method does not block. + // + // notify indicates if SendNotify should be called. + Send(data [][]byte, controlMessages ControlMessages, from tcpip.FullAddress) (n uintptr, notify bool, err *tcpip.Error) + + // SendNotify notifies the ConnectedEndpoint of a successful Send. This + // must not be called while holding any endpoint locks. + SendNotify() + + // CloseSend prevents the sending of additional Messages. + // + // After CloseSend is call, CloseNotify must also be called. + CloseSend() + + // CloseNotify notifies the ConnectedEndpoint of send being closed. This + // must not be called while holding any endpoint locks. + CloseNotify() + + // Writable returns if messages should be attempted to be sent. This + // includes when write has been shutdown. + Writable() bool + + // EventUpdate lets the ConnectedEndpoint know that event registrations + // have changed. + EventUpdate() + + // SendQueuedSize returns the total amount of data currently queued for + // sending. SendQueuedSize should return -1 if the operation isn't + // supported. + SendQueuedSize() int64 + + // SendMaxQueueSize returns maximum value for SendQueuedSize. + // SendMaxQueueSize should return -1 if the operation isn't supported. 
+ SendMaxQueueSize() int64 + + // Release releases any resources owned by the ConnectedEndpoint. It should + // be called before droping all references to a ConnectedEndpoint. + Release() +} + +type connectedEndpoint struct { + // endpoint represents the subset of the Endpoint functionality needed by + // the connectedEndpoint. It is implemented by both connectionedEndpoint + // and connectionlessEndpoint and allows the use of types which don't + // fully implement Endpoint. + endpoint interface { + // Passcred implements Endpoint.Passcred. + Passcred() bool + + // GetLocalAddress implements Endpoint.GetLocalAddress. + GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) + + // Type implements Endpoint.Type. + Type() SockType + } + + writeQueue *queue.Queue +} + +// Passcred implements ConnectedEndpoint.Passcred. +func (e *connectedEndpoint) Passcred() bool { + return e.endpoint.Passcred() +} + +// GetLocalAddress implements ConnectedEndpoint.GetLocalAddress. +func (e *connectedEndpoint) GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) { + return e.endpoint.GetLocalAddress() +} + +// Send implements ConnectedEndpoint.Send. +func (e *connectedEndpoint) Send(data [][]byte, controlMessages ControlMessages, from tcpip.FullAddress) (uintptr, bool, *tcpip.Error) { + var l int + for _, d := range data { + l += len(d) + } + // Discard empty stream packets. Since stream sockets don't preserve + // message boundaries, sending zero bytes is a no-op. In Linux, the + // receiver actually uses a zero-length receive as an indication that the + // stream was closed. + if l == 0 && e.endpoint.Type() == SockStream { + controlMessages.Release() + return 0, false, nil + } + v := make([]byte, 0, l) + for _, d := range data { + v = append(v, d...) + } + notify, err := e.writeQueue.Enqueue(&message{Data: buffer.View(v), Control: controlMessages, Address: from}) + return uintptr(l), notify, err +} + +// SendNotify implements ConnectedEndpoint.SendNotify. 
+func (e *connectedEndpoint) SendNotify() { + e.writeQueue.ReaderQueue.Notify(waiter.EventIn) +} + +// CloseNotify implements ConnectedEndpoint.CloseNotify. +func (e *connectedEndpoint) CloseNotify() { + e.writeQueue.ReaderQueue.Notify(waiter.EventIn) + e.writeQueue.WriterQueue.Notify(waiter.EventOut) +} + +// CloseSend implements ConnectedEndpoint.CloseSend. +func (e *connectedEndpoint) CloseSend() { + e.writeQueue.Close() +} + +// Writable implements ConnectedEndpoint.Writable. +func (e *connectedEndpoint) Writable() bool { + return e.writeQueue.IsWritable() +} + +// EventUpdate implements ConnectedEndpoint.EventUpdate. +func (*connectedEndpoint) EventUpdate() {} + +// SendQueuedSize implements ConnectedEndpoint.SendQueuedSize. +func (e *connectedEndpoint) SendQueuedSize() int64 { + return e.writeQueue.QueuedSize() +} + +// SendMaxQueueSize implements ConnectedEndpoint.SendMaxQueueSize. +func (e *connectedEndpoint) SendMaxQueueSize() int64 { + return e.writeQueue.MaxQueueSize() +} + +// Release implements ConnectedEndpoint.Release. +func (*connectedEndpoint) Release() {} + +// baseEndpoint is an embeddable unix endpoint base used in both the connected and connectionless +// unix domain socket Endpoint implementations. +// +// Not to be used on its own. +type baseEndpoint struct { + *waiter.Queue + + // passcred specifies whether SCM_CREDENTIALS socket control messages are + // enabled on this endpoint. Must be accessed atomically. + passcred int32 + + // Mutex protects the below fields. + sync.Mutex `state:"nosave"` + + // receiver allows Messages to be received. + receiver Receiver + + // connected allows messages to be sent and state information about the + // connected endpoint to be read. + connected ConnectedEndpoint + + // path is not empty if the endpoint has been bound, + // or may be used if the endpoint is connected. + path string +} + +// EventRegister implements waiter.Waitable.EventRegister. 
+func (e *baseEndpoint) EventRegister(we *waiter.Entry, mask waiter.EventMask) { + e.Lock() + e.Queue.EventRegister(we, mask) + if e.connected != nil { + e.connected.EventUpdate() + } + e.Unlock() +} + +// EventUnregister implements waiter.Waitable.EventUnregister. +func (e *baseEndpoint) EventUnregister(we *waiter.Entry) { + e.Lock() + e.Queue.EventUnregister(we) + if e.connected != nil { + e.connected.EventUpdate() + } + e.Unlock() +} + +// Passcred implements Credentialer.Passcred. +func (e *baseEndpoint) Passcred() bool { + return atomic.LoadInt32(&e.passcred) != 0 +} + +// ConnectedPasscred implements Credentialer.ConnectedPasscred. +func (e *baseEndpoint) ConnectedPasscred() bool { + e.Lock() + defer e.Unlock() + return e.connected != nil && e.connected.Passcred() +} + +func (e *baseEndpoint) setPasscred(pc bool) { + if pc { + atomic.StoreInt32(&e.passcred, 1) + } else { + atomic.StoreInt32(&e.passcred, 0) + } +} + +// Connected implements ConnectingEndpoint.Connected. +func (e *baseEndpoint) Connected() bool { + return e.receiver != nil && e.connected != nil +} + +// RecvMsg reads data and a control message from the endpoint. +func (e *baseEndpoint) RecvMsg(data [][]byte, creds bool, numRights uintptr, peek bool, addr *tcpip.FullAddress) (uintptr, uintptr, ControlMessages, *tcpip.Error) { + e.Lock() + + if e.receiver == nil { + e.Unlock() + return 0, 0, ControlMessages{}, tcpip.ErrNotConnected + } + + recvLen, msgLen, cms, a, notify, err := e.receiver.Recv(data, creds, numRights, peek) + e.Unlock() + if err != nil { + return 0, 0, ControlMessages{}, err + } + + if notify { + e.receiver.RecvNotify() + } + + if addr != nil { + *addr = a + } + return recvLen, msgLen, cms, nil +} + +// SendMsg writes data and a control message to the endpoint's peer. +// This method does not block if the data cannot be written. 
+func (e *baseEndpoint) SendMsg(data [][]byte, c ControlMessages, to BoundEndpoint) (uintptr, *tcpip.Error) { + e.Lock() + if !e.Connected() { + e.Unlock() + return 0, tcpip.ErrNotConnected + } + if to != nil { + e.Unlock() + return 0, tcpip.ErrAlreadyConnected + } + + n, notify, err := e.connected.Send(data, c, tcpip.FullAddress{Addr: tcpip.Address(e.path)}) + e.Unlock() + if err != nil { + return 0, err + } + + if notify { + e.connected.SendNotify() + } + + return n, nil +} + +// SetSockOpt sets a socket option. Currently not supported. +func (e *baseEndpoint) SetSockOpt(opt interface{}) *tcpip.Error { + switch v := opt.(type) { + case tcpip.PasscredOption: + e.setPasscred(v != 0) + return nil + } + return nil +} + +// GetSockOpt implements tcpip.Endpoint.GetSockOpt. +func (e *baseEndpoint) GetSockOpt(opt interface{}) *tcpip.Error { + switch o := opt.(type) { + case tcpip.ErrorOption: + return nil + case *tcpip.SendQueueSizeOption: + e.Lock() + if !e.Connected() { + e.Unlock() + return tcpip.ErrNotConnected + } + qs := tcpip.SendQueueSizeOption(e.connected.SendQueuedSize()) + e.Unlock() + if qs < 0 { + return tcpip.ErrQueueSizeNotSupported + } + *o = qs + return nil + case *tcpip.ReceiveQueueSizeOption: + e.Lock() + if !e.Connected() { + e.Unlock() + return tcpip.ErrNotConnected + } + qs := tcpip.ReceiveQueueSizeOption(e.receiver.RecvQueuedSize()) + e.Unlock() + if qs < 0 { + return tcpip.ErrQueueSizeNotSupported + } + *o = qs + return nil + case *tcpip.PasscredOption: + if e.Passcred() { + *o = tcpip.PasscredOption(1) + } else { + *o = tcpip.PasscredOption(0) + } + return nil + case *tcpip.SendBufferSizeOption: + e.Lock() + if !e.Connected() { + e.Unlock() + return tcpip.ErrNotConnected + } + qs := tcpip.SendBufferSizeOption(e.connected.SendMaxQueueSize()) + e.Unlock() + if qs < 0 { + return tcpip.ErrQueueSizeNotSupported + } + *o = qs + return nil + case *tcpip.ReceiveBufferSizeOption: + e.Lock() + if e.receiver == nil { + e.Unlock() + return 
tcpip.ErrNotConnected + } + qs := tcpip.ReceiveBufferSizeOption(e.receiver.RecvMaxQueueSize()) + e.Unlock() + if qs < 0 { + return tcpip.ErrQueueSizeNotSupported + } + *o = qs + return nil + } + return tcpip.ErrUnknownProtocolOption +} + +// Shutdown closes the read and/or write end of the endpoint connection to its +// peer. +func (e *baseEndpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error { + e.Lock() + if !e.Connected() { + e.Unlock() + return tcpip.ErrNotConnected + } + + if flags&tcpip.ShutdownRead != 0 { + e.receiver.CloseRecv() + } + + if flags&tcpip.ShutdownWrite != 0 { + e.connected.CloseSend() + } + + e.Unlock() + + if flags&tcpip.ShutdownRead != 0 { + e.receiver.CloseNotify() + } + + if flags&tcpip.ShutdownWrite != 0 { + e.connected.CloseNotify() + } + + return nil +} + +// GetLocalAddress returns the bound path. +func (e *baseEndpoint) GetLocalAddress() (tcpip.FullAddress, *tcpip.Error) { + e.Lock() + defer e.Unlock() + return tcpip.FullAddress{Addr: tcpip.Address(e.path)}, nil +} + +// GetRemoteAddress returns the local address of the connected endpoint (if +// available). +func (e *baseEndpoint) GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) { + e.Lock() + c := e.connected + e.Unlock() + if c != nil { + return c.GetLocalAddress() + } + return tcpip.FullAddress{}, tcpip.ErrNotConnected +} + +// Release implements BoundEndpoint.Release. 
+func (*baseEndpoint) Release() {} diff --git a/pkg/tmutex/BUILD b/pkg/tmutex/BUILD new file mode 100644 index 000000000..5d1614d35 --- /dev/null +++ b/pkg/tmutex/BUILD @@ -0,0 +1,17 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "tmutex", + srcs = ["tmutex.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/tmutex", + visibility = ["//:sandbox"], +) + +go_test( + name = "tmutex_test", + size = "medium", + srcs = ["tmutex_test.go"], + embed = [":tmutex"], +) diff --git a/pkg/tmutex/tmutex.go b/pkg/tmutex/tmutex.go new file mode 100644 index 000000000..61779654f --- /dev/null +++ b/pkg/tmutex/tmutex.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tmutex provides the implementation of a mutex that implements an +// efficient TryLock function in addition to Lock and Unlock. +package tmutex + +import ( + "sync/atomic" +) + +// Mutex is a mutual exclusion primitive that implements TryLock in addition +// to Lock and Unlock. +type Mutex struct { + v int32 + ch chan struct{} +} + +// Init initializes the mutex. +func (m *Mutex) Init() { + m.v = 1 + m.ch = make(chan struct{}, 1) +} + +// Lock acquires the mutex. If it is currently held by another goroutine, Lock +// will wait until it has a chance to acquire it. +func (m *Mutex) Lock() { + // Uncontended case. + if atomic.AddInt32(&m.v, -1) == 0 { + return + } + + for { + // Try to acquire the mutex again, at the same time making sure + // that m.v is negative, which indicates to the owner of the + // lock that it is contended, which will force it to try to wake + // someone up when it releases the mutex. + if v := atomic.LoadInt32(&m.v); v >= 0 && atomic.SwapInt32(&m.v, -1) == 1 { + return + } + + // Wait for the mutex to be released before trying again. 
+ <-m.ch + } +} + +// TryLock attempts to acquire the mutex without blocking. If the mutex is +// currently held by another goroutine, it fails to acquire it and returns +// false. +func (m *Mutex) TryLock() bool { + v := atomic.LoadInt32(&m.v) + if v <= 0 { + return false + } + return atomic.CompareAndSwapInt32(&m.v, 1, 0) +} + +// Unlock releases the mutex. +func (m *Mutex) Unlock() { + if atomic.SwapInt32(&m.v, 1) == 0 { + // There were no pending waiters. + return + } + + // Wake some waiter up. + select { + case m.ch <- struct{}{}: + default: + } +} diff --git a/pkg/tmutex/tmutex_test.go b/pkg/tmutex/tmutex_test.go new file mode 100644 index 000000000..e1b5fd4e2 --- /dev/null +++ b/pkg/tmutex/tmutex_test.go @@ -0,0 +1,247 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tmutex + +import ( + "fmt" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestBasicLock(t *testing.T) { + var m Mutex + m.Init() + + m.Lock() + + // Try blocking lock the mutex from a different goroutine. This must + // not block because the mutex is held. + ch := make(chan struct{}, 1) + go func() { + m.Lock() + ch <- struct{}{} + m.Unlock() + ch <- struct{}{} + }() + + select { + case <-ch: + t.Fatalf("Lock succeeded on locked mutex") + case <-time.After(100 * time.Millisecond): + } + + // Unlock the mutex and make sure that the goroutine waiting on Lock() + // unblocks and succeeds. + m.Unlock() + + select { + case <-ch: + case <-time.After(100 * time.Millisecond): + t.Fatalf("Lock failed to acquire unlocked mutex") + } + + // Make sure we can lock and unlock again. + m.Lock() + m.Unlock() +} + +func TestTryLock(t *testing.T) { + var m Mutex + m.Init() + + // Try to lock. It should succeed. + if !m.TryLock() { + t.Fatalf("TryLock failed on unlocked mutex") + } + + // Try to lock again, it should now fail. 
+ if m.TryLock() { + t.Fatalf("TryLock succeeded on locked mutex") + } + + // Try blocking lock the mutex from a different goroutine. This must + // not block because the mutex is held. + ch := make(chan struct{}, 1) + go func() { + m.Lock() + ch <- struct{}{} + m.Unlock() + }() + + select { + case <-ch: + t.Fatalf("Lock succeeded on locked mutex") + case <-time.After(100 * time.Millisecond): + } + + // Unlock the mutex and make sure that the goroutine waiting on Lock() + // unblocks and succeeds. + m.Unlock() + + select { + case <-ch: + case <-time.After(100 * time.Millisecond): + t.Fatalf("Lock failed to acquire unlocked mutex") + } +} + +func TestMutualExclusion(t *testing.T) { + var m Mutex + m.Init() + + // Test mutual exclusion by running "gr" goroutines concurrently, and + // have each one increment a counter "iters" times within the critical + // section established by the mutex. + // + // If at the end the counter is not gr * iters, then we know that + // goroutines ran concurrently within the critical section. + // + // If one of the goroutines doesn't complete, it's likely a bug that + // causes to it to wait forever. + const gr = 1000 + const iters = 100000 + v := 0 + var wg sync.WaitGroup + for i := 0; i < gr; i++ { + wg.Add(1) + go func() { + for j := 0; j < iters; j++ { + m.Lock() + v++ + m.Unlock() + } + wg.Done() + }() + } + + wg.Wait() + + if v != gr*iters { + t.Fatalf("Bad count: got %v, want %v", v, gr*iters) + } +} + +func TestMutualExclusionWithTryLock(t *testing.T) { + var m Mutex + m.Init() + + // Similar to the previous, with the addition of some goroutines that + // only increment the count if TryLock succeeds. 
+ const gr = 1000 + const iters = 100000 + total := int64(gr * iters) + var tryTotal int64 + v := int64(0) + var wg sync.WaitGroup + for i := 0; i < gr; i++ { + wg.Add(2) + go func() { + for j := 0; j < iters; j++ { + m.Lock() + v++ + m.Unlock() + } + wg.Done() + }() + go func() { + local := int64(0) + for j := 0; j < iters; j++ { + if m.TryLock() { + v++ + m.Unlock() + local++ + } + } + atomic.AddInt64(&tryTotal, local) + wg.Done() + }() + } + + wg.Wait() + + t.Logf("tryTotal = %d", tryTotal) + total += tryTotal + + if v != total { + t.Fatalf("Bad count: got %v, want %v", v, total) + } +} + +// BenchmarkTmutex is equivalent to TestMutualExclusion, with the following +// differences: +// +// - The number of goroutines is variable, with the maximum value depending on +// GOMAXPROCS. +// +// - The number of iterations per benchmark is controlled by the benchmarking +// framework. +// +// - Care is taken to ensure that all goroutines participating in the benchmark +// have been created before the benchmark begins. +func BenchmarkTmutex(b *testing.B) { + for n, max := 1, 4*runtime.GOMAXPROCS(0); n > 0 && n <= max; n *= 2 { + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + var m Mutex + m.Init() + + var ready sync.WaitGroup + begin := make(chan struct{}) + var end sync.WaitGroup + for i := 0; i < n; i++ { + ready.Add(1) + end.Add(1) + go func() { + ready.Done() + <-begin + for j := 0; j < b.N; j++ { + m.Lock() + m.Unlock() + } + end.Done() + }() + } + + ready.Wait() + b.ResetTimer() + close(begin) + end.Wait() + }) + } +} + +// BenchmarkSyncMutex is equivalent to BenchmarkTmutex, but uses sync.Mutex as +// a comparison point. 
+func BenchmarkSyncMutex(b *testing.B) { + for n, max := 1, 4*runtime.GOMAXPROCS(0); n > 0 && n <= max; n *= 2 { + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + var m sync.Mutex + + var ready sync.WaitGroup + begin := make(chan struct{}) + var end sync.WaitGroup + for i := 0; i < n; i++ { + ready.Add(1) + end.Add(1) + go func() { + ready.Done() + <-begin + for j := 0; j < b.N; j++ { + m.Lock() + m.Unlock() + } + end.Done() + }() + } + + ready.Wait() + b.ResetTimer() + close(begin) + end.Wait() + }) + } +} diff --git a/pkg/unet/BUILD b/pkg/unet/BUILD new file mode 100644 index 000000000..e8e40315a --- /dev/null +++ b/pkg/unet/BUILD @@ -0,0 +1,26 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "unet", + srcs = [ + "unet.go", + "unet_unsafe.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/unet", + visibility = ["//visibility:public"], + deps = [ + "//pkg/abi/linux", + "//pkg/gate", + ], +) + +go_test( + name = "unet_test", + size = "small", + srcs = [ + "unet_test.go", + ], + embed = [":unet"], +) diff --git a/pkg/unet/unet.go b/pkg/unet/unet.go new file mode 100644 index 000000000..59b6c5568 --- /dev/null +++ b/pkg/unet/unet.go @@ -0,0 +1,569 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package unet provides a minimal net package based on Unix Domain Sockets. 
+// +// This does no pooling, and should only be used for a limited number of +// connections in a Go process. Don't use this package for arbitrary servers. +package unet + +import ( + "errors" + "sync/atomic" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/gate" +) + +// backlog is used for the listen request. +const backlog = 16 + +// errClosing is returned by wait if the Socket is in the process of closing. +var errClosing = errors.New("Socket is closing") + +// errMessageTruncated indicates that data was lost because the provided buffer +// was too small. +var errMessageTruncated = errors.New("message truncated") + +// socketType returns the appropriate type. +func socketType(packet bool) int { + if packet { + return syscall.SOCK_SEQPACKET + } + return syscall.SOCK_STREAM +} + +// socket creates a new host socket. +func socket(packet bool) (int, error) { + // Make a new socket. + fd, err := syscall.Socket(syscall.AF_UNIX, socketType(packet), 0) + if err != nil { + return 0, err + } + + return fd, nil +} + +// eventFD returns a new event FD with initial value 0. +func eventFD() (int, error) { + f, _, e := syscall.Syscall(syscall.SYS_EVENTFD2, 0, 0, 0) + if e != 0 { + return -1, e + } + return int(f), nil +} + +// Socket is a connected unix domain socket. +type Socket struct { + // gate protects use of fd. + gate gate.Gate + + // fd is the bound socket. + // + // fd must be read atomically, and only remains valid if read while + // within gate. + fd int32 + + // efd is an event FD that is signaled when the socket is closing. + // + // efd is immutable and remains valid until Close/Release. + efd int + + // race is an atomic variable used to avoid triggering the race + // detector. See comment in SocketPair below. + race *int32 +} + +// NewSocket returns a socket from an existing FD. +// +// NewSocket takes ownership of fd. +func NewSocket(fd int) (*Socket, error) { + // fd must be non-blocking for non-blocking syscall.Accept in + // ServerSocket.Accept. 
+ if err := syscall.SetNonblock(fd, true); err != nil { + return nil, err + } + + efd, err := eventFD() + if err != nil { + return nil, err + } + + return &Socket{ + fd: int32(fd), + efd: efd, + }, nil +} + +// finish completes use of s.fd by evicting any waiters, closing the gate, and +// closing the event FD. +func (s *Socket) finish() error { + // Signal any blocked or future polls. + // + // N.B. eventfd writes must be 8 bytes. + if _, err := syscall.Write(s.efd, []byte{1, 0, 0, 0, 0, 0, 0, 0}); err != nil { + return err + } + + // Close the gate, blocking until all FD users leave. + s.gate.Close() + + return syscall.Close(s.efd) +} + +// Close closes the socket. +func (s *Socket) Close() error { + // Set the FD in the socket to -1, to ensure that all future calls to + // FD/Release get nothing and Close calls return immediately. + fd := int(atomic.SwapInt32(&s.fd, -1)) + if fd < 0 { + // Already closed or closing. + return syscall.EBADF + } + + // Shutdown the socket to cancel any pending accepts. + s.shutdown(fd) + + if err := s.finish(); err != nil { + return err + } + + return syscall.Close(fd) +} + +// Release releases ownership of the socket FD. +// +// The returned FD is non-blocking. +// +// Any concurrent or future callers of Socket methods will receive EBADF. +func (s *Socket) Release() (int, error) { + // Set the FD in the socket to -1, to ensure that all future calls to + // FD/Release get nothing and Close calls return immediately. + fd := int(atomic.SwapInt32(&s.fd, -1)) + if fd < 0 { + // Already closed or closing. + return -1, syscall.EBADF + } + + if err := s.finish(); err != nil { + return -1, err + } + + return fd, nil +} + +// FD returns the FD for this Socket. +// +// The FD is non-blocking and must not be made blocking. +// +// N.B. os.File.Fd makes the FD blocking. Use of Release instead of FD is +// strongly preferred. +// +// The returned FD cannot be used safely if there may be concurrent callers to +// Close or Release. 
+// +// Use Release to take ownership of the FD. +func (s *Socket) FD() int { + return int(atomic.LoadInt32(&s.fd)) +} + +// enterFD enters the FD gate and returns the FD value. +// +// If enterFD returns ok, s.gate.Leave must be called when done with the FD. +// Callers may only block while within the gate using s.wait. +// +// The returned FD is guaranteed to remain valid until s.gate.Leave. +func (s *Socket) enterFD() (int, bool) { + if !s.gate.Enter() { + return -1, false + } + + fd := int(atomic.LoadInt32(&s.fd)) + if fd < 0 { + s.gate.Leave() + return -1, false + } + + return fd, true +} + +// SocketPair creates a pair of connected sockets. +func SocketPair(packet bool) (*Socket, *Socket, error) { + // Make a new pair. + fds, err := syscall.Socketpair(syscall.AF_UNIX, socketType(packet), 0) + if err != nil { + return nil, nil, err + } + + // race is an atomic variable used to avoid triggering the race + // detector. We have to fool TSAN into thinking there is a race + // variable between our two sockets. We only use SocketPair in tests + // anyway. + // + // NOTE: This is purely due to the fact that the raw + // syscall does not serve as a boundary for the sanitizer. + var race int32 + a, err := NewSocket(fds[0]) + if err != nil { + syscall.Close(fds[0]) + syscall.Close(fds[1]) + return nil, nil, err + } + a.race = &race + b, err := NewSocket(fds[1]) + if err != nil { + a.Close() + syscall.Close(fds[1]) + return nil, nil, err + } + b.race = &race + return a, b, nil +} + +// Connect connects to a server. +func Connect(addr string, packet bool) (*Socket, error) { + fd, err := socket(packet) + if err != nil { + return nil, err + } + + // Connect the socket. + usa := &syscall.SockaddrUnix{Name: addr} + if err := syscall.Connect(fd, usa); err != nil { + syscall.Close(fd) + return nil, err + } + + return NewSocket(fd) +} + +// ControlMessage wraps around a byte array and provides functions for parsing +// as a Unix Domain Socket control message. 
+type ControlMessage []byte
+
+// EnableFDs enables receiving FDs via control message.
+//
+// This guarantees only a MINIMUM number of FDs received. You may receive MORE
+// than this due to the way FDs are packed. To be specific, the number of
+// receivable buffers will be rounded up to the nearest even number.
+//
+// This must be called prior to ReadVec if you want to receive FDs.
+func (c *ControlMessage) EnableFDs(count int) {
+	// CmsgSpace takes a byte count; each FD is a 4-byte int32 on Linux.
+	*c = make([]byte, syscall.CmsgSpace(count*4))
+}
+
+// ExtractFDs returns the list of FDs in the control message.
+//
+// Either this or CloseFDs should be used after EnableFDs.
+func (c *ControlMessage) ExtractFDs() ([]int, error) {
+	msgs, err := syscall.ParseSocketControlMessage(*c)
+	if err != nil {
+		return nil, err
+	}
+	var fds []int
+	for _, msg := range msgs {
+		thisFds, err := syscall.ParseUnixRights(&msg)
+		if err != nil {
+			// Different control message.
+			return nil, err
+		}
+		for _, fd := range thisFds {
+			// Skip invalid (negative) descriptors.
+			if fd >= 0 {
+				fds = append(fds, fd)
+			}
+		}
+	}
+	return fds, nil
+}
+
+// CloseFDs closes the list of FDs in the control message.
+//
+// Either this or ExtractFDs should be used after EnableFDs.
+func (c *ControlMessage) CloseFDs() {
+	// Best effort: parse errors are ignored and any FDs that were
+	// recovered are still closed.
+	fds, _ := c.ExtractFDs()
+	for _, fd := range fds {
+		if fd >= 0 {
+			syscall.Close(fd)
+		}
+	}
+}
+
+// PackFDs packs the given list of FDs in the control message.
+//
+// This must be used prior to WriteVec.
+func (c *ControlMessage) PackFDs(fds ...int) {
+	*c = ControlMessage(syscall.UnixRights(fds...))
+}
+
+// UnpackFDs clears the control message.
+func (c *ControlMessage) UnpackFDs() {
+	*c = nil
+}
+
+// SocketWriter wraps an individual send operation.
+//
+// The normal entrypoint is WriteVec.
+type SocketWriter struct {
+	// socket is the Socket the write targets.
+	socket *Socket
+	// to is the raw destination address, if any.
+	to []byte
+	// blocking selects blocking vs non-blocking write behavior.
+	blocking bool
+	// race mirrors Socket.race; see the comment in SocketPair.
+	race *int32
+
+	ControlMessage
+}
+
+// Writer returns a writer for this socket.
+func (s *Socket) Writer(blocking bool) SocketWriter { + return SocketWriter{socket: s, blocking: blocking, race: s.race} +} + +// Write implements io.Writer.Write. +func (s *Socket) Write(p []byte) (int, error) { + r := s.Writer(true) + return r.WriteVec([][]byte{p}) +} + +// GetSockOpt gets the given socket option. +func (s *Socket) GetSockOpt(level int, name int, b []byte) (uint32, error) { + fd, ok := s.enterFD() + if !ok { + return 0, syscall.EBADF + } + defer s.gate.Leave() + + return getsockopt(fd, level, name, b) +} + +// SetSockOpt sets the given socket option. +func (s *Socket) SetSockOpt(level, name int, b []byte) error { + fd, ok := s.enterFD() + if !ok { + return syscall.EBADF + } + defer s.gate.Leave() + + return setsockopt(fd, level, name, b) +} + +// GetSockName returns the socket name. +func (s *Socket) GetSockName() ([]byte, error) { + fd, ok := s.enterFD() + if !ok { + return nil, syscall.EBADF + } + defer s.gate.Leave() + + var buf []byte + l := syscall.SizeofSockaddrAny + + for { + // If the buffer is not large enough, allocate a new one with the hint. + buf = make([]byte, l) + l, err := getsockname(fd, buf) + if err != nil { + return nil, err + } + + if l <= uint32(len(buf)) { + return buf[:l], nil + } + } +} + +// GetPeerName returns the peer name. +func (s *Socket) GetPeerName() ([]byte, error) { + fd, ok := s.enterFD() + if !ok { + return nil, syscall.EBADF + } + defer s.gate.Leave() + + var buf []byte + l := syscall.SizeofSockaddrAny + + for { + // See above. + buf = make([]byte, l) + l, err := getpeername(fd, buf) + if err != nil { + return nil, err + } + + if l <= uint32(len(buf)) { + return buf[:l], nil + } + } +} + +// GetPeerCred returns the peer's unix credentials. 
+func (s *Socket) GetPeerCred() (*syscall.Ucred, error) { + fd, ok := s.enterFD() + if !ok { + return nil, syscall.EBADF + } + defer s.gate.Leave() + + return syscall.GetsockoptUcred(fd, syscall.SOL_SOCKET, syscall.SO_PEERCRED) +} + +// SocketReader wraps an individual receive operation. +// +// This may be used for doing vectorized reads and/or sending additional +// control messages (e.g. FDs). The normal entrypoint is ReadVec. +// +// One of ExtractFDs or DisposeFDs must be called if EnableFDs is used. +type SocketReader struct { + socket *Socket + source []byte + blocking bool + race *int32 + + ControlMessage +} + +// Reader returns a reader for this socket. +func (s *Socket) Reader(blocking bool) SocketReader { + return SocketReader{socket: s, blocking: blocking, race: s.race} +} + +// Read implements io.Reader.Read. +func (s *Socket) Read(p []byte) (int, error) { + r := s.Reader(true) + return r.ReadVec([][]byte{p}) +} + +func (s *Socket) shutdown(fd int) error { + // Shutdown the socket to cancel any pending accepts. + return syscall.Shutdown(fd, syscall.SHUT_RDWR) +} + +// Shutdown closes the socket for read and write. +func (s *Socket) Shutdown() error { + fd, ok := s.enterFD() + if !ok { + return syscall.EBADF + } + defer s.gate.Leave() + + return s.shutdown(fd) +} + +// ServerSocket is a bound unix domain socket. +type ServerSocket struct { + socket *Socket +} + +// NewServerSocket returns a socket from an existing FD. +func NewServerSocket(fd int) (*ServerSocket, error) { + s, err := NewSocket(fd) + if err != nil { + return nil, err + } + return &ServerSocket{socket: s}, nil +} + +// Bind creates and binds a new socket. +func Bind(addr string, packet bool) (*ServerSocket, error) { + fd, err := socket(packet) + if err != nil { + return nil, err + } + + // Do the bind. 
+ usa := &syscall.SockaddrUnix{Name: addr} + if err := syscall.Bind(fd, usa); err != nil { + syscall.Close(fd) + return nil, err + } + + return NewServerSocket(fd) +} + +// BindAndListen creates, binds and listens on a new socket. +func BindAndListen(addr string, packet bool) (*ServerSocket, error) { + s, err := Bind(addr, packet) + if err != nil { + return nil, err + } + + // Start listening. + if err := s.Listen(); err != nil { + s.Close() + return nil, err + } + + return s, nil +} + +// Listen starts listening on the socket. +func (s *ServerSocket) Listen() error { + fd, ok := s.socket.enterFD() + if !ok { + return syscall.EBADF + } + defer s.socket.gate.Leave() + + return syscall.Listen(fd, backlog) +} + +// Accept accepts a new connection. +// +// This is always blocking. +// +// Preconditions: +// * ServerSocket is listening (Listen called). +func (s *ServerSocket) Accept() (*Socket, error) { + fd, ok := s.socket.enterFD() + if !ok { + return nil, syscall.EBADF + } + defer s.socket.gate.Leave() + + for { + nfd, _, err := syscall.Accept(fd) + switch err { + case nil: + return NewSocket(nfd) + case syscall.EAGAIN: + err = s.socket.wait(false) + if err == errClosing { + err = syscall.EBADF + } + } + if err != nil { + return nil, err + } + } +} + +// Close closes the server socket. +// +// This must only be called once. +func (s *ServerSocket) Close() error { + return s.socket.Close() +} + +// FD returns the socket's file descriptor. +// +// See Socket.FD. +func (s *ServerSocket) FD() int { + return s.socket.FD() +} + +// Release releases ownership of the socket's file descriptor. +// +// See Socket.Release. +func (s *ServerSocket) Release() (int, error) { + return s.socket.Release() +} diff --git a/pkg/unet/unet_test.go b/pkg/unet/unet_test.go new file mode 100644 index 000000000..6c546825f --- /dev/null +++ b/pkg/unet/unet_test.go @@ -0,0 +1,693 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package unet + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sync" + "syscall" + "testing" + "time" +) + +func randomFilename() (string, error) { + // Return a randomly generated file in the test dir. + f, err := ioutil.TempFile("", "unet-test") + if err != nil { + return "", err + } + file := f.Name() + os.Remove(file) + f.Close() + + cwd, err := os.Getwd() + if err != nil { + return "", err + } + + // NOTE: We try to use relative path if possible. This is + // to help conforming to the unix path length limit. 
+ if rel, err := filepath.Rel(cwd, file); err == nil { + return rel, nil + } + + return file, nil +} + +func TestConnectFailure(t *testing.T) { + name, err := randomFilename() + if err != nil { + t.Fatalf("unable to generate file, got err %v expected nil", err) + } + + if _, err := Connect(name, false); err == nil { + t.Fatalf("connect was successful, expected err") + } +} + +func TestBindFailure(t *testing.T) { + name, err := randomFilename() + if err != nil { + t.Fatalf("unable to generate file, got err %v expected nil", err) + } + + ss, err := BindAndListen(name, false) + if err != nil { + t.Fatalf("first bind failed, got err %v expected nil", err) + } + defer ss.Close() + + if _, err = BindAndListen(name, false); err == nil { + t.Fatalf("second bind succeeded, expected non-nil err") + } +} + +func TestMultipleAccept(t *testing.T) { + name, err := randomFilename() + if err != nil { + t.Fatalf("unable to generate file, got err %v expected nil", err) + } + + ss, err := BindAndListen(name, false) + if err != nil { + t.Fatalf("first bind failed, got err %v expected nil", err) + } + defer ss.Close() + + // Connect backlog times asynchronously. + var wg sync.WaitGroup + defer wg.Wait() + for i := 0; i < backlog; i++ { + wg.Add(1) + go func() { + defer wg.Done() + s, err := Connect(name, false) + if err != nil { + t.Fatalf("connect failed, got err %v expected nil", err) + } + s.Close() + }() + } + + // Accept backlog times. + for i := 0; i < backlog; i++ { + s, err := ss.Accept() + if err != nil { + t.Errorf("accept failed, got err %v expected nil", err) + continue + } + s.Close() + } +} + +func TestServerClose(t *testing.T) { + name, err := randomFilename() + if err != nil { + t.Fatalf("unable to generate file, got err %v expected nil", err) + } + + ss, err := BindAndListen(name, false) + if err != nil { + t.Fatalf("first bind failed, got err %v expected nil", err) + } + + // Make sure the first close succeeds. 
+ if err := ss.Close(); err != nil { + t.Fatalf("first close failed, got err %v expected nil", err) + } + + // The second one should fail. + if err := ss.Close(); err == nil { + t.Fatalf("second close succeeded, expected non-nil err") + } +} + +func socketPair(t *testing.T, packet bool) (*Socket, *Socket) { + name, err := randomFilename() + if err != nil { + t.Fatalf("unable to generate file, got err %v expected nil", err) + } + + // Bind a server. + ss, err := BindAndListen(name, packet) + if err != nil { + t.Fatalf("error binding, got %v expected nil", err) + } + defer ss.Close() + + // Accept a client. + acceptSocket := make(chan *Socket) + acceptErr := make(chan error) + go func() { + server, err := ss.Accept() + if err != nil { + acceptErr <- err + } + acceptSocket <- server + }() + + // Connect the client. + client, err := Connect(name, packet) + if err != nil { + t.Fatalf("error connecting, got %v expected nil", err) + } + + // Grab the server handle. + select { + case server := <-acceptSocket: + return server, client + case err := <-acceptErr: + t.Fatalf("accept error: %v", err) + } + panic("unreachable") +} + +func TestSendRecv(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + // Write on the client. + w := client.Writer(true) + if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil { + t.Fatalf("for client write, got n=%d err=%v, expected n=1 err=nil", n, err) + } + + // Read on the server. + b := [][]byte{{'b'}} + r := server.Reader(true) + if n, err := r.ReadVec(b); n != 1 || err != nil { + t.Fatalf("for server read, got n=%d err=%v, expected n=1 err=nil", n, err) + } + if b[0][0] != 'a' { + t.Fatalf("got bad read data, got %c, expected a", b[0][0]) + } +} + +// TestSymmetric exists to assert that the two sockets received from socketPair +// are interchangeable. They should be, this just provides a basic sanity check +// by running TestSendRecv "backwards". 
+func TestSymmetric(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + // Write on the server. + w := server.Writer(true) + if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil { + t.Fatalf("for server write, got n=%d err=%v, expected n=1 err=nil", n, err) + } + + // Read on the client. + b := [][]byte{{'b'}} + r := client.Reader(true) + if n, err := r.ReadVec(b); n != 1 || err != nil { + t.Fatalf("for client read, got n=%d err=%v, expected n=1 err=nil", n, err) + } + if b[0][0] != 'a' { + t.Fatalf("got bad read data, got %c, expected a", b[0][0]) + } +} + +func TestPacket(t *testing.T) { + server, client := socketPair(t, true) + defer server.Close() + defer client.Close() + + // Write on the client. + w := client.Writer(true) + if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil { + t.Fatalf("for client write, got n=%d err=%v, expected n=1 err=nil", n, err) + } + + // Write on the client again. + w = client.Writer(true) + if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil { + t.Fatalf("for client write, got n=%d err=%v, expected n=1 err=nil", n, err) + } + + // Read on the server. + // + // This should only get back a single byte, despite the buffer + // being size two. This is because it's a _packet_ buffer. + b := [][]byte{{'b', 'b'}} + r := server.Reader(true) + if n, err := r.ReadVec(b); n != 1 || err != nil { + t.Fatalf("for server read, got n=%d err=%v, expected n=1 err=nil", n, err) + } + if b[0][0] != 'a' { + t.Fatalf("got bad read data, got %c, expected a", b[0][0]) + } + + // Do it again. 
+ r = server.Reader(true) + if n, err := r.ReadVec(b); n != 1 || err != nil { + t.Fatalf("for server read, got n=%d err=%v, expected n=1 err=nil", n, err) + } + if b[0][0] != 'a' { + t.Fatalf("got bad read data, got %c, expected a", b[0][0]) + } +} + +func TestClose(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + + // Make sure the first close succeeds. + if err := client.Close(); err != nil { + t.Fatalf("first close failed, got err %v expected nil", err) + } + + // The second one should fail. + if err := client.Close(); err == nil { + t.Fatalf("second close succeeded, expected non-nil err") + } +} + +func TestNonBlockingSend(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + // Try up to 1000 writes, of 1000 bytes. + blockCount := 0 + for i := 0; i < 1000; i++ { + w := client.Writer(false) + if n, err := w.WriteVec([][]byte{make([]byte, 1000)}); n != 1000 || err != nil { + if err == syscall.EWOULDBLOCK || err == syscall.EAGAIN { + // We're good. That's what we wanted. + blockCount++ + } else { + t.Fatalf("for client write, got n=%d err=%v, expected n=1000 err=nil", n, err) + } + } + } + + if blockCount == 1000 { + // Shouldn't have _always_ blocked. + t.Fatalf("socket always blocked!") + } else if blockCount == 0 { + // Should have started blocking eventually. + t.Fatalf("socket never blocked!") + } +} + +func TestNonBlockingRecv(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + b := [][]byte{{'b'}} + r := client.Reader(false) + + // Expected to block immediately. + _, err := r.ReadVec(b) + if err != syscall.EWOULDBLOCK && err != syscall.EAGAIN { + t.Fatalf("read didn't block, got err %v expected blocking err", err) + } + + // Put some data in the pipe. 
+ w := server.Writer(false) + if n, err := w.WriteVec(b); n != 1 || err != nil { + t.Fatalf("write failed with n=%d err=%v, expected n=1 err=nil", n, err) + } + + // Expect it not to block. + if n, err := r.ReadVec(b); n != 1 || err != nil { + t.Fatalf("read failed with n=%d err=%v, expected n=1 err=nil", n, err) + } + + // Expect it to return a block error again. + r = client.Reader(false) + _, err = r.ReadVec(b) + if err != syscall.EWOULDBLOCK && err != syscall.EAGAIN { + t.Fatalf("read didn't block, got err %v expected blocking err", err) + } +} + +func TestRecvVectors(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + // Write on the client. + w := client.Writer(true) + if n, err := w.WriteVec([][]byte{{'a', 'b'}}); n != 2 || err != nil { + t.Fatalf("for client write, got n=%d err=%v, expected n=2 err=nil", n, err) + } + + // Read on the server. + b := [][]byte{{'c'}, {'c'}} + r := server.Reader(true) + if n, err := r.ReadVec(b); n != 2 || err != nil { + t.Fatalf("for server read, got n=%d err=%v, expected n=2 err=nil", n, err) + } + if b[0][0] != 'a' || b[1][0] != 'b' { + t.Fatalf("got bad read data, got %c,%c, expected a,b", b[0][0], b[1][0]) + } +} + +func TestSendVectors(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + // Write on the client. + w := client.Writer(true) + if n, err := w.WriteVec([][]byte{{'a'}, {'b'}}); n != 2 || err != nil { + t.Fatalf("for client write, got n=%d err=%v, expected n=2 err=nil", n, err) + } + + // Read on the server. 
+ b := [][]byte{{'c', 'c'}} + r := server.Reader(true) + if n, err := r.ReadVec(b); n != 2 || err != nil { + t.Fatalf("for server read, got n=%d err=%v, expected n=2 err=nil", n, err) + } + if b[0][0] != 'a' || b[0][1] != 'b' { + t.Fatalf("got bad read data, got %c,%c, expected a,b", b[0][0], b[0][1]) + } +} + +func TestSendFDsNotEnabled(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + // Write on the server. + w := server.Writer(true) + w.PackFDs(0, 1, 2) + if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil { + t.Fatalf("for server write, got n=%d err=%v, expected n=1 err=nil", n, err) + } + + // Read on the client, without enabling FDs. + b := [][]byte{{'b'}} + r := client.Reader(true) + if n, err := r.ReadVec(b); n != 1 || err != nil { + t.Fatalf("for client read, got n=%d err=%v, expected n=1 err=nil", n, err) + } + if b[0][0] != 'a' { + t.Fatalf("got bad read data, got %c, expected a", b[0][0]) + } + + // Make sure the FDs are not received. + fds, err := r.ExtractFDs() + if len(fds) != 0 || err != nil { + t.Fatalf("got fds=%v err=%v, expected len(fds)=0 err=nil", fds, err) + } +} + +func sendFDs(t *testing.T, s *Socket, fds []int) { + w := s.Writer(true) + w.PackFDs(fds...) + if n, err := w.WriteVec([][]byte{{'a'}}); n != 1 || err != nil { + t.Fatalf("for write, got n=%d err=%v, expected n=1 err=nil", n, err) + } +} + +func recvFDs(t *testing.T, s *Socket, enableSize int, origFDs []int) { + expected := len(origFDs) + + // Count the number of FDs. + preEntries, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + t.Fatalf("can't readdir, got err %v expected nil", err) + } + + // Read on the client. 
+ b := [][]byte{{'b'}} + r := s.Reader(true) + if enableSize >= 0 { + r.EnableFDs(enableSize) + } + if n, err := r.ReadVec(b); n != 1 || err != nil { + t.Fatalf("for client read, got n=%d err=%v, expected n=1 err=nil", n, err) + } + if b[0][0] != 'a' { + t.Fatalf("got bad read data, got %c, expected a", b[0][0]) + } + + // Count the new number of FDs. + postEntries, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + t.Fatalf("can't readdir, got err %v expected nil", err) + } + if len(preEntries)+expected != len(postEntries) { + t.Errorf("process fd count isn't right, expected %d got %d", len(preEntries)+expected, len(postEntries)) + } + + // Make sure the FDs are there. + fds, err := r.ExtractFDs() + if len(fds) != expected || err != nil { + t.Fatalf("got fds=%v err=%v, expected len(fds)=%d err=nil", fds, err, expected) + } + + // Make sure they are different from the originals. + for i := 0; i < len(fds); i++ { + if fds[i] == origFDs[i] { + t.Errorf("got original fd for index %d, expected different", i) + } + } + + // Make sure they can be accessed as expected. + for i := 0; i < len(fds); i++ { + var st syscall.Stat_t + if err := syscall.Fstat(fds[i], &st); err != nil { + t.Errorf("fds[%d] can't be stated, got err %v expected nil", i, err) + } + } + + // Close them off. + r.CloseFDs() + + // Make sure the count is back to normal. 
+ finalEntries, err := ioutil.ReadDir("/proc/self/fd") + if err != nil { + t.Fatalf("can't readdir, got err %v expected nil", err) + } + if len(finalEntries) != len(preEntries) { + t.Errorf("process fd count isn't right, expected %d got %d", len(preEntries), len(finalEntries)) + } +} + +func TestFDsSingle(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + sendFDs(t, server, []int{0}) + recvFDs(t, client, 1, []int{0}) +} + +func TestFDsMultiple(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + // Basic case, multiple FDs. + sendFDs(t, server, []int{0, 1, 2}) + recvFDs(t, client, 3, []int{0, 1, 2}) +} + +// See TestSymmetric above. +func TestFDsSymmetric(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + sendFDs(t, server, []int{0, 1, 2}) + recvFDs(t, client, 3, []int{0, 1, 2}) +} + +func TestFDsReceiveLargeBuffer(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + sendFDs(t, server, []int{0}) + recvFDs(t, client, 3, []int{0}) +} + +func TestFDsReceiveSmallBuffer(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + sendFDs(t, server, []int{0, 1, 2}) + + // Per the spec, we may still receive more than the buffer. In fact, + // it'll be rounded up and we can receive two with a size one buffer. 
+ recvFDs(t, client, 1, []int{0, 1}) +} + +func TestFDsReceiveNotEnabled(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + sendFDs(t, server, []int{0}) + recvFDs(t, client, -1, []int{}) +} + +func TestFDsReceiveSizeZero(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + sendFDs(t, server, []int{0}) + recvFDs(t, client, 0, []int{}) +} + +func TestGetPeerCred(t *testing.T) { + server, client := socketPair(t, false) + defer server.Close() + defer client.Close() + + want := &syscall.Ucred{ + Pid: int32(os.Getpid()), + Uid: uint32(os.Getuid()), + Gid: uint32(os.Getgid()), + } + + if got, err := client.GetPeerCred(); err != nil || !reflect.DeepEqual(got, want) { + t.Errorf("got GetPeerCred() = %v, %v, want = %+v, %+v", got, err, want, nil) + } +} + +func newClosedSocket() (*Socket, error) { + fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + return nil, err + } + + s, err := NewSocket(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + + return s, s.Close() +} + +func TestGetPeerCredFailure(t *testing.T) { + s, err := newClosedSocket() + if err != nil { + t.Fatalf("newClosedSocket got error %v want nil", err) + } + + want := "bad file descriptor" + if _, err := s.GetPeerCred(); err == nil || err.Error() != want { + t.Errorf("got s.GetPeerCred() = %v, want = %s", err, want) + } +} + +func TestAcceptClosed(t *testing.T) { + name, err := randomFilename() + if err != nil { + t.Fatalf("unable to generate file, got err %v expected nil", err) + } + + ss, err := BindAndListen(name, false) + if err != nil { + t.Fatalf("bind failed, got err %v expected nil", err) + } + + if err := ss.Close(); err != nil { + t.Fatalf("close failed, got err %v expected nil", err) + } + + if _, err := ss.Accept(); err == nil { + t.Errorf("accept on closed SocketServer, got err %v, want != nil", err) + } +} + +func 
TestCloseAfterAcceptStart(t *testing.T) { + name, err := randomFilename() + if err != nil { + t.Fatalf("unable to generate file, got err %v expected nil", err) + } + + ss, err := BindAndListen(name, false) + if err != nil { + t.Fatalf("bind failed, got err %v expected nil", err) + } + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + time.Sleep(50 * time.Millisecond) + if err := ss.Close(); err != nil { + t.Fatalf("close failed, got err %v expected nil", err) + } + wg.Done() + }() + + if _, err := ss.Accept(); err == nil { + t.Errorf("accept on closed SocketServer, got err %v, want != nil", err) + } + + wg.Wait() +} + +func TestReleaseAfterAcceptStart(t *testing.T) { + name, err := randomFilename() + if err != nil { + t.Fatalf("unable to generate file, got err %v expected nil", err) + } + + ss, err := BindAndListen(name, false) + if err != nil { + t.Fatalf("bind failed, got err %v expected nil", err) + } + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + time.Sleep(50 * time.Millisecond) + fd, err := ss.Release() + if err != nil { + t.Fatalf("Release failed, got err %v expected nil", err) + } + syscall.Close(fd) + wg.Done() + }() + + if _, err := ss.Accept(); err == nil { + t.Errorf("accept on closed SocketServer, got err %v, want != nil", err) + } + + wg.Wait() +} + +func TestControlMessage(t *testing.T) { + for i := 0; i <= 10; i++ { + var want []int + for j := 0; j < i; j++ { + want = append(want, i+j+1) + } + + var cm ControlMessage + cm.EnableFDs(i) + cm.PackFDs(want...) + got, err := cm.ExtractFDs() + if err != nil || !reflect.DeepEqual(got, want) { + t.Errorf("got cm.ExtractFDs() = %v, %v, want = %v, %v", got, err, want, nil) + } + } +} diff --git a/pkg/unet/unet_unsafe.go b/pkg/unet/unet_unsafe.go new file mode 100644 index 000000000..fa15cf744 --- /dev/null +++ b/pkg/unet/unet_unsafe.go @@ -0,0 +1,287 @@ +// Copyright 2018 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package unet + +import ( + "io" + "math" + "sync/atomic" + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" +) + +// wait blocks until the socket FD is ready for reading or writing, depending +// on the value of write. +// +// Returns errClosing if the Socket is in the process of closing. +func (s *Socket) wait(write bool) error { + for { + // Checking the FD on each loop is not strictly necessary, it + // just avoids an extra poll call. + fd := atomic.LoadInt32(&s.fd) + if fd < 0 { + return errClosing + } + + events := []linux.PollFD{ + { + // The actual socket FD. + FD: fd, + Events: linux.POLLIN, + }, + { + // The eventfd, signaled when we are closing. + FD: int32(s.efd), + Events: linux.POLLIN, + }, + } + if write { + events[0].Events = linux.POLLOUT + } + + _, _, e := syscall.Syscall(syscall.SYS_POLL, uintptr(unsafe.Pointer(&events[0])), 2, uintptr(math.MaxUint64)) + if e == syscall.EINTR { + continue + } + if e != 0 { + return e + } + + if events[1].REvents&linux.POLLIN == linux.POLLIN { + // eventfd signaled, we're closing. + return errClosing + } + + return nil + } +} + +// buildIovec builds an iovec slice from the given []byte slice. +// +// iovecs is used as an initial slice, to avoid excessive allocations. 
+func buildIovec(bufs [][]byte, iovecs []syscall.Iovec) ([]syscall.Iovec, int) { + var length int + for i := range bufs { + if l := len(bufs[i]); l > 0 { + iovecs = append(iovecs, syscall.Iovec{ + Base: &bufs[i][0], + Len: uint64(l), + }) + length += l + } + } + return iovecs, length +} + +// ReadVec implements vecio.Reader.ReadVec. +// +// This function is not guaranteed to read all available data, it +// returns as soon as a single recvmsg call succeeds. +func (r *SocketReader) ReadVec(bufs [][]byte) (int, error) { + iovecs, length := buildIovec(bufs, make([]syscall.Iovec, 0, 2)) + + var msg syscall.Msghdr + if len(r.source) != 0 { + msg.Name = &r.source[0] + msg.Namelen = uint32(len(r.source)) + } + + if len(r.ControlMessage) != 0 { + msg.Control = &r.ControlMessage[0] + msg.Controllen = uint64(len(r.ControlMessage)) + } + + if len(iovecs) != 0 { + msg.Iov = &iovecs[0] + msg.Iovlen = uint64(len(iovecs)) + } + + // n is the bytes received. + var n uintptr + + fd, ok := r.socket.enterFD() + if !ok { + return 0, syscall.EBADF + } + // Leave on returns below. + for { + var e syscall.Errno + + // Try a non-blocking recv first, so we don't give up the go runtime M. + n, _, e = syscall.RawSyscall(syscall.SYS_RECVMSG, uintptr(fd), uintptr(unsafe.Pointer(&msg)), syscall.MSG_DONTWAIT|syscall.MSG_TRUNC) + if e == 0 { + break + } + if e == syscall.EINTR { + continue + } + if !r.blocking { + r.socket.gate.Leave() + return 0, e + } + if e != syscall.EAGAIN && e != syscall.EWOULDBLOCK { + r.socket.gate.Leave() + return 0, e + } + + // Wait for the socket to become readable. 
+ err := r.socket.wait(false) + if err == errClosing { + err = syscall.EBADF + } + if err != nil { + r.socket.gate.Leave() + return 0, err + } + } + + r.socket.gate.Leave() + + if msg.Controllen < uint64(len(r.ControlMessage)) { + r.ControlMessage = r.ControlMessage[:msg.Controllen] + } + + if msg.Namelen < uint32(len(r.source)) { + r.source = r.source[:msg.Namelen] + } + + // All unet sockets are SOCK_STREAM or SOCK_SEQPACKET, both of which + // indicate that the other end is closed by returning a 0-length read + // with no error. + if n == 0 { + return 0, io.EOF + } + + if r.race != nil { + // See comments on Socket.race. + atomic.AddInt32(r.race, 1) + } + + if int(n) > length { + return length, errMessageTruncated + } + + return int(n), nil +} + +// WriteVec implements vecio.Writer.WriteVec. +// +// This function is not guaranteed to send all data, it returns +// as soon as a single sendmsg call succeeds. +func (w *SocketWriter) WriteVec(bufs [][]byte) (int, error) { + iovecs, _ := buildIovec(bufs, make([]syscall.Iovec, 0, 2)) + + if w.race != nil { + // See comments on Socket.race. + atomic.AddInt32(w.race, 1) + } + + var msg syscall.Msghdr + if len(w.to) != 0 { + msg.Name = &w.to[0] + msg.Namelen = uint32(len(w.to)) + } + + if len(w.ControlMessage) != 0 { + msg.Control = &w.ControlMessage[0] + msg.Controllen = uint64(len(w.ControlMessage)) + } + + if len(iovecs) > 0 { + msg.Iov = &iovecs[0] + msg.Iovlen = uint64(len(iovecs)) + } + + fd, ok := w.socket.enterFD() + if !ok { + return 0, syscall.EBADF + } + // Leave on returns below. + for { + // Try a non-blocking send first, so we don't give up the go runtime M. 
+ n, _, e := syscall.RawSyscall(syscall.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(&msg)), syscall.MSG_DONTWAIT|syscall.MSG_NOSIGNAL) + if e == 0 { + w.socket.gate.Leave() + return int(n), nil + } + if e == syscall.EINTR { + continue + } + if !w.blocking { + w.socket.gate.Leave() + return 0, e + } + if e != syscall.EAGAIN && e != syscall.EWOULDBLOCK { + w.socket.gate.Leave() + return 0, e + } + + // Wait for the socket to become writeable. + err := w.socket.wait(true) + if err == errClosing { + err = syscall.EBADF + } + if err != nil { + w.socket.gate.Leave() + return 0, err + } + } + // Unreachable, no s.gate.Leave needed. +} + +// getsockopt issues a getsockopt syscall. +func getsockopt(fd int, level int, optname int, buf []byte) (uint32, error) { + l := uint32(len(buf)) + _, _, e := syscall.RawSyscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&l)), 0) + if e != 0 { + return 0, e + } + + return l, nil +} + +// setsockopt issues a setsockopt syscall. +func setsockopt(fd int, level int, optname int, buf []byte) error { + _, _, e := syscall.RawSyscall6(syscall.SYS_SETSOCKOPT, uintptr(fd), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)), 0) + if e != 0 { + return e + } + + return nil +} + +// getsockname issues a getsockname syscall. +func getsockname(fd int, buf []byte) (uint32, error) { + l := uint32(len(buf)) + _, _, e := syscall.RawSyscall(syscall.SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&l))) + if e != 0 { + return 0, e + } + + return l, nil +} + +// getpeername issues a getpeername syscall. 
+func getpeername(fd int, buf []byte) (uint32, error) { + l := uint32(len(buf)) + _, _, e := syscall.RawSyscall(syscall.SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&l))) + if e != 0 { + return 0, e + } + + return l, nil +} diff --git a/pkg/urpc/BUILD b/pkg/urpc/BUILD new file mode 100644 index 000000000..b29b25637 --- /dev/null +++ b/pkg/urpc/BUILD @@ -0,0 +1,22 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "urpc", + srcs = ["urpc.go"], + importpath = "gvisor.googlesource.com/gvisor/pkg/urpc", + visibility = ["//:sandbox"], + deps = [ + "//pkg/log", + "//pkg/unet", + ], +) + +go_test( + name = "urpc_test", + size = "small", + srcs = ["urpc_test.go"], + embed = [":urpc"], + deps = ["//pkg/unet"], +) diff --git a/pkg/urpc/urpc.go b/pkg/urpc/urpc.go new file mode 100644 index 000000000..adf643084 --- /dev/null +++ b/pkg/urpc/urpc.go @@ -0,0 +1,602 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package urpc provides a minimal RPC package based on unet. +// +// RPC requests are _not_ concurrent and methods must be explicitly +// registered. However, files may be send as part of the payload. 
+package urpc + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "reflect" + "runtime" + "sync" + + "gvisor.googlesource.com/gvisor/pkg/log" + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +// maxFiles determines the maximum file payload. +const maxFiles = 16 + +// ErrTooManyFiles is returned when too many file descriptors are mapped. +var ErrTooManyFiles = errors.New("too many files") + +// ErrUnknownMethod is returned when a method is not known. +var ErrUnknownMethod = errors.New("unknown method") + +// errStopped is an internal error indicating the server has been stopped. +var errStopped = errors.New("stopped") + +// RemoteError is an error returned by the remote invocation. +// +// This indicates that the RPC transport was correct, but that the called +// function itself returned an error. +type RemoteError struct { + // Message is the result of calling Error() on the remote error. + Message string +} + +// Error returns the remote error string. +func (r RemoteError) Error() string { + return r.Message +} + +// FilePayload may be _embedded_ in another type in order to send or receive a +// file as a result of an RPC. These are not actually serialized, rather they +// are sent via an accompanying SCM_RIGHTS message (plumbed through the unet +// package). +type FilePayload struct { + Files []*os.File `json:"-"` +} + +// filePayload returns the file. It may be nil. +func (f *FilePayload) filePayload() []*os.File { + return f.Files +} + +// setFilePayload sets the payload. +func (f *FilePayload) setFilePayload(fs []*os.File) { + f.Files = fs +} + +// closeAll closes a slice of files. +func closeAll(files []*os.File) { + for _, f := range files { + f.Close() + } +} + +// filePayloader is implemented only by FilePayload and will be implicitly +// implemented by types that have the FilePayload embedded. 
Note that there is +// no way to implement these methods other than by embedding FilePayload, due +// to the way unexported method names are mangled. +type filePayloader interface { + filePayload() []*os.File + setFilePayload([]*os.File) +} + +// clientCall is the client=>server method call on the client side. +type clientCall struct { + Method string `json:"method"` + Arg interface{} `json:"arg"` +} + +// serverCall is the client=>server method call on the server side. +type serverCall struct { + Method string `json:"method"` + Arg json.RawMessage `json:"arg"` +} + +// callResult is the server=>client method call result. +type callResult struct { + Success bool `json:"success"` + Err string `json:"err"` + Result interface{} `json:"result"` +} + +// registeredMethod is method registered with the server. +type registeredMethod struct { + // fn is the underlying function. + fn reflect.Value + + // rcvr is the receiver value. + rcvr reflect.Value + + // argType is a typed argument. + argType reflect.Type + + // resultType is also a type result. + resultType reflect.Type +} + +// clientState is client metadata. +// +// The following are valid states: +// +// idle - not processing any requests, no close request. +// processing - actively processing, no close request. +// closeRequested - actively processing, pending close. +// closed - client connection has been closed. +// +// The following transitions are possible: +// +// idle -> processing, closed +// processing -> idle, closeRequested +// closeRequested -> closed +// +type clientState int + +// See clientState. +const ( + idle clientState = iota + processing + closeRequested + closed +) + +// Server is an RPC server. +type Server struct { + // mu protects all fields, except wg. + mu sync.Mutex + + // methods is the set of server methods. + methods map[string]registeredMethod + + // clients is a map of clients. + clients map[*unet.Socket]clientState + + // wg is a wait group for all outstanding clients. 
+ wg sync.WaitGroup +} + +// NewServer returns a new server. +func NewServer() *Server { + return &Server{ + methods: make(map[string]registeredMethod), + clients: make(map[*unet.Socket]clientState), + } +} + +// Register registers the given object as an RPC receiver. +// +// This functions is the same way as the built-in RPC package, but it does not +// tolerate any object with non-conforming methods. Any non-confirming methods +// will lead to an immediate panic, instead of being skipped or an error. +// Panics will also be generated by anonymous objects and duplicate entries. +func (s *Server) Register(obj interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + + typ := reflect.TypeOf(obj) + + // If we got a pointer, deref it to the underlying object. We need this to + // obtain the name of the underlying type. + typDeref := typ + if typ.Kind() == reflect.Ptr { + typDeref = typ.Elem() + } + + for m := 0; m < typ.NumMethod(); m++ { + method := typ.Method(m) + + if typDeref.Name() == "" { + // Can't be anonymous. + panic("type not named.") + } + + prettyName := typDeref.Name() + "." + method.Name + if _, ok := s.methods[prettyName]; ok { + // Duplicate entry. + panic(fmt.Sprintf("method %s is duplicated.", prettyName)) + } + + if method.PkgPath != "" { + // Must be exported. + panic(fmt.Sprintf("method %s is not exported.", prettyName)) + } + mtype := method.Type + if mtype.NumIn() != 3 { + // Need exactly two arguments (+ receiver). + panic(fmt.Sprintf("method %s has wrong number of arguments.", prettyName)) + } + argType := mtype.In(1) + if argType.Kind() != reflect.Ptr { + // Need arg pointer. + panic(fmt.Sprintf("method %s has non-pointer first argument.", prettyName)) + } + resultType := mtype.In(2) + if resultType.Kind() != reflect.Ptr { + // Need result pointer. + panic(fmt.Sprintf("method %s has non-pointer second argument.", prettyName)) + } + if mtype.NumOut() != 1 { + // Need single return. 
+ panic(fmt.Sprintf("method %s has wrong number of returns.", prettyName)) + } + if returnType := mtype.Out(0); returnType != reflect.TypeOf((*error)(nil)).Elem() { + // Need error return. + panic(fmt.Sprintf("method %s has non-error return value.", prettyName)) + } + + // Register the method. + s.methods[prettyName] = registeredMethod{ + fn: method.Func, + rcvr: reflect.ValueOf(obj), + argType: argType, + resultType: resultType, + } + } +} + +// lookup looks up the given method. +func (s *Server) lookup(method string) (registeredMethod, bool) { + s.mu.Lock() + defer s.mu.Unlock() + rm, ok := s.methods[method] + return rm, ok +} + +// handleOne handles a single call. +func (s *Server) handleOne(client *unet.Socket) error { + // Unmarshal the call. + var c serverCall + newFs, err := unmarshal(client, &c) + if err != nil { + // Client is dead. + return err + } + + // Start the request. + if !s.clientBeginRequest(client) { + // Client is dead; don't process this call. + return errStopped + } + defer s.clientEndRequest(client) + + // Lookup the method. + rm, ok := s.lookup(c.Method) + if !ok { + // Try to serialize the error. + return marshal(client, &callResult{Err: ErrUnknownMethod.Error()}, nil) + } + + // Unmarshal the arguments now that we know the type. + na := reflect.New(rm.argType.Elem()) + if err := json.Unmarshal(c.Arg, na.Interface()); err != nil { + return marshal(client, &callResult{Err: err.Error()}, nil) + } + + // Set the file payload as an argument. + if fp, ok := na.Interface().(filePayloader); ok { + fp.setFilePayload(newFs) + } + + // Call the method. + re := reflect.New(rm.resultType.Elem()) + rValues := rm.fn.Call([]reflect.Value{rm.rcvr, na, re}) + if errVal := rValues[0].Interface(); errVal != nil { + return marshal(client, &callResult{Err: errVal.(error).Error()}, nil) + } + + // Set the resulting payload. + var fs []*os.File + if fp, ok := re.Interface().(filePayloader); ok { + fs = fp.filePayload() + if len(fs) > maxFiles { + // Ugh. 
Send an error to the client, despite success. + return marshal(client, &callResult{Err: ErrTooManyFiles.Error()}, nil) + } + } + + // Marshal the result. + return marshal(client, &callResult{Success: true, Result: re.Interface()}, fs) +} + +// clientBeginRequest begins a request. +// +// If true is returned, the request may be processed. If false is returned, +// then the server has been stopped and the request should be skipped. +func (s *Server) clientBeginRequest(client *unet.Socket) bool { + s.mu.Lock() + defer s.mu.Unlock() + switch state := s.clients[client]; state { + case idle: + // Mark as processing. + s.clients[client] = processing + return true + case closed: + // Whoops, how did this happen? Must have closed immediately + // following the deserialization. Don't let the RPC actually go + // through, since we won't be able to serialize a proper + // response. + return false + default: + // Should not happen. + panic(fmt.Sprintf("expected idle or closed, got %d", state)) + } + + // Unreachable. + return false +} + +// clientEndRequest ends a request. +func (s *Server) clientEndRequest(client *unet.Socket) { + s.mu.Lock() + defer s.mu.Unlock() + switch state := s.clients[client]; state { + case processing: + // Return to idle. + s.clients[client] = idle + case closeRequested: + // Close the connection. + client.Close() + s.clients[client] = closed + default: + // Should not happen. + panic(fmt.Sprintf("expected processing or requestClose, got %d", state)) + } +} + +// clientRegister registers a connection. +// +// See Stop for more context. +func (s *Server) clientRegister(client *unet.Socket) { + s.mu.Lock() + defer s.mu.Unlock() + s.clients[client] = idle + s.wg.Add(1) +} + +// clientUnregister unregisters and closes a connection if necessary. +// +// See Stop for more context. +func (s *Server) clientUnregister(client *unet.Socket) { + s.mu.Lock() + defer s.mu.Unlock() + switch state := s.clients[client]; state { + case idle: + // Close the connection. 
+ client.Close() + case closed: + // Already done. + default: + // Should not happen. + panic(fmt.Sprintf("expected idle or closed, got %d", state)) + } + delete(s.clients, client) + s.wg.Done() +} + +// handleRegistered handles calls from a registered client. +func (s *Server) handleRegistered(client *unet.Socket) error { + for { + // Handle one call. + if err := s.handleOne(client); err != nil { + // Client is dead. + return err + } + } +} + +// Handle synchronously handles a single client over a connection. +func (s *Server) Handle(client *unet.Socket) error { + s.clientRegister(client) + defer s.clientUnregister(client) + return s.handleRegistered(client) +} + +// StartHandling creates a goroutine that handles a single client over a +// connection. +func (s *Server) StartHandling(client *unet.Socket) { + s.clientRegister(client) + go func() { // S/R-SAFE: out of scope + defer s.clientUnregister(client) + s.handleRegistered(client) + }() +} + +// Stop safely terminates outstanding clients. +// +// No new requests should be initiated after calling Stop. Existing clients +// will be closed after completing any pending RPCs. This method will block +// until all clients have disconnected. +func (s *Server) Stop() { + // Wait for all outstanding requests. + defer s.wg.Wait() + + // Close all known clients. + s.mu.Lock() + defer s.mu.Unlock() + for client, state := range s.clients { + switch state { + case idle: + // Close connection now. + client.Close() + s.clients[client] = closed + case processing: + // Request close when done. + s.clients[client] = closeRequested + } + } +} + +// Client is a urpc client. +type Client struct { + // mu protects all members. + // + // It also enforces single-call semantics. + mu sync.Mutex + + // Socket is the underlying socket for this client. + // + // This _must_ be provided and must be closed manually by calling + // Close. + Socket *unet.Socket +} + +// NewClient returns a new client. 
func NewClient(socket *unet.Socket) *Client {
	return &Client{
		Socket: socket,
	}
}

// marshal sends the given FD and json struct.
//
// The JSON-encoded v is written to s; any FDs in fs are packed into the
// ancillary data of the first successful write so the peer receives them
// alongside the start of the payload.
func marshal(s *unet.Socket, v interface{}, fs []*os.File) error {
	// Marshal to a buffer.
	data, err := json.Marshal(v)
	if err != nil {
		log.Warningf("urpc: error marshalling %s: %s", fmt.Sprintf("%v", v), err.Error())
		return err
	}

	// Write to the socket.
	w := s.Writer(true)
	if fs != nil {
		var fds []int
		for _, f := range fs {
			fds = append(fds, int(f.Fd()))
		}
		w.PackFDs(fds...)
	}

	// Send. Short writes are retried until the full payload is flushed.
	for n := 0; n < len(data); {
		cur, err := w.WriteVec([][]byte{data[n:]})
		if n == 0 && cur < len(data) {
			// Don't send FDs anymore. This call is only made on
			// the first successful call to WriteVec, assuming cur
			// is not sufficient to fill the entire buffer.
			w.PackFDs()
		}
		n += cur
		if err != nil {
			log.Warningf("urpc: error writing %v: %s", data[n:], err.Error())
			return err
		}
	}

	// We're done sending the fds to the client. Explicitly prevent fs from
	// being GCed until here. Urpc rpcs often unlink the file to send, relying
	// on the kernel to automatically delete it once the last reference is
	// dropped. Until we successfully call sendmsg(2), fs may contain the last
	// references to these files. Without this explicit reference to fs here,
	// the go runtime is free to assume we're done with fs after the fd
	// collection loop above, since it just sees us copying ints.
	runtime.KeepAlive(fs)

	log.Debugf("urpc: successfully marshalled %d bytes.", len(data))
	return nil
}

// unmarshal receives an FD (optional) and unmarshals the given struct.
//
// On success the received FDs are returned as fresh *os.File wrappers; the
// caller owns them. On decode failure any already-received files are closed
// before the error is returned, so nothing leaks.
func unmarshal(s *unet.Socket, v interface{}) ([]*os.File, error) {
	// Receive a single byte. Reading just one byte first lets us extract
	// the ancillary FD data, which the kernel attaches to that read.
	r := s.Reader(true)
	r.EnableFDs(maxFiles)
	firstByte := make([]byte, 1)

	// Extract any FDs that may be there.
	if _, err := r.ReadVec([][]byte{firstByte}); err != nil {
		return nil, err
	}
	fds, err := r.ExtractFDs()
	if err != nil {
		log.Warningf("urpc: error extracting fds: %s", err.Error())
		return nil, err
	}
	var fs []*os.File
	for _, fd := range fds {
		fs = append(fs, os.NewFile(uintptr(fd), "urpc"))
	}

	// Read the rest. The first byte is stitched back in front of the
	// remaining stream so the JSON decoder sees the full payload.
	d := json.NewDecoder(io.MultiReader(bytes.NewBuffer(firstByte), s))
	// urpc internally decodes / re-encodes the data with interface{} as the
	// intermediate type. We have to unmarshal integers to json.Number type
	// instead of the default float type for those intermediate values, such
	// that when they get re-encoded, their values are not printed out in
	// floating-point formats such as 1e9, which could not be decoded to
	// explicitly typed integers later.
	d.UseNumber()
	if err := d.Decode(v); err != nil {
		log.Warningf("urpc: error decoding: %s", err.Error())
		for _, f := range fs {
			f.Close()
		}
		return nil, err
	}

	// All set.
	log.Debugf("urpc: unmarshal success.")
	return fs, nil
}

// Call calls a function.
//
// c.mu serializes the request/response exchange, so only one Call may be
// in flight on a given Client at a time. A server-side failure is surfaced
// as a RemoteError; transport failures return the underlying error.
func (c *Client) Call(method string, arg interface{}, result interface{}) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Are there files to send?
	var fs []*os.File
	if fp, ok := arg.(filePayloader); ok {
		fs = fp.filePayload()
		if len(fs) > maxFiles {
			return ErrTooManyFiles
		}
	}

	// Marshal the data.
	if err := marshal(c.Socket, &clientCall{Method: method, Arg: arg}, fs); err != nil {
		return err
	}

	// Wait for the response.
	callR := callResult{Result: result}
	newFs, err := unmarshal(c.Socket, &callR)
	if err != nil {
		return err
	}

	// Set the file payload. If the result cannot accept files, close any
	// received ones so their descriptors are not leaked.
	if fp, ok := result.(filePayloader); ok {
		fp.setFilePayload(newFs)
	} else {
		closeAll(newFs)
	}

	// Did an error occur?
	if !callR.Success {
		return RemoteError{Message: callR.Err}
	}

	// All set.
	return nil
}

// Close closes the underlying socket.
+// +// Further calls to the client may result in undefined behavior. +func (c *Client) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.Socket.Close() +} diff --git a/pkg/urpc/urpc_test.go b/pkg/urpc/urpc_test.go new file mode 100644 index 000000000..d9cfc512e --- /dev/null +++ b/pkg/urpc/urpc_test.go @@ -0,0 +1,210 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package urpc + +import ( + "errors" + "os" + "testing" + + "gvisor.googlesource.com/gvisor/pkg/unet" +) + +type test struct { +} + +type testArg struct { + StringArg string + IntArg int + FilePayload +} + +type testResult struct { + StringResult string + IntResult int + FilePayload +} + +func (t test) Func(a *testArg, r *testResult) error { + r.StringResult = a.StringArg + r.IntResult = a.IntArg + return nil +} + +func (t test) Err(a *testArg, r *testResult) error { + return errors.New("test error") +} + +func (t test) FailNoFile(a *testArg, r *testResult) error { + if a.Files == nil { + return errors.New("no file found") + } + + return nil +} + +func (t test) SendFile(a *testArg, r *testResult) error { + r.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr} + return nil +} + +func (t test) TooManyFiles(a *testArg, r *testResult) error { + for i := 0; i <= maxFiles; i++ { + r.Files = append(r.Files, os.Stdin) + } + return nil +} + +func startServer(socket *unet.Socket) { + s := NewServer() + s.Register(test{}) + s.StartHandling(socket) +} + +func 
testClient() (*Client, error) { + serverSock, clientSock, err := unet.SocketPair(false) + if err != nil { + return nil, err + } + startServer(serverSock) + + return NewClient(clientSock), nil +} + +func TestCall(t *testing.T) { + c, err := testClient() + if err != nil { + t.Fatalf("error creating test client: %v", err) + } + defer c.Close() + + var r testResult + if err := c.Call("test.Func", &testArg{}, &r); err != nil { + t.Errorf("basic call failed: %v", err) + } else if r.StringResult != "" || r.IntResult != 0 { + t.Errorf("unexpected result, got %v expected zero value", r) + } + if err := c.Call("test.Func", &testArg{StringArg: "hello"}, &r); err != nil { + t.Errorf("basic call failed: %v", err) + } else if r.StringResult != "hello" { + t.Errorf("unexpected result, got %v expected hello", r.StringResult) + } + if err := c.Call("test.Func", &testArg{IntArg: 1}, &r); err != nil { + t.Errorf("basic call failed: %v", err) + } else if r.IntResult != 1 { + t.Errorf("unexpected result, got %v expected 1", r.IntResult) + } +} + +func TestUnknownMethod(t *testing.T) { + c, err := testClient() + if err != nil { + t.Fatalf("error creating test client: %v", err) + } + defer c.Close() + + var r testResult + if err := c.Call("test.Unknown", &testArg{}, &r); err == nil { + t.Errorf("expected non-nil err, got nil") + } else if err.Error() != ErrUnknownMethod.Error() { + t.Errorf("expected test error, got %v", err) + } +} + +func TestErr(t *testing.T) { + c, err := testClient() + if err != nil { + t.Fatalf("error creating test client: %v", err) + } + defer c.Close() + + var r testResult + if err := c.Call("test.Err", &testArg{}, &r); err == nil { + t.Errorf("expected non-nil err, got nil") + } else if err.Error() != "test error" { + t.Errorf("expected test error, got %v", err) + } +} + +func TestSendFile(t *testing.T) { + c, err := testClient() + if err != nil { + t.Fatalf("error creating test client: %v", err) + } + defer c.Close() + + var r testResult + if err := 
c.Call("test.FailNoFile", &testArg{}, &r); err == nil { + t.Errorf("expected non-nil err, got nil") + } + if err := c.Call("test.FailNoFile", &testArg{FilePayload: FilePayload{Files: []*os.File{os.Stdin, os.Stdout, os.Stdin}}}, &r); err != nil { + t.Errorf("expected nil err, got %v", err) + } +} + +func TestRecvFile(t *testing.T) { + c, err := testClient() + if err != nil { + t.Fatalf("error creating test client: %v", err) + } + defer c.Close() + + var r testResult + if err := c.Call("test.SendFile", &testArg{}, &r); err != nil { + t.Errorf("expected nil err, got %v", err) + } + if r.Files == nil { + t.Errorf("expected file, got nil") + } +} + +func TestShutdown(t *testing.T) { + serverSock, clientSock, err := unet.SocketPair(false) + if err != nil { + t.Fatalf("error creating test client: %v", err) + } + clientSock.Close() + + s := NewServer() + if err := s.Handle(serverSock); err == nil { + t.Errorf("expected non-nil err, got nil") + } +} + +func TestTooManyFiles(t *testing.T) { + c, err := testClient() + if err != nil { + t.Fatalf("error creating test client: %v", err) + } + defer c.Close() + + var r testResult + var a testArg + for i := 0; i <= maxFiles; i++ { + a.Files = append(a.Files, os.Stdin) + } + + // Client-side error. + if err := c.Call("test.Func", &a, &r); err != ErrTooManyFiles { + t.Errorf("expected ErrTooManyFiles, got %v", err) + } + + // Server-side error. 
+ if err := c.Call("test.TooManyFiles", &testArg{}, &r); err == nil { + t.Errorf("expected non-nil err, got nil") + } else if err.Error() != "too many files" { + t.Errorf("expected too many files, got %v", err.Error()) + } +} diff --git a/pkg/waiter/BUILD b/pkg/waiter/BUILD new file mode 100644 index 000000000..7415dd325 --- /dev/null +++ b/pkg/waiter/BUILD @@ -0,0 +1,36 @@ +package(licenses = ["notice"]) # BSD + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("//tools/go_stateify:defs.bzl", "go_stateify") + +go_stateify( + name = "waiter_state", + srcs = [ + "waiter.go", + ], + out = "waiter_state.go", + package = "waiter", +) + +go_library( + name = "waiter", + srcs = [ + "waiter.go", + "waiter_state.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/waiter", + visibility = ["//visibility:public"], + deps = [ + "//pkg/ilist", + "//pkg/state", + ], +) + +go_test( + name = "waiter_test", + size = "small", + srcs = [ + "waiter_test.go", + ], + embed = [":waiter"], +) diff --git a/pkg/waiter/fdnotifier/BUILD b/pkg/waiter/fdnotifier/BUILD new file mode 100644 index 000000000..d5b5ee82d --- /dev/null +++ b/pkg/waiter/fdnotifier/BUILD @@ -0,0 +1,14 @@ +package(licenses = ["notice"]) # Apache 2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "fdnotifier", + srcs = [ + "fdnotifier.go", + "poll_unsafe.go", + ], + importpath = "gvisor.googlesource.com/gvisor/pkg/waiter/fdnotifier", + visibility = ["//:sandbox"], + deps = ["//pkg/waiter"], +) diff --git a/pkg/waiter/fdnotifier/fdnotifier.go b/pkg/waiter/fdnotifier/fdnotifier.go new file mode 100644 index 000000000..8bb93e39b --- /dev/null +++ b/pkg/waiter/fdnotifier/fdnotifier.go @@ -0,0 +1,200 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fdnotifier contains an adapter that translates IO events (e.g., a +// file became readable/writable) from native FDs to the notifications in the +// waiter package. It uses epoll in edge-triggered mode to receive notifications +// for registered FDs. +package fdnotifier + +import ( + "fmt" + "sync" + "syscall" + + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +type fdInfo struct { + queue *waiter.Queue + waiting bool +} + +// notifier holds all the state necessary to issue notifications when IO events +// occur in the observed FDs. +type notifier struct { + // epFD is the epoll file descriptor used to register for io + // notifications. + epFD int + + // mu protects fdMap. + mu sync.Mutex + + // fdMap maps file descriptors to their notification queues and waiting + // status. + fdMap map[int32]*fdInfo +} + +// newNotifier creates a new notifier object. +func newNotifier() (*notifier, error) { + epfd, err := syscall.EpollCreate1(0) + if err != nil { + return nil, err + } + + w := ¬ifier{ + epFD: epfd, + fdMap: make(map[int32]*fdInfo), + } + + go w.waitAndNotify() // S/R-SAFE: no waiter exists during save / load. + + return w, nil +} + +// waitFD waits on mask for fd. The fdMap mutex must be hold. 
+func (n *notifier) waitFD(fd int32, fi *fdInfo, mask waiter.EventMask) error { + if !fi.waiting && mask == 0 { + return nil + } + + e := syscall.EpollEvent{ + Events: uint32(mask) | -syscall.EPOLLET, + Fd: fd, + } + + switch { + case !fi.waiting && mask != 0: + if err := syscall.EpollCtl(n.epFD, syscall.EPOLL_CTL_ADD, int(fd), &e); err != nil { + return err + } + fi.waiting = true + case fi.waiting && mask == 0: + syscall.EpollCtl(n.epFD, syscall.EPOLL_CTL_DEL, int(fd), nil) + fi.waiting = false + case fi.waiting && mask != 0: + if err := syscall.EpollCtl(n.epFD, syscall.EPOLL_CTL_MOD, int(fd), &e); err != nil { + return err + } + } + + return nil +} + +// addFD adds an FD to the list of FDs observed by n. +func (n *notifier) addFD(fd int32, queue *waiter.Queue) { + n.mu.Lock() + defer n.mu.Unlock() + + // Panic if we're already notifying on this FD. + if _, ok := n.fdMap[fd]; ok { + panic(fmt.Sprintf("File descriptor %v added twice", fd)) + } + + // We have nothing to wait for at the moment. Just add it to the map. + n.fdMap[fd] = &fdInfo{queue: queue} +} + +// updateFD updates the set of events the fd needs to be notified on. +func (n *notifier) updateFD(fd int32) error { + n.mu.Lock() + defer n.mu.Unlock() + + if fi, ok := n.fdMap[fd]; ok { + return n.waitFD(fd, fi, fi.queue.Events()) + } + + return nil +} + +// RemoveFD removes an FD from the list of FDs observed by n. +func (n *notifier) removeFD(fd int32) { + n.mu.Lock() + defer n.mu.Unlock() + + // Remove from map, then from epoll object. + n.waitFD(fd, n.fdMap[fd], 0) + delete(n.fdMap, fd) +} + +// hasFD returns true if the fd is in the list of observed FDs. +func (n *notifier) hasFD(fd int32) bool { + n.mu.Lock() + defer n.mu.Unlock() + + _, ok := n.fdMap[fd] + return ok +} + +// waitAndNotify run is its own goroutine and loops waiting for io event +// notifications from the epoll object. Once notifications arrive, they are +// dispatched to the registered queue. 
+func (n *notifier) waitAndNotify() error { + e := make([]syscall.EpollEvent, 100) + for { + v, err := epollWait(n.epFD, e, -1) + if err == syscall.EINTR { + continue + } + + if err != nil { + return err + } + + n.mu.Lock() + for i := 0; i < v; i++ { + if fi, ok := n.fdMap[e[i].Fd]; ok { + fi.queue.Notify(waiter.EventMask(e[i].Events)) + } + } + n.mu.Unlock() + } +} + +var shared struct { + notifier *notifier + once sync.Once + initErr error +} + +// AddFD adds an FD to the list of observed FDs. +func AddFD(fd int32, queue *waiter.Queue) error { + shared.once.Do(func() { + shared.notifier, shared.initErr = newNotifier() + }) + + if shared.initErr != nil { + return shared.initErr + } + + shared.notifier.addFD(fd, queue) + return nil +} + +// UpdateFD updates the set of events the fd needs to be notified on. +func UpdateFD(fd int32) error { + return shared.notifier.updateFD(fd) +} + +// RemoveFD removes an FD from the list of observed FDs. +func RemoveFD(fd int32) { + shared.notifier.removeFD(fd) +} + +// HasFD returns true if the FD is in the list of observed FDs. +// +// This should only be used by tests to assert that FDs are correctly registered. +func HasFD(fd int32) bool { + return shared.notifier.hasFD(fd) +} diff --git a/pkg/waiter/fdnotifier/poll_unsafe.go b/pkg/waiter/fdnotifier/poll_unsafe.go new file mode 100644 index 000000000..26bca2b53 --- /dev/null +++ b/pkg/waiter/fdnotifier/poll_unsafe.go @@ -0,0 +1,74 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fdnotifier + +import ( + "syscall" + "unsafe" + + "gvisor.googlesource.com/gvisor/pkg/waiter" +) + +// NonBlockingPoll polls the given FD in non-blocking fashion. It is used just +// to query the FD's current state. +func NonBlockingPoll(fd int32, mask waiter.EventMask) waiter.EventMask { + e := struct { + fd int32 + events int16 + revents int16 + }{ + fd: fd, + events: int16(mask), + } + + for { + n, _, err := syscall.RawSyscall(syscall.SYS_POLL, uintptr(unsafe.Pointer(&e)), 1, 0) + // Interrupted by signal, try again. + if err == syscall.EINTR { + continue + } + // If an error occur we'll conservatively say the FD is ready for + // whatever is being checked. + if err != 0 { + return mask + } + + // If no FDs were returned, it wasn't ready for anything. + if n == 0 { + return 0 + } + + // Otherwise we got the ready events in the revents field. + return waiter.EventMask(e.revents) + } +} + +// epollWait performs a blocking wait on epfd. +// +// Preconditions: +// * len(events) > 0 +func epollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) { + if len(events) == 0 { + panic("Empty events passed to EpollWait") + } + + // We actually use epoll_pwait with NULL sigmask instead of epoll_wait + // since that is what the Go >= 1.11 runtime prefers. + r, _, e := syscall.Syscall6(syscall.SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(unsafe.Pointer(&events[0])), uintptr(len(events)), uintptr(msec), 0, 0) + if e != 0 { + return 0, e + } + return int(r), nil +} diff --git a/pkg/waiter/waiter.go b/pkg/waiter/waiter.go new file mode 100644 index 000000000..56f53f9c3 --- /dev/null +++ b/pkg/waiter/waiter.go @@ -0,0 +1,226 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package waiter provides the implementation of a wait queue, where waiters can +// be enqueued to be notified when an event of interest happens. +// +// Becoming readable and/or writable are examples of events. Waiters are +// expected to use a pattern similar to this to make a blocking function out of +// a non-blocking one: +// +// func (o *object) blockingRead(...) error { +// err := o.nonBlockingRead(...) +// if err != ErrAgain { +// // Completed with no need to wait! +// return err +// } +// +// e := createOrGetWaiterEntry(...) +// o.EventRegister(&e, waiter.EventIn) +// defer o.EventUnregister(&e) +// +// // We need to try to read again after registration because the +// // object may have become readable between the last attempt to +// // read and read registration. +// err = o.nonBlockingRead(...) +// for err == ErrAgain { +// wait() +// err = o.nonBlockingRead(...) +// } +// +// return err +// } +// +// Another goroutine needs to notify waiters when events happen. For example: +// +// func (o *object) Write(...) ... { +// // Do write work. +// [...] +// +// if oldDataAvailableSize == 0 && dataAvailableSize > 0 { +// // If no data was available and now some data is +// // available, the object became readable, so notify +// // potential waiters about this. +// o.Notify(waiter.EventIn) +// } +// } +package waiter + +import ( + "sync" + + "gvisor.googlesource.com/gvisor/pkg/ilist" +) + +// EventMask represents io events as used in the poll() syscall. +type EventMask uint16 + +// Events that waiters can wait on. The meaning is the same as those in the +// poll() syscall. +const ( + EventIn EventMask = 0x01 // syscall.EPOLLIN + EventPri EventMask = 0x02 // syscall.EPOLLPRI + EventOut EventMask = 0x04 // syscall.EPOLLOUT + EventErr EventMask = 0x08 // syscall.EPOLLERR + EventHUp EventMask = 0x10 // syscall.EPOLLHUP + EventNVal EventMask = 0x20 // Not defined in syscall. 
+) + +// Waitable contains the methods that need to be implemented by waitable +// objects. +type Waitable interface { + // Readiness returns what the object is currently ready for. If it's + // not ready for a desired purpose, the caller may use EventRegister and + // EventUnregister to get notifications once the object becomes ready. + // + // Implementations should allow for events like EventHUp and EventErr + // to be returned regardless of whether they are in the input EventMask. + Readiness(mask EventMask) EventMask + + // EventRegister registers the given waiter entry to receive + // notifications when an event occurs that makes the object ready for + // at least one of the events in mask. + EventRegister(e *Entry, mask EventMask) + + // EventUnregister unregisters a waiter entry previously registered with + // EventRegister(). + EventUnregister(e *Entry) +} + +// EntryCallback provides a notify callback. +type EntryCallback interface { + // Callback is the function to be called when the waiter entry is + // notified. It is responsible for doing whatever is needed to wake up + // the waiter. + // + // The callback is supposed to perform minimal work, and cannot call + // any method on the queue itself because it will be locked while the + // callback is running. + Callback(e *Entry) +} + +// Entry represents a waiter that can be add to the a wait queue. It can +// only be in one queue at a time, and is added "intrusively" to the queue with +// no extra memory allocations. +type Entry struct { + // Context stores any state the waiter may wish to store in the entry + // itself, which may be used at wake up time. + // + // Note that use of this field is optional and state may alternatively be + // stored in the callback itself. + Context interface{} + + Callback EntryCallback + + // The following fields are protected by the queue lock. + mask EventMask + ilist.Entry +} + +type channelCallback struct{} + +// Callback implements EntryCallback.Callback. 
+func (*channelCallback) Callback(e *Entry) { + ch := e.Context.(chan struct{}) + select { + case ch <- struct{}{}: + default: + } +} + +// NewChannelEntry initializes a new Entry that does a non-blocking write to a +// struct{} channel when the callback is called. It returns the new Entry +// instance and the channel being used. +// +// If a channel isn't specified (i.e., if "c" is nil), then NewChannelEntry +// allocates a new channel. +func NewChannelEntry(c chan struct{}) (Entry, chan struct{}) { + if c == nil { + c = make(chan struct{}, 1) + } + + return Entry{Context: c, Callback: &channelCallback{}}, c +} + +// Queue represents the wait queue where waiters can be added and +// notifiers can notify them when events happen. +// +// The zero value for waiter.Queue is an empty queue ready for use. +type Queue struct { + list ilist.List `state:"zerovalue"` + mu sync.RWMutex `state:"nosave"` +} + +// EventRegister adds a waiter to the wait queue; the waiter will be notified +// when at least one of the events specified in mask happens. +func (q *Queue) EventRegister(e *Entry, mask EventMask) { + q.mu.Lock() + e.mask = mask + q.list.PushBack(e) + q.mu.Unlock() +} + +// EventUnregister removes the given waiter entry from the wait queue. +func (q *Queue) EventUnregister(e *Entry) { + q.mu.Lock() + q.list.Remove(e) + q.mu.Unlock() +} + +// Notify notifies all waiters in the queue whose masks have at least one bit +// in common with the notification mask. +func (q *Queue) Notify(mask EventMask) { + q.mu.RLock() + for it := q.list.Front(); it != nil; it = it.Next() { + e := it.(*Entry) + if (mask & e.mask) != 0 { + e.Callback.Callback(e) + } + } + q.mu.RUnlock() +} + +// Events returns the set of events being waited on. It is the union of the +// masks of all registered entries. 
+func (q *Queue) Events() EventMask { + ret := EventMask(0) + + q.mu.RLock() + for it := q.list.Front(); it != nil; it = it.Next() { + e := it.(*Entry) + ret |= e.mask + } + q.mu.RUnlock() + + return ret +} + +// IsEmpty returns if the wait queue is empty or not. +func (q *Queue) IsEmpty() bool { + q.mu.Lock() + defer q.mu.Unlock() + + return q.list.Front() == nil +} + +// AlwaysReady implements the Waitable interface but is always ready. Embedding +// this struct into another struct makes it implement the boilerplate empty +// functions automatically. +type AlwaysReady struct { +} + +// Readiness always returns the input mask because this object is always ready. +func (*AlwaysReady) Readiness(mask EventMask) EventMask { + return mask +} + +// EventRegister doesn't do anything because this object doesn't need to issue +// notifications because its readiness never changes. +func (*AlwaysReady) EventRegister(*Entry, EventMask) { +} + +// EventUnregister doesn't do anything because this object doesn't need to issue +// notifications because its readiness never changes. +func (*AlwaysReady) EventUnregister(e *Entry) { +} diff --git a/pkg/waiter/waiter_test.go b/pkg/waiter/waiter_test.go new file mode 100644 index 000000000..1a203350b --- /dev/null +++ b/pkg/waiter/waiter_test.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Netstack Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package waiter + +import ( + "sync/atomic" + "testing" +) + +type callbackStub struct { + f func(e *Entry) +} + +// Callback implements EntryCallback.Callback. +func (c *callbackStub) Callback(e *Entry) { + c.f(e) +} + +func TestEmptyQueue(t *testing.T) { + var q Queue + + // Notify the zero-value of a queue. + q.Notify(EventIn) + + // Register then unregister a waiter, then notify the queue. 
+ cnt := 0 + e := Entry{Callback: &callbackStub{func(*Entry) { cnt++ }}} + q.EventRegister(&e, EventIn) + q.EventUnregister(&e) + q.Notify(EventIn) + if cnt != 0 { + t.Errorf("Callback was called when it shouldn't have been") + } +} + +func TestMask(t *testing.T) { + // Register a waiter. + var q Queue + var cnt int + e := Entry{Callback: &callbackStub{func(*Entry) { cnt++ }}} + q.EventRegister(&e, EventIn|EventErr) + + // Notify with an overlapping mask. + cnt = 0 + q.Notify(EventIn | EventOut) + if cnt != 1 { + t.Errorf("Callback wasn't called when it should have been") + } + + // Notify with a subset mask. + cnt = 0 + q.Notify(EventIn) + if cnt != 1 { + t.Errorf("Callback wasn't called when it should have been") + } + + // Notify with a superset mask. + cnt = 0 + q.Notify(EventIn | EventErr | EventOut) + if cnt != 1 { + t.Errorf("Callback wasn't called when it should have been") + } + + // Notify with the exact same mask. + cnt = 0 + q.Notify(EventIn | EventErr) + if cnt != 1 { + t.Errorf("Callback wasn't called when it should have been") + } + + // Notify with a disjoint mask. + cnt = 0 + q.Notify(EventOut | EventHUp) + if cnt != 0 { + t.Errorf("Callback was called when it shouldn't have been") + } +} + +func TestConcurrentRegistration(t *testing.T) { + var q Queue + var cnt int + const concurrency = 1000 + + ch1 := make(chan struct{}) + ch2 := make(chan struct{}) + ch3 := make(chan struct{}) + + // Create goroutines that will all register/unregister concurrently. + for i := 0; i < concurrency; i++ { + go func() { + var e Entry + e.Callback = &callbackStub{func(entry *Entry) { + cnt++ + if entry != &e { + t.Errorf("entry = %p, want %p", entry, &e) + } + }} + + // Wait for notification, then register. + <-ch1 + q.EventRegister(&e, EventIn|EventErr) + + // Tell main goroutine that we're done registering. + ch2 <- struct{}{} + + // Wait for notification, then unregister. + <-ch3 + q.EventUnregister(&e) + + // Tell main goroutine that we're done unregistering. 
+ ch2 <- struct{}{} + }() + } + + // Let the goroutines register. + close(ch1) + for i := 0; i < concurrency; i++ { + <-ch2 + } + + // Issue a notification. + q.Notify(EventIn) + if cnt != concurrency { + t.Errorf("cnt = %d, want %d", cnt, concurrency) + } + + // Let the goroutine unregister. + close(ch3) + for i := 0; i < concurrency; i++ { + <-ch2 + } + + // Issue a notification. + q.Notify(EventIn) + if cnt != concurrency { + t.Errorf("cnt = %d, want %d", cnt, concurrency) + } +} + +func TestConcurrentNotification(t *testing.T) { + var q Queue + var cnt int32 + const concurrency = 1000 + const waiterCount = 1000 + + // Register waiters. + for i := 0; i < waiterCount; i++ { + var e Entry + e.Callback = &callbackStub{func(entry *Entry) { + atomic.AddInt32(&cnt, 1) + if entry != &e { + t.Errorf("entry = %p, want %p", entry, &e) + } + }} + + q.EventRegister(&e, EventIn|EventErr) + } + + // Launch notifiers. + ch1 := make(chan struct{}) + ch2 := make(chan struct{}) + for i := 0; i < concurrency; i++ { + go func() { + <-ch1 + q.Notify(EventIn) + ch2 <- struct{}{} + }() + } + + // Let notifiers go. + close(ch1) + for i := 0; i < concurrency; i++ { + <-ch2 + } + + // Check the count. + if cnt != concurrency*waiterCount { + t.Errorf("cnt = %d, want %d", cnt, concurrency*waiterCount) + } +} |