From 6e1f51f3eb44bdee85c50d075e750e857adef9fd Mon Sep 17 00:00:00 2001
From: Michael Pratt
Date: Mon, 3 Jun 2019 13:30:51 -0700
Subject: Remove duplicate socket tests

socket_unix_abstract.cc: Subset of socket_abstract.cc
socket_unix_filesystem.cc: Subset of socket_filesystem.cc

PiperOrigin-RevId: 251297117
---
 test/syscalls/linux/BUILD                     | 33 ------------------------
 test/syscalls/linux/socket_unix_abstract.cc   | 37 ---------------------
 test/syscalls/linux/socket_unix_filesystem.cc | 37 ---------------------
 3 files changed, 107 deletions(-)
 delete mode 100644 test/syscalls/linux/socket_unix_abstract.cc
 delete mode 100644 test/syscalls/linux/socket_unix_filesystem.cc
(limited to 'test/syscalls/linux')

diff --git a/test/syscalls/linux/BUILD b/test/syscalls/linux/BUILD
index 8465e5ad0..ba9fd6d1f 100644
--- a/test/syscalls/linux/BUILD
+++ b/test/syscalls/linux/BUILD
@@ -2613,22 +2613,6 @@ cc_binary(
     ],
 )
 
-cc_binary(
-    name = "socket_unix_abstract_test",
-    testonly = 1,
-    srcs = [
-        "socket_unix_abstract.cc",
-    ],
-    linkstatic = 1,
-    deps = [
-        ":socket_test_util",
-        ":socket_unix_test_cases",
-        ":unix_domain_socket_test_util",
-        "//test/util:test_main",
-        "//test/util:test_util",
-    ],
-)
-
 cc_binary(
     name = "socket_unix_unbound_dgram_test",
     testonly = 1,
@@ -2671,23 +2655,6 @@ cc_binary(
     ],
 )
 
-cc_binary(
-    name = "socket_unix_filesystem_test",
-    testonly = 1,
-    srcs = [
-        "socket_unix_filesystem.cc",
-    ],
-    linkstatic = 1,
-    deps = [
-        ":socket_test_util",
-        ":socket_unix_test_cases",
-        ":unix_domain_socket_test_util",
-        "//test/util:test_main",
-        "//test/util:test_util",
-        "@com_google_googletest//:gtest",
-    ],
-)
-
 cc_binary(
     name = "socket_blocking_local_test",
     testonly = 1,
diff --git a/test/syscalls/linux/socket_unix_abstract.cc b/test/syscalls/linux/socket_unix_abstract.cc
deleted file mode 100644
index 8241bf997..000000000
--- a/test/syscalls/linux/socket_unix_abstract.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include
-
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/socket_unix.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-std::vector<SocketPairKind> GetSocketPairs() {
-  return ApplyVec<SocketPairKind>(
-      AbstractBoundUnixDomainSocketPair,
-      AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},
-                             List<int>{0, SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
-    AllUnixDomainSockets, UnixSocketPairTest,
-    ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-}  // namespace testing
-}  // namespace gvisor
diff --git a/test/syscalls/linux/socket_unix_filesystem.cc b/test/syscalls/linux/socket_unix_filesystem.cc
deleted file mode 100644
index 5dbe67773..000000000
--- a/test/syscalls/linux/socket_unix_filesystem.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include
-
-#include "test/syscalls/linux/socket_test_util.h"
-#include "test/syscalls/linux/socket_unix.h"
-#include "test/syscalls/linux/unix_domain_socket_test_util.h"
-#include "test/util/test_util.h"
-
-namespace gvisor {
-namespace testing {
-
-std::vector<SocketPairKind> GetSocketPairs() {
-  return ApplyVec<SocketPairKind>(
-      FilesystemBoundUnixDomainSocketPair,
-      AllBitwiseCombinations(List<int>{SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET},
-                             List<int>{0, SOCK_NONBLOCK}));
-}
-
-INSTANTIATE_TEST_SUITE_P(
-    AllUnixDomainSockets, UnixSocketPairTest,
-    ::testing::ValuesIn(IncludeReversals(GetSocketPairs())));
-
-}  // namespace testing
-}  // namespace gvisor
--
cgit v1.2.3

From 90a116890fcea9fd39911bae854e4e67608a141d Mon Sep 17 00:00:00 2001
From: Andrei Vagin
Date: Mon, 3 Jun 2019 21:47:09 -0700
Subject: gvisor/sock/unix: pass creds when a message is sent between unconnected sockets

and don't report a sender address if it doesn't have one

PiperOrigin-RevId: 251371284
---
 pkg/sentry/fs/gofer/socket.go                    |  5 +++++
 pkg/sentry/socket/control/control.go             | 12 ++++++++++--
 pkg/sentry/socket/unix/transport/unix.go         |  4 ++++
 pkg/sentry/socket/unix/unix.go                   |  6 +++++-
 test/syscalls/linux/accept_bind.cc               | 14 +------------
 test/syscalls/linux/socket_unix_unbound_dgram.cc | 24 ++++++++++++++++++++++++
 6 files changed, 49 insertions(+), 16 deletions(-)
(limited to 'test/syscalls/linux')

diff --git a/pkg/sentry/fs/gofer/socket.go b/pkg/sentry/fs/gofer/socket.go
index cbd5b9a84..7376fd76f 100644
--- a/pkg/sentry/fs/gofer/socket.go
+++ b/pkg/sentry/fs/gofer/socket.go
@@ -139,3 +139,8 @@ func (e *endpoint) UnidirectionalConnect() (transport.ConnectedEndpoint, *syserr
 func (e *endpoint) Release() {
 	e.inode.DecRef()
 }
+
+// Passcred implements transport.BoundEndpoint.Passcred.
+func (e *endpoint) Passcred() bool {
+	return false
+}
diff --git a/pkg/sentry/socket/control/control.go b/pkg/sentry/socket/control/control.go
index c0238691d..434d7ca2e 100644
--- a/pkg/sentry/socket/control/control.go
+++ b/pkg/sentry/socket/control/control.go
@@ -406,12 +406,20 @@ func makeCreds(t *kernel.Task, socketOrEndpoint interface{}) SCMCredentials {
 		return nil
 	}
 	if cr, ok := socketOrEndpoint.(transport.Credentialer); ok && (cr.Passcred() || cr.ConnectedPasscred()) {
-		tcred := t.Credentials()
-		return &scmCredentials{t, tcred.EffectiveKUID, tcred.EffectiveKGID}
+		return MakeCreds(t)
 	}
 	return nil
 }
 
+// MakeCreds creates default SCMCredentials.
+func MakeCreds(t *kernel.Task) SCMCredentials {
+	if t == nil {
+		return nil
+	}
+	tcred := t.Credentials()
+	return &scmCredentials{t, tcred.EffectiveKUID, tcred.EffectiveKGID}
+}
+
 // New creates default control messages if needed.
func New(t *kernel.Task, socketOrEndpoint interface{}, rights SCMRights) transport.ControlMessages { return transport.ControlMessages{ diff --git a/pkg/sentry/socket/unix/transport/unix.go b/pkg/sentry/socket/unix/transport/unix.go index b734b4c20..37d82bb6b 100644 --- a/pkg/sentry/socket/unix/transport/unix.go +++ b/pkg/sentry/socket/unix/transport/unix.go @@ -237,6 +237,10 @@ type BoundEndpoint interface { // endpoint. UnidirectionalConnect() (ConnectedEndpoint, *syserr.Error) + // Passcred returns whether or not the SO_PASSCRED socket option is + // enabled on this end. + Passcred() bool + // Release releases any resources held by the BoundEndpoint. It must be // called before dropping all references to a BoundEndpoint returned by a // function. diff --git a/pkg/sentry/socket/unix/unix.go b/pkg/sentry/socket/unix/unix.go index 1414be0c6..388cc0d8b 100644 --- a/pkg/sentry/socket/unix/unix.go +++ b/pkg/sentry/socket/unix/unix.go @@ -385,6 +385,10 @@ func (s *SocketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to [] } defer ep.Release() w.To = ep + + if ep.Passcred() && w.Control.Credentials == nil { + w.Control.Credentials = control.MakeCreds(t) + } } n, err := src.CopyInTo(t, &w) @@ -516,7 +520,7 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags if n, err := dst.CopyOutFrom(t, &r); err != syserror.ErrWouldBlock || dontWait { var from interface{} var fromLen uint32 - if r.From != nil { + if r.From != nil && len([]byte(r.From.Addr)) != 0 { from, fromLen = epsocket.ConvertAddress(linux.AF_UNIX, *r.From) } diff --git a/test/syscalls/linux/accept_bind.cc b/test/syscalls/linux/accept_bind.cc index 56377feab..1122ea240 100644 --- a/test/syscalls/linux/accept_bind.cc +++ b/test/syscalls/linux/accept_bind.cc @@ -448,19 +448,7 @@ TEST_P(AllSocketPairTest, UnboundSenderAddr) { RetryEINTR(recvfrom)(accepted_fd.get(), &i, sizeof(i), 0, reinterpret_cast(&addr), &addr_len), SyscallSucceedsWithValue(sizeof(i))); - if (!IsRunningOnGvisor()) { - // Linux returns a zero length for addresses from recvfrom(2) and - // recvmsg(2). This differs from the behavior of getpeername(2) and - // getsockname(2). For simplicity, we use the getpeername(2) and - // getsockname(2) behavior for recvfrom(2) and recvmsg(2). - EXPECT_EQ(addr_len, 0); - return; - } - EXPECT_EQ(addr_len, 2); - EXPECT_EQ( - memcmp(&addr, sockets->second_addr(), - std::min((size_t)addr_len, (size_t)sockets->second_addr_len())), - 0); + EXPECT_EQ(addr_len, 0); } TEST_P(AllSocketPairTest, BoundSenderAddr) { diff --git a/test/syscalls/linux/socket_unix_unbound_dgram.cc b/test/syscalls/linux/socket_unix_unbound_dgram.cc index 2ddc5c11f..52aef891f 100644 --- a/test/syscalls/linux/socket_unix_unbound_dgram.cc +++ b/test/syscalls/linux/socket_unix_unbound_dgram.cc @@ -13,7 +13,9 @@ // limitations under the License. 
#include +#include #include + #include "gtest/gtest.h" #include "gtest/gtest.h" #include "test/syscalls/linux/socket_test_util.h" @@ -142,6 +144,28 @@ TEST_P(UnboundDgramUnixSocketPairTest, SendtoWithoutConnect) { SyscallSucceedsWithValue(sizeof(data))); } +TEST_P(UnboundDgramUnixSocketPairTest, SendtoWithoutConnectPassCreds) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(), + sockets->first_addr_size()), + SyscallSucceeds()); + + SetSoPassCred(sockets->first_fd()); + char data = 'a'; + ASSERT_THAT( + RetryEINTR(sendto)(sockets->second_fd(), &data, sizeof(data), 0, + sockets->first_addr(), sockets->first_addr_size()), + SyscallSucceedsWithValue(sizeof(data))); + ucred creds; + creds.pid = -1; + char buf[sizeof(data) + 1]; + ASSERT_NO_FATAL_FAILURE( + RecvCreds(sockets->first_fd(), &creds, buf, sizeof(buf), sizeof(data))); + EXPECT_EQ(0, memcmp(&data, buf, sizeof(data))); + EXPECT_THAT(getpid(), SyscallSucceedsWithValue(creds.pid)); +} + INSTANTIATE_TEST_SUITE_P( AllUnixDomainSockets, UnboundDgramUnixSocketPairTest, ::testing::ValuesIn(VecCat( -- cgit v1.2.3 From d3ed9baac0dc967eaf6d3e3f986cafe60604121a Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Wed, 5 Jun 2019 13:59:01 -0700 Subject: Implement dumpability tracking and checks We don't actually support core dumps, but some applications want to get/set dumpability, which still has an effect in procfs. Lack of support for set-uid binaries or fs creds simplifies things a bit. As-is, processes started via CreateProcess (i.e., init and sentryctl exec) have normal dumpability. I'm a bit torn on whether sentryctl exec tasks should be dumpable, but at least since they have no parent normal UID/GID checks should protect them. PiperOrigin-RevId: 251712714 --- pkg/abi/linux/prctl.go | 7 +++++ pkg/sentry/fs/proc/inode.go | 40 ++++++++++++++++++++++-- pkg/sentry/fs/proc/task.go | 17 +++++++++- pkg/sentry/kernel/ptrace.go | 17 +++++++++- pkg/sentry/kernel/task_exec.go | 7 +++++ pkg/sentry/kernel/task_identity.go | 24 ++++++++++++-- pkg/sentry/mm/lifecycle.go | 6 ++-- pkg/sentry/mm/metadata.go | 30 ++++++++++++++++++ pkg/sentry/mm/mm.go | 6 ++++ pkg/sentry/syscalls/linux/sys_prctl.go | 33 ++++++++++++++++++-- test/syscalls/linux/BUILD | 1 + test/syscalls/linux/prctl.cc | 34 ++++++++++++++++++++ test/syscalls/linux/proc.cc | 57 ++++++++++++++++++++++++++++++++++ 13 files changed, 268 insertions(+), 11 deletions(-) (limited to 'test/syscalls/linux') diff --git a/pkg/abi/linux/prctl.go b/pkg/abi/linux/prctl.go index 0428282dd..391cfaa1c 100644 --- a/pkg/abi/linux/prctl.go +++ b/pkg/abi/linux/prctl.go @@ -155,3 +155,10 @@ const ( ARCH_GET_GS = 0x1004 ARCH_SET_CPUID = 0x1012 ) + +// Flags for prctl(PR_SET_DUMPABLE), defined in include/linux/sched/coredump.h. +const ( + SUID_DUMP_DISABLE = 0 + SUID_DUMP_USER = 1 + SUID_DUMP_ROOT = 2 +) diff --git a/pkg/sentry/fs/proc/inode.go b/pkg/sentry/fs/proc/inode.go index 379569823..986bc0a45 100644 --- a/pkg/sentry/fs/proc/inode.go +++ b/pkg/sentry/fs/proc/inode.go @@ -21,11 +21,14 @@ import ( "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" "gvisor.googlesource.com/gvisor/pkg/sentry/fs/proc/device" "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" ) // taskOwnedInodeOps wraps an fs.InodeOperations and overrides the UnstableAttr -// method to return the task as the owner. 
+// method to return either the task or root as the owner, depending on the +// task's dumpability. // // +stateify savable type taskOwnedInodeOps struct { @@ -41,9 +44,42 @@ func (i *taskOwnedInodeOps) UnstableAttr(ctx context.Context, inode *fs.Inode) ( if err != nil { return fs.UnstableAttr{}, err } - // Set the task owner as the file owner. + + // By default, set the task owner as the file owner. creds := i.t.Credentials() uattr.Owner = fs.FileOwner{creds.EffectiveKUID, creds.EffectiveKGID} + + // Linux doesn't apply dumpability adjustments to world + // readable/executable directories so that applications can stat + // /proc/PID to determine the effective UID of a process. See + // fs/proc/base.c:task_dump_owner. + if fs.IsDir(inode.StableAttr) && uattr.Perms == fs.FilePermsFromMode(0555) { + return uattr, nil + } + + // If the task is not dumpable, then root (in the namespace preferred) + // owns the file. + var m *mm.MemoryManager + i.t.WithMuLocked(func(t *kernel.Task) { + m = t.MemoryManager() + }) + + if m == nil { + uattr.Owner.UID = auth.RootKUID + uattr.Owner.GID = auth.RootKGID + } else if m.Dumpability() != mm.UserDumpable { + if kuid := creds.UserNamespace.MapToKUID(auth.RootUID); kuid.Ok() { + uattr.Owner.UID = kuid + } else { + uattr.Owner.UID = auth.RootKUID + } + if kgid := creds.UserNamespace.MapToKGID(auth.RootGID); kgid.Ok() { + uattr.Owner.GID = kgid + } else { + uattr.Owner.GID = auth.RootKGID + } + } + return uattr, nil } diff --git a/pkg/sentry/fs/proc/task.go b/pkg/sentry/fs/proc/task.go index 77e03d349..21a965f90 100644 --- a/pkg/sentry/fs/proc/task.go +++ b/pkg/sentry/fs/proc/task.go @@ -96,7 +96,7 @@ func (p *proc) newTaskDir(t *kernel.Task, msrc *fs.MountSource, showSubtasks boo contents["cgroup"] = newCGroupInode(t, msrc, p.cgroupControllers) } - // TODO(b/31916171): Set EUID/EGID based on dumpability. + // N.B. taskOwnedInodeOps enforces dumpability-based ownership. d := &taskDir{ Dir: *ramfs.NewDir(t, contents, fs.RootOwner, fs.FilePermsFromMode(0555)), t: t, @@ -667,6 +667,21 @@ func newComm(t *kernel.Task, msrc *fs.MountSource) *fs.Inode { return newProcInode(c, msrc, fs.SpecialFile, t) } +// Check implements fs.InodeOperations.Check. +func (c *comm) Check(ctx context.Context, inode *fs.Inode, p fs.PermMask) bool { + // This file can always be read or written by members of the same + // thread group. See fs/proc/base.c:proc_tid_comm_permission. + // + // N.B. This check is currently a no-op as we don't yet support writing + // and this file is world-readable anyways. + t := kernel.TaskFromContext(ctx) + if t != nil && t.ThreadGroup() == c.t.ThreadGroup() && !p.Execute { + return true + } + + return fs.ContextCanAccessFile(ctx, inode, p) +} + // GetFile implements fs.InodeOperations.GetFile. func (c *comm) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) { return fs.NewFile(ctx, dirent, flags, &commFile{t: c.t}), nil diff --git a/pkg/sentry/kernel/ptrace.go b/pkg/sentry/kernel/ptrace.go index 4423e7efd..193447b17 100644 --- a/pkg/sentry/kernel/ptrace.go +++ b/pkg/sentry/kernel/ptrace.go @@ -19,6 +19,7 @@ import ( "gvisor.googlesource.com/gvisor/pkg/abi/linux" "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" "gvisor.googlesource.com/gvisor/pkg/syserror" ) @@ -92,6 +93,14 @@ const ( // ptrace(2), subsection "Ptrace access mode checking". 
If attach is true, it // checks for access mode PTRACE_MODE_ATTACH; otherwise, it checks for access // mode PTRACE_MODE_READ. +// +// NOTE(b/30815691): The result of CanTrace is immediately stale (e.g., a +// racing setuid(2) may change traceability). This may pose a risk when a task +// changes from traceable to not traceable. This is only problematic across +// execve, where privileges may increase. +// +// We currently do not implement privileged executables (set-user/group-ID bits +// and file capabilities), so that case is not reachable. func (t *Task) CanTrace(target *Task, attach bool) bool { // "1. If the calling thread and the target thread are in the same thread // group, access is always allowed." - ptrace(2) @@ -162,7 +171,13 @@ func (t *Task) CanTrace(target *Task, attach bool) bool { if cgid := callerCreds.RealKGID; cgid != targetCreds.RealKGID || cgid != targetCreds.EffectiveKGID || cgid != targetCreds.SavedKGID { return false } - // TODO(b/31916171): dumpability check + var targetMM *mm.MemoryManager + target.WithMuLocked(func(t *Task) { + targetMM = t.MemoryManager() + }) + if targetMM != nil && targetMM.Dumpability() != mm.UserDumpable { + return false + } if callerCreds.UserNamespace != targetCreds.UserNamespace { return false } diff --git a/pkg/sentry/kernel/task_exec.go b/pkg/sentry/kernel/task_exec.go index 5d1425d5c..35d5cb90c 100644 --- a/pkg/sentry/kernel/task_exec.go +++ b/pkg/sentry/kernel/task_exec.go @@ -68,6 +68,7 @@ import ( "gvisor.googlesource.com/gvisor/pkg/abi/linux" "gvisor.googlesource.com/gvisor/pkg/sentry/arch" "gvisor.googlesource.com/gvisor/pkg/sentry/fs" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" "gvisor.googlesource.com/gvisor/pkg/syserror" ) @@ -198,6 +199,12 @@ func (r *runSyscallAfterExecStop) execute(t *Task) taskRunState { return flags.CloseOnExec }) + // NOTE(b/30815691): We currently do not implement privileged + // executables (set-user/group-ID bits and file capabilities). This + // allows us to unconditionally enable user dumpability on the new mm. + // See fs/exec.c:setup_new_exec. + r.tc.MemoryManager.SetDumpability(mm.UserDumpable) + // Switch to the new process. t.MemoryManager().Deactivate() t.mu.Lock() diff --git a/pkg/sentry/kernel/task_identity.go b/pkg/sentry/kernel/task_identity.go index 17f08729a..ec95f78d0 100644 --- a/pkg/sentry/kernel/task_identity.go +++ b/pkg/sentry/kernel/task_identity.go @@ -17,6 +17,7 @@ package kernel import ( "gvisor.googlesource.com/gvisor/pkg/abi/linux" "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" "gvisor.googlesource.com/gvisor/pkg/syserror" ) @@ -206,8 +207,17 @@ func (t *Task) setKUIDsUncheckedLocked(newR, newE, newS auth.KUID) { // (filesystem UIDs aren't implemented, nor are any of the capabilities in // question) - // Not documented, but compare Linux's kernel/cred.c:commit_creds(). if oldE != newE { + // "[dumpability] is reset to the current value contained in + // the file /proc/sys/fs/suid_dumpable (which by default has + // the value 0), in the following circumstances: The process's + // effective user or group ID is changed." - prctl(2) + // + // (suid_dumpable isn't implemented, so we just use the + // default. + t.MemoryManager().SetDumpability(mm.NotDumpable) + + // Not documented, but compare Linux's kernel/cred.c:commit_creds(). t.parentDeathSignal = 0 } } @@ -303,8 +313,18 @@ func (t *Task) setKGIDsUncheckedLocked(newR, newE, newS auth.KGID) { t.creds = t.creds.Fork() // See doc for creds. 
t.creds.RealKGID, t.creds.EffectiveKGID, t.creds.SavedKGID = newR, newE, newS - // Not documented, but compare Linux's kernel/cred.c:commit_creds(). if oldE != newE { + // "[dumpability] is reset to the current value contained in + // the file /proc/sys/fs/suid_dumpable (which by default has + // the value 0), in the following circumstances: The process's + // effective user or group ID is changed." - prctl(2) + // + // (suid_dumpable isn't implemented, so we just use the + // default. + t.MemoryManager().SetDumpability(mm.NotDumpable) + + // Not documented, but compare Linux's + // kernel/cred.c:commit_creds(). t.parentDeathSignal = 0 } } diff --git a/pkg/sentry/mm/lifecycle.go b/pkg/sentry/mm/lifecycle.go index 7a65a62a2..7646d5ab2 100644 --- a/pkg/sentry/mm/lifecycle.go +++ b/pkg/sentry/mm/lifecycle.go @@ -37,6 +37,7 @@ func NewMemoryManager(p platform.Platform, mfp pgalloc.MemoryFileProvider) *Memo privateRefs: &privateRefs{}, users: 1, auxv: arch.Auxv{}, + dumpability: UserDumpable, aioManager: aioManager{contexts: make(map[uint64]*AIOContext)}, } } @@ -79,8 +80,9 @@ func (mm *MemoryManager) Fork(ctx context.Context) (*MemoryManager, error) { envv: mm.envv, auxv: append(arch.Auxv(nil), mm.auxv...), // IncRef'd below, once we know that there isn't an error. - executable: mm.executable, - aioManager: aioManager{contexts: make(map[uint64]*AIOContext)}, + executable: mm.executable, + dumpability: mm.dumpability, + aioManager: aioManager{contexts: make(map[uint64]*AIOContext)}, } // Copy vmas. diff --git a/pkg/sentry/mm/metadata.go b/pkg/sentry/mm/metadata.go index 9768e51f1..c218006ee 100644 --- a/pkg/sentry/mm/metadata.go +++ b/pkg/sentry/mm/metadata.go @@ -20,6 +20,36 @@ import ( "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" ) +// Dumpability describes if and how core dumps should be created. +type Dumpability int + +const ( + // NotDumpable indicates that core dumps should never be created. + NotDumpable Dumpability = iota + + // UserDumpable indicates that core dumps should be created, owned by + // the current user. + UserDumpable + + // RootDumpable indicates that core dumps should be created, owned by + // root. + RootDumpable +) + +// Dumpability returns the dumpability. +func (mm *MemoryManager) Dumpability() Dumpability { + mm.metadataMu.Lock() + defer mm.metadataMu.Unlock() + return mm.dumpability +} + +// SetDumpability sets the dumpability. +func (mm *MemoryManager) SetDumpability(d Dumpability) { + mm.metadataMu.Lock() + defer mm.metadataMu.Unlock() + mm.dumpability = d +} + // ArgvStart returns the start of the application argument vector. // // There is no guarantee that this value is sensible w.r.t. ArgvEnd. diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go index eb6defa2b..0a026ff8c 100644 --- a/pkg/sentry/mm/mm.go +++ b/pkg/sentry/mm/mm.go @@ -219,6 +219,12 @@ type MemoryManager struct { // executable is protected by metadataMu. executable *fs.Dirent + // dumpability describes if and how this MemoryManager may be dumped to + // userspace. + // + // dumpability is protected by metadataMu. + dumpability Dumpability + // aioManager keeps track of AIOContexts used for async IOs. AIOManager // must be cloned when CLONE_VM is used. 
aioManager aioManager diff --git a/pkg/sentry/syscalls/linux/sys_prctl.go b/pkg/sentry/syscalls/linux/sys_prctl.go index 117ae1a0e..1b7e5616b 100644 --- a/pkg/sentry/syscalls/linux/sys_prctl.go +++ b/pkg/sentry/syscalls/linux/sys_prctl.go @@ -15,6 +15,7 @@ package linux import ( + "fmt" "syscall" "gvisor.googlesource.com/gvisor/pkg/abi/linux" @@ -23,6 +24,7 @@ import ( "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth" "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/kdefs" + "gvisor.googlesource.com/gvisor/pkg/sentry/mm" ) // Prctl implements linux syscall prctl(2). @@ -44,6 +46,33 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall _, err := t.CopyOut(args[1].Pointer(), int32(t.ParentDeathSignal())) return 0, nil, err + case linux.PR_GET_DUMPABLE: + d := t.MemoryManager().Dumpability() + switch d { + case mm.NotDumpable: + return linux.SUID_DUMP_DISABLE, nil, nil + case mm.UserDumpable: + return linux.SUID_DUMP_USER, nil, nil + case mm.RootDumpable: + return linux.SUID_DUMP_ROOT, nil, nil + default: + panic(fmt.Sprintf("Unknown dumpability %v", d)) + } + + case linux.PR_SET_DUMPABLE: + var d mm.Dumpability + switch args[1].Int() { + case linux.SUID_DUMP_DISABLE: + d = mm.NotDumpable + case linux.SUID_DUMP_USER: + d = mm.UserDumpable + default: + // N.B. Userspace may not pass SUID_DUMP_ROOT. + return 0, nil, syscall.EINVAL + } + t.MemoryManager().SetDumpability(d) + return 0, nil, nil + case linux.PR_GET_KEEPCAPS: if t.Credentials().KeepCaps { return 1, nil, nil @@ -171,9 +200,7 @@ func Prctl(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Syscall } return 0, nil, t.DropBoundingCapability(cp) - case linux.PR_GET_DUMPABLE, - linux.PR_SET_DUMPABLE, - linux.PR_GET_TIMING, + case linux.PR_GET_TIMING, linux.PR_SET_TIMING, linux.PR_GET_TSC, linux.PR_SET_TSC, diff --git a/test/syscalls/linux/BUILD b/test/syscalls/linux/BUILD index ba9fd6d1f..7633ab162 100644 --- a/test/syscalls/linux/BUILD +++ b/test/syscalls/linux/BUILD @@ -1317,6 +1317,7 @@ cc_binary( linkstatic = 1, deps = [ "//test/util:capability_util", + "//test/util:cleanup", "//test/util:multiprocess_util", "//test/util:posix_error", "//test/util:test_util", diff --git a/test/syscalls/linux/prctl.cc b/test/syscalls/linux/prctl.cc index bce42dc74..bd1779557 100644 --- a/test/syscalls/linux/prctl.cc +++ b/test/syscalls/linux/prctl.cc @@ -17,10 +17,12 @@ #include #include #include + #include #include "gtest/gtest.h" #include "test/util/capability_util.h" +#include "test/util/cleanup.h" #include "test/util/multiprocess_util.h" #include "test/util/posix_error.h" #include "test/util/test_util.h" @@ -35,6 +37,16 @@ namespace testing { namespace { +#ifndef SUID_DUMP_DISABLE +#define SUID_DUMP_DISABLE 0 +#endif /* SUID_DUMP_DISABLE */ +#ifndef SUID_DUMP_USER +#define SUID_DUMP_USER 1 +#endif /* SUID_DUMP_USER */ +#ifndef SUID_DUMP_ROOT +#define SUID_DUMP_ROOT 2 +#endif /* SUID_DUMP_ROOT */ + TEST(PrctlTest, NameInitialized) { const size_t name_length = 20; char name[name_length] = {}; @@ -178,6 +190,28 @@ TEST(PrctlTest, InvalidPrSetMM) { ASSERT_THAT(prctl(PR_SET_MM, 0, 0, 0, 0), SyscallFailsWithErrno(EPERM)); } +// Sanity check that dumpability is remembered. 
+TEST(PrctlTest, SetGetDumpability) { + int before; + ASSERT_THAT(before = prctl(PR_GET_DUMPABLE), SyscallSucceeds()); + auto cleanup = Cleanup([before] { + ASSERT_THAT(prctl(PR_SET_DUMPABLE, before), SyscallSucceeds()); + }); + + EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_DISABLE), SyscallSucceeds()); + EXPECT_THAT(prctl(PR_GET_DUMPABLE), + SyscallSucceedsWithValue(SUID_DUMP_DISABLE)); + + EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_USER), SyscallSucceeds()); + EXPECT_THAT(prctl(PR_GET_DUMPABLE), SyscallSucceedsWithValue(SUID_DUMP_USER)); +} + +// SUID_DUMP_ROOT cannot be set via PR_SET_DUMPABLE. +TEST(PrctlTest, RootDumpability) { + EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_ROOT), + SyscallFailsWithErrno(EINVAL)); +} + } // namespace } // namespace testing diff --git a/test/syscalls/linux/proc.cc b/test/syscalls/linux/proc.cc index ede6fb860..924b98e3a 100644 --- a/test/syscalls/linux/proc.cc +++ b/test/syscalls/linux/proc.cc @@ -69,9 +69,11 @@ // way to get it tested on both gVisor, PTrace and Linux. using ::testing::AllOf; +using ::testing::AnyOf; using ::testing::ContainerEq; using ::testing::Contains; using ::testing::ContainsRegex; +using ::testing::Eq; using ::testing::Gt; using ::testing::HasSubstr; using ::testing::IsSupersetOf; @@ -86,6 +88,16 @@ namespace gvisor { namespace testing { namespace { +#ifndef SUID_DUMP_DISABLE +#define SUID_DUMP_DISABLE 0 +#endif /* SUID_DUMP_DISABLE */ +#ifndef SUID_DUMP_USER +#define SUID_DUMP_USER 1 +#endif /* SUID_DUMP_USER */ +#ifndef SUID_DUMP_ROOT +#define SUID_DUMP_ROOT 2 +#endif /* SUID_DUMP_ROOT */ + // O_LARGEFILE as defined by Linux. glibc tries to be clever by setting it to 0 // because "it isn't needed", even though Linux can return it via F_GETFL. constexpr int kOLargeFile = 00100000; @@ -1896,6 +1908,51 @@ void CheckDuplicatesRecursively(std::string path) { TEST(Proc, NoDuplicates) { CheckDuplicatesRecursively("/proc"); } +// Most /proc/PID files are owned by the task user with SUID_DUMP_USER. +TEST(ProcPid, UserDumpableOwner) { + int before; + ASSERT_THAT(before = prctl(PR_GET_DUMPABLE), SyscallSucceeds()); + auto cleanup = Cleanup([before] { + ASSERT_THAT(prctl(PR_SET_DUMPABLE, before), SyscallSucceeds()); + }); + + EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_USER), SyscallSucceeds()); + + // This applies to the task directory itself and files inside. + struct stat st; + ASSERT_THAT(stat("/proc/self/", &st), SyscallSucceeds()); + EXPECT_EQ(st.st_uid, geteuid()); + EXPECT_EQ(st.st_gid, getegid()); + + ASSERT_THAT(stat("/proc/self/stat", &st), SyscallSucceeds()); + EXPECT_EQ(st.st_uid, geteuid()); + EXPECT_EQ(st.st_gid, getegid()); +} + +// /proc/PID files are owned by root with SUID_DUMP_DISABLE. +TEST(ProcPid, RootDumpableOwner) { + int before; + ASSERT_THAT(before = prctl(PR_GET_DUMPABLE), SyscallSucceeds()); + auto cleanup = Cleanup([before] { + ASSERT_THAT(prctl(PR_SET_DUMPABLE, before), SyscallSucceeds()); + }); + + EXPECT_THAT(prctl(PR_SET_DUMPABLE, SUID_DUMP_DISABLE), SyscallSucceeds()); + + // This *does not* applies to the task directory itself (or other 0555 + // directories), but does to files inside. + struct stat st; + ASSERT_THAT(stat("/proc/self/", &st), SyscallSucceeds()); + EXPECT_EQ(st.st_uid, geteuid()); + EXPECT_EQ(st.st_gid, getegid()); + + // This file is owned by root. Also allow nobody in case this test is running + // in a userns without root mapped. 
+ ASSERT_THAT(stat("/proc/self/stat", &st), SyscallSucceeds()); + EXPECT_THAT(st.st_uid, AnyOf(Eq(0), Eq(65534))); + EXPECT_THAT(st.st_gid, AnyOf(Eq(0), Eq(65534))); +} + } // namespace } // namespace testing } // namespace gvisor -- cgit v1.2.3 From c08fcaa364e917b19aad0f74a8e3a1c700d0bfcc Mon Sep 17 00:00:00 2001 From: Ian Gudger Date: Wed, 5 Jun 2019 15:56:21 -0700 Subject: Give test instantiations meaningful names. PiperOrigin-RevId: 251737069 --- test/syscalls/linux/socket_abstract.cc | 4 ++-- test/syscalls/linux/socket_filesystem.cc | 4 ++-- test/syscalls/linux/socket_ip_loopback_blocking.cc | 2 +- test/syscalls/linux/socket_ip_tcp_generic_loopback.cc | 2 +- test/syscalls/linux/socket_ip_tcp_loopback_blocking.cc | 2 +- test/syscalls/linux/socket_ip_tcp_loopback_nonblock.cc | 2 +- test/syscalls/linux/socket_ip_tcp_udp_generic.cc | 2 +- test/syscalls/linux/socket_ip_udp_loopback.cc | 6 +++--- test/syscalls/linux/socket_ip_udp_loopback_blocking.cc | 2 +- test/syscalls/linux/socket_ip_udp_loopback_nonblock.cc | 2 +- .../linux/socket_ipv4_tcp_unbound_external_networking_test.cc | 2 +- .../linux/socket_ipv4_udp_unbound_external_networking_test.cc | 2 +- test/syscalls/linux/socket_unix_abstract_nonblock.cc | 2 +- test/syscalls/linux/socket_unix_blocking_local.cc | 2 +- test/syscalls/linux/socket_unix_dgram_local.cc | 6 +++--- test/syscalls/linux/socket_unix_dgram_non_blocking.cc | 2 +- test/syscalls/linux/socket_unix_filesystem_nonblock.cc | 2 +- test/syscalls/linux/socket_unix_non_stream_blocking_local.cc | 2 +- test/syscalls/linux/socket_unix_pair_nonblock.cc | 2 +- test/syscalls/linux/socket_unix_seqpacket_local.cc | 6 +++--- test/syscalls/linux/socket_unix_stream_blocking_local.cc | 2 +- test/syscalls/linux/socket_unix_stream_local.cc | 2 +- test/syscalls/linux/socket_unix_stream_nonblock_local.cc | 2 +- 23 files changed, 31 insertions(+), 31 deletions(-) (limited to 'test/syscalls/linux') diff --git a/test/syscalls/linux/socket_abstract.cc b/test/syscalls/linux/socket_abstract.cc index 2faf678f7..503ba986b 100644 --- a/test/syscalls/linux/socket_abstract.cc +++ b/test/syscalls/linux/socket_abstract.cc @@ -31,11 +31,11 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, AllSocketPairTest, + AbstractUnixSockets, AllSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, UnixSocketPairTest, + AbstractUnixSockets, UnixSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_filesystem.cc b/test/syscalls/linux/socket_filesystem.cc index f7cb72df4..e38a320f6 100644 --- a/test/syscalls/linux/socket_filesystem.cc +++ b/test/syscalls/linux/socket_filesystem.cc @@ -31,11 +31,11 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, AllSocketPairTest, + FilesystemUnixSockets, AllSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, UnixSocketPairTest, + FilesystemUnixSockets, UnixSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_ip_loopback_blocking.cc b/test/syscalls/linux/socket_ip_loopback_blocking.cc index d7fc20aad..d7fc9715b 100644 --- a/test/syscalls/linux/socket_ip_loopback_blocking.cc +++ b/test/syscalls/linux/socket_ip_loopback_blocking.cc @@ -39,7 +39,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - 
AllUnixDomainSockets, BlockingSocketPairTest, + BlockingIPSockets, BlockingSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_ip_tcp_generic_loopback.cc b/test/syscalls/linux/socket_ip_tcp_generic_loopback.cc index 2c6ae17bf..0dc274e2d 100644 --- a/test/syscalls/linux/socket_ip_tcp_generic_loopback.cc +++ b/test/syscalls/linux/socket_ip_tcp_generic_loopback.cc @@ -35,7 +35,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, TCPSocketPairTest, + AllTCPSockets, TCPSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_ip_tcp_loopback_blocking.cc b/test/syscalls/linux/socket_ip_tcp_loopback_blocking.cc index d1ea8ef12..cd3ad97d0 100644 --- a/test/syscalls/linux/socket_ip_tcp_loopback_blocking.cc +++ b/test/syscalls/linux/socket_ip_tcp_loopback_blocking.cc @@ -35,7 +35,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, BlockingStreamSocketPairTest, + BlockingTCPSockets, BlockingStreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_ip_tcp_loopback_nonblock.cc b/test/syscalls/linux/socket_ip_tcp_loopback_nonblock.cc index 96c1b3b3d..1acdecc17 100644 --- a/test/syscalls/linux/socket_ip_tcp_loopback_nonblock.cc +++ b/test/syscalls/linux/socket_ip_tcp_loopback_nonblock.cc @@ -34,7 +34,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonBlockingSocketPairTest, + NonBlockingTCPSockets, NonBlockingSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_ip_tcp_udp_generic.cc b/test/syscalls/linux/socket_ip_tcp_udp_generic.cc index 251817a9f..de63f79d9 100644 --- a/test/syscalls/linux/socket_ip_tcp_udp_generic.cc +++ b/test/syscalls/linux/socket_ip_tcp_udp_generic.cc @@ -69,7 +69,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllTCPSockets, TcpUdpSocketPairTest, + AllIPSockets, TcpUdpSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace diff --git a/test/syscalls/linux/socket_ip_udp_loopback.cc b/test/syscalls/linux/socket_ip_udp_loopback.cc index fc124e9ef..1df74a348 100644 --- a/test/syscalls/linux/socket_ip_udp_loopback.cc +++ b/test/syscalls/linux/socket_ip_udp_loopback.cc @@ -33,15 +33,15 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, AllSocketPairTest, + AllUDPSockets, AllSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonStreamSocketPairTest, + AllUDPSockets, NonStreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); INSTANTIATE_TEST_SUITE_P( - UDPSockets, UDPSocketPairTest, + AllUDPSockets, UDPSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_ip_udp_loopback_blocking.cc b/test/syscalls/linux/socket_ip_udp_loopback_blocking.cc index 1c3d1c0ad..1e259efa7 100644 --- a/test/syscalls/linux/socket_ip_udp_loopback_blocking.cc +++ b/test/syscalls/linux/socket_ip_udp_loopback_blocking.cc @@ -30,7 +30,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, BlockingNonStreamSocketPairTest, + BlockingUDPSockets, BlockingNonStreamSocketPairTest, 
::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_ip_udp_loopback_nonblock.cc b/test/syscalls/linux/socket_ip_udp_loopback_nonblock.cc index 7554b08d5..74cbd326d 100644 --- a/test/syscalls/linux/socket_ip_udp_loopback_nonblock.cc +++ b/test/syscalls/linux/socket_ip_udp_loopback_nonblock.cc @@ -30,7 +30,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonBlockingSocketPairTest, + NonBlockingUDPSockets, NonBlockingSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_ipv4_tcp_unbound_external_networking_test.cc b/test/syscalls/linux/socket_ipv4_tcp_unbound_external_networking_test.cc index 040bb176e..92f03e045 100644 --- a/test/syscalls/linux/socket_ipv4_tcp_unbound_external_networking_test.cc +++ b/test/syscalls/linux/socket_ipv4_tcp_unbound_external_networking_test.cc @@ -28,7 +28,7 @@ std::vector GetSockets() { AllBitwiseCombinations(List{0, SOCK_NONBLOCK})); } -INSTANTIATE_TEST_SUITE_P(IPv4TCPSockets, +INSTANTIATE_TEST_SUITE_P(IPv4TCPUnboundSockets, IPv4TCPUnboundExternalNetworkingSocketTest, ::testing::ValuesIn(GetSockets())); } // namespace testing diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking_test.cc b/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking_test.cc index ffbb8e6eb..9d4e1ab97 100644 --- a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking_test.cc +++ b/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking_test.cc @@ -28,7 +28,7 @@ std::vector GetSockets() { AllBitwiseCombinations(List{0, SOCK_NONBLOCK})); } -INSTANTIATE_TEST_SUITE_P(IPv4UDPSockets, +INSTANTIATE_TEST_SUITE_P(IPv4UDPUnboundSockets, IPv4UDPUnboundExternalNetworkingSocketTest, ::testing::ValuesIn(GetSockets())); } // namespace testing diff --git a/test/syscalls/linux/socket_unix_abstract_nonblock.cc b/test/syscalls/linux/socket_unix_abstract_nonblock.cc index 9de0f6dfe..be31ab2a7 100644 --- a/test/syscalls/linux/socket_unix_abstract_nonblock.cc +++ b/test/syscalls/linux/socket_unix_abstract_nonblock.cc @@ -30,7 +30,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonBlockingSocketPairTest, + NonBlockingAbstractUnixSockets, NonBlockingSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_unix_blocking_local.cc b/test/syscalls/linux/socket_unix_blocking_local.cc index 320915b0f..1994139e6 100644 --- a/test/syscalls/linux/socket_unix_blocking_local.cc +++ b/test/syscalls/linux/socket_unix_blocking_local.cc @@ -37,7 +37,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, BlockingSocketPairTest, + NonBlockingUnixDomainSockets, BlockingSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_unix_dgram_local.cc b/test/syscalls/linux/socket_unix_dgram_local.cc index 4ba2c80ae..8c5a473bd 100644 --- a/test/syscalls/linux/socket_unix_dgram_local.cc +++ b/test/syscalls/linux/socket_unix_dgram_local.cc @@ -41,15 +41,15 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, DgramUnixSocketPairTest, + DgramUnixSockets, DgramUnixSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, UnixNonStreamSocketPairTest, + DgramUnixSockets, 
UnixNonStreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonStreamSocketPairTest, + DgramUnixSockets, NonStreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_unix_dgram_non_blocking.cc b/test/syscalls/linux/socket_unix_dgram_non_blocking.cc index 9fe86cee8..707052af8 100644 --- a/test/syscalls/linux/socket_unix_dgram_non_blocking.cc +++ b/test/syscalls/linux/socket_unix_dgram_non_blocking.cc @@ -44,7 +44,7 @@ TEST_P(NonBlockingDgramUnixSocketPairTest, ReadOneSideClosed) { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonBlockingDgramUnixSocketPairTest, + NonBlockingDgramUnixSockets, NonBlockingDgramUnixSocketPairTest, ::testing::ValuesIn(IncludeReversals(std::vector{ UnixDomainSocketPair(SOCK_DGRAM | SOCK_NONBLOCK), FilesystemBoundUnixDomainSocketPair(SOCK_DGRAM | SOCK_NONBLOCK), diff --git a/test/syscalls/linux/socket_unix_filesystem_nonblock.cc b/test/syscalls/linux/socket_unix_filesystem_nonblock.cc index 137db53c4..8ba7af971 100644 --- a/test/syscalls/linux/socket_unix_filesystem_nonblock.cc +++ b/test/syscalls/linux/socket_unix_filesystem_nonblock.cc @@ -30,7 +30,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonBlockingSocketPairTest, + NonBlockingFilesystemUnixSockets, NonBlockingSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_unix_non_stream_blocking_local.cc b/test/syscalls/linux/socket_unix_non_stream_blocking_local.cc index 98cf1fe8a..da762cd83 100644 --- a/test/syscalls/linux/socket_unix_non_stream_blocking_local.cc +++ b/test/syscalls/linux/socket_unix_non_stream_blocking_local.cc @@ -34,7 +34,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, BlockingNonStreamSocketPairTest, + BlockingNonStreamUnixSockets, BlockingNonStreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_unix_pair_nonblock.cc b/test/syscalls/linux/socket_unix_pair_nonblock.cc index 583506f08..3135d325f 100644 --- a/test/syscalls/linux/socket_unix_pair_nonblock.cc +++ b/test/syscalls/linux/socket_unix_pair_nonblock.cc @@ -30,7 +30,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonBlockingSocketPairTest, + NonBlockingUnixSockets, NonBlockingSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_unix_seqpacket_local.cc b/test/syscalls/linux/socket_unix_seqpacket_local.cc index b903a9e8f..dff75a532 100644 --- a/test/syscalls/linux/socket_unix_seqpacket_local.cc +++ b/test/syscalls/linux/socket_unix_seqpacket_local.cc @@ -41,15 +41,15 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonStreamSocketPairTest, + SeqpacketUnixSockets, NonStreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, SeqpacketUnixSocketPairTest, + SeqpacketUnixSockets, SeqpacketUnixSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, UnixNonStreamSocketPairTest, + SeqpacketUnixSockets, UnixNonStreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git 
a/test/syscalls/linux/socket_unix_stream_blocking_local.cc b/test/syscalls/linux/socket_unix_stream_blocking_local.cc index ce0f1e50d..fa0a9d367 100644 --- a/test/syscalls/linux/socket_unix_stream_blocking_local.cc +++ b/test/syscalls/linux/socket_unix_stream_blocking_local.cc @@ -32,7 +32,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, BlockingStreamSocketPairTest, + BlockingStreamUnixSockets, BlockingStreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_unix_stream_local.cc b/test/syscalls/linux/socket_unix_stream_local.cc index 6b840189c..65eef1a81 100644 --- a/test/syscalls/linux/socket_unix_stream_local.cc +++ b/test/syscalls/linux/socket_unix_stream_local.cc @@ -39,7 +39,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, StreamSocketPairTest, + StreamUnixSockets, StreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing diff --git a/test/syscalls/linux/socket_unix_stream_nonblock_local.cc b/test/syscalls/linux/socket_unix_stream_nonblock_local.cc index ebec4e0ec..ec777c59f 100644 --- a/test/syscalls/linux/socket_unix_stream_nonblock_local.cc +++ b/test/syscalls/linux/socket_unix_stream_nonblock_local.cc @@ -31,7 +31,7 @@ std::vector GetSocketPairs() { } INSTANTIATE_TEST_SUITE_P( - AllUnixDomainSockets, NonBlockingStreamSocketPairTest, + NonBlockingStreamUnixSockets, NonBlockingStreamSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); } // namespace testing -- cgit v1.2.3 From d18bb4f38a7a89456dad1f3a0e8ff13a0b65ba7f Mon Sep 17 00:00:00 2001 From: Chris Kuiper Date: Wed, 5 Jun 2019 16:07:18 -0700 Subject: Adjust route when looping multicast packets Multicast packets are special in that their destination address does not identify a specific interface. When sending out such a packet the multicast address is the remote address, but for incoming packets it is the local address. Hence, when looping a multicast packet, the route needs to be tweaked to reflect this. PiperOrigin-RevId: 251739298 --- pkg/tcpip/network/ipv4/ipv4.go | 4 +- pkg/tcpip/network/ipv6/ipv6.go | 4 +- pkg/tcpip/stack/route.go | 10 ++ .../socket_ipv4_udp_unbound_external_networking.cc | 129 +++++++++++++++++++++ 4 files changed, 145 insertions(+), 2 deletions(-) (limited to 'test/syscalls/linux') diff --git a/pkg/tcpip/network/ipv4/ipv4.go b/pkg/tcpip/network/ipv4/ipv4.go index da07a39e5..44b1d5b9b 100644 --- a/pkg/tcpip/network/ipv4/ipv4.go +++ b/pkg/tcpip/network/ipv4/ipv4.go @@ -215,7 +215,9 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, hdr buffer.Prepen views[0] = hdr.View() views = append(views, payload.Views()...) vv := buffer.NewVectorisedView(len(views[0])+payload.Size(), views) - e.HandlePacket(r, vv) + loopedR := r.MakeLoopedRoute() + e.HandlePacket(&loopedR, vv) + loopedR.Release() } if loop&stack.PacketOut == 0 { return nil diff --git a/pkg/tcpip/network/ipv6/ipv6.go b/pkg/tcpip/network/ipv6/ipv6.go index 4b8cd496b..bcae98e1f 100644 --- a/pkg/tcpip/network/ipv6/ipv6.go +++ b/pkg/tcpip/network/ipv6/ipv6.go @@ -108,7 +108,9 @@ func (e *endpoint) WritePacket(r *stack.Route, gso *stack.GSO, hdr buffer.Prepen views[0] = hdr.View() views = append(views, payload.Views()...) 
vv := buffer.NewVectorisedView(len(views[0])+payload.Size(), views) - e.HandlePacket(r, vv) + loopedR := r.MakeLoopedRoute() + e.HandlePacket(&loopedR, vv) + loopedR.Release() } if loop&stack.PacketOut == 0 { return nil diff --git a/pkg/tcpip/stack/route.go b/pkg/tcpip/stack/route.go index 3d4c282a9..55ed02479 100644 --- a/pkg/tcpip/stack/route.go +++ b/pkg/tcpip/stack/route.go @@ -187,3 +187,13 @@ func (r *Route) Clone() Route { r.ref.incRef() return *r } + +// MakeLoopedRoute duplicates the given route and tweaks it in case of multicast. +func (r *Route) MakeLoopedRoute() Route { + l := r.Clone() + if header.IsV4MulticastAddress(r.RemoteAddress) || header.IsV6MulticastAddress(r.RemoteAddress) { + l.RemoteAddress, l.LocalAddress = l.LocalAddress, l.RemoteAddress + l.RemoteLinkAddress = l.LocalLinkAddress + } + return l +} diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.cc b/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.cc index 53dcd58cd..6b92e05aa 100644 --- a/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.cc +++ b/test/syscalls/linux/socket_ipv4_udp_unbound_external_networking.cc @@ -559,5 +559,134 @@ TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest, EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf))); } +// Check that two sockets can join the same multicast group at the same time, +// and both will receive data on it. +TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest, TestSendMulticastToTwo) { + auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket()); + std::unique_ptr receivers[2] = { + ASSERT_NO_ERRNO_AND_VALUE(NewSocket()), + ASSERT_NO_ERRNO_AND_VALUE(NewSocket())}; + + ip_mreq group = {}; + group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress); + auto receiver_addr = V4Any(); + int bound_port = 0; + for (auto& receiver : receivers) { + ASSERT_THAT(setsockopt(receiver->get(), SOL_SOCKET, SO_REUSEPORT, + &kSockOptOn, sizeof(kSockOptOn)), + SyscallSucceeds()); + // Bind the receiver to the v4 any address to ensure that we can receive the + // multicast packet. + ASSERT_THAT( + bind(receiver->get(), reinterpret_cast(&receiver_addr.addr), + receiver_addr.addr_len), + SyscallSucceeds()); + socklen_t receiver_addr_len = receiver_addr.addr_len; + ASSERT_THAT(getsockname(receiver->get(), + reinterpret_cast(&receiver_addr.addr), + &receiver_addr_len), + SyscallSucceeds()); + EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len); + // On the first iteration, save the port we are bound to. On the second + // iteration, verify the port is the same as the one from the first + // iteration. In other words, both sockets listen on the same port. + if (bound_port == 0) { + bound_port = + reinterpret_cast(&receiver_addr.addr)->sin_port; + } else { + EXPECT_EQ(bound_port, + reinterpret_cast(&receiver_addr.addr)->sin_port); + } + + // Register to receive multicast packets. + ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, + &group, sizeof(group)), + SyscallSucceeds()); + } + + // Send a multicast packet to the group and verify both receivers get it. 
+ auto send_addr = V4Multicast(); + reinterpret_cast(&send_addr.addr)->sin_port = bound_port; + char send_buf[200]; + RandomizeBuffer(send_buf, sizeof(send_buf)); + ASSERT_THAT(RetryEINTR(sendto)(sender->get(), send_buf, sizeof(send_buf), 0, + reinterpret_cast(&send_addr.addr), + send_addr.addr_len), + SyscallSucceedsWithValue(sizeof(send_buf))); + for (auto& receiver : receivers) { + char recv_buf[sizeof(send_buf)] = {}; + ASSERT_THAT( + RetryEINTR(recv)(receiver->get(), recv_buf, sizeof(recv_buf), 0), + SyscallSucceedsWithValue(sizeof(recv_buf))); + EXPECT_EQ(0, memcmp(send_buf, recv_buf, sizeof(send_buf))); + } +} + +// Check that when receiving a looped-back multicast packet, its source address +// is not a multicast address. +TEST_P(IPv4UDPUnboundExternalNetworkingSocketTest, + IpMulticastLoopbackFromAddr) { + auto sender = ASSERT_NO_ERRNO_AND_VALUE(NewSocket()); + auto receiver = ASSERT_NO_ERRNO_AND_VALUE(NewSocket()); + + auto receiver_addr = V4Any(); + ASSERT_THAT( + bind(receiver->get(), reinterpret_cast(&receiver_addr.addr), + receiver_addr.addr_len), + SyscallSucceeds()); + socklen_t receiver_addr_len = receiver_addr.addr_len; + ASSERT_THAT(getsockname(receiver->get(), + reinterpret_cast(&receiver_addr.addr), + &receiver_addr_len), + SyscallSucceeds()); + EXPECT_EQ(receiver_addr_len, receiver_addr.addr_len); + int receiver_port = + reinterpret_cast(&receiver_addr.addr)->sin_port; + + ip_mreq group = {}; + group.imr_multiaddr.s_addr = inet_addr(kMulticastAddress); + ASSERT_THAT(setsockopt(receiver->get(), IPPROTO_IP, IP_ADD_MEMBERSHIP, &group, + sizeof(group)), + SyscallSucceeds()); + + // Connect to the multicast address. This binds us to the outgoing interface + // and allows us to get its IP (to be compared against the src-IP on the + // receiver side). + auto sendto_addr = V4Multicast(); + reinterpret_cast(&sendto_addr.addr)->sin_port = receiver_port; + ASSERT_THAT(RetryEINTR(connect)( + sender->get(), reinterpret_cast(&sendto_addr.addr), + sendto_addr.addr_len), + SyscallSucceeds()); + TestAddress sender_addr(""); + ASSERT_THAT( + getsockname(sender->get(), reinterpret_cast(&sender_addr.addr), + &sender_addr.addr_len), + SyscallSucceeds()); + ASSERT_EQ(sizeof(struct sockaddr_in), sender_addr.addr_len); + sockaddr_in* sender_addr_in = + reinterpret_cast(&sender_addr.addr); + + // Send a multicast packet. + char send_buf[4] = {}; + ASSERT_THAT(RetryEINTR(send)(sender->get(), send_buf, sizeof(send_buf), 0), + SyscallSucceedsWithValue(sizeof(send_buf))); + + // Receive a multicast packet. + char recv_buf[sizeof(send_buf)] = {}; + TestAddress src_addr(""); + ASSERT_THAT( + RetryEINTR(recvfrom)(receiver->get(), recv_buf, sizeof(recv_buf), 0, + reinterpret_cast(&src_addr.addr), + &src_addr.addr_len), + SyscallSucceedsWithValue(sizeof(recv_buf))); + ASSERT_EQ(sizeof(struct sockaddr_in), src_addr.addr_len); + sockaddr_in* src_addr_in = reinterpret_cast(&src_addr.addr); + + // Verify that the received source IP:port matches the sender one. + EXPECT_EQ(sender_addr_in->sin_port, src_addr_in->sin_port); + EXPECT_EQ(sender_addr_in->sin_addr.s_addr, src_addr_in->sin_addr.s_addr); +} + } // namespace testing } // namespace gvisor -- cgit v1.2.3 From 57772db2e7351511de422baeecf807785709ee5d Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Wed, 5 Jun 2019 18:39:30 -0700 Subject: Shutdown host sockets on internal shutdown This is required to make the shutdown visible to peers outside the sandbox. 
The readClosed / writeClosed fields were dropped, as they were preventing a shutdown socket from reading the remainder of queued bytes. The host syscalls will return the appropriate errors for shutdown. The control message tests have been split out of socket_unix.cc to make the (few) remaining tests accessible to testing inherited host UDS, which don't support sending control messages. Updates #273 PiperOrigin-RevId: 251763060 --- pkg/sentry/fs/host/socket.go | 62 +- pkg/sentry/fs/host/socket_test.go | 156 --- runsc/boot/filter/config.go | 4 + test/syscalls/linux/BUILD | 23 + test/syscalls/linux/socket_abstract.cc | 5 + test/syscalls/linux/socket_filesystem.cc | 5 + test/syscalls/linux/socket_unix.cc | 1518 ++---------------------------- test/syscalls/linux/socket_unix_cmsg.cc | 1473 +++++++++++++++++++++++++++++ test/syscalls/linux/socket_unix_cmsg.h | 30 + test/syscalls/linux/socket_unix_pair.cc | 5 + 10 files changed, 1655 insertions(+), 1626 deletions(-) create mode 100644 test/syscalls/linux/socket_unix_cmsg.cc create mode 100644 test/syscalls/linux/socket_unix_cmsg.h (limited to 'test/syscalls/linux') diff --git a/pkg/sentry/fs/host/socket.go b/pkg/sentry/fs/host/socket.go index 3ed137006..e4ec0f62c 100644 --- a/pkg/sentry/fs/host/socket.go +++ b/pkg/sentry/fs/host/socket.go @@ -15,6 +15,7 @@ package host import ( + "fmt" "sync" "syscall" @@ -51,20 +52,6 @@ type ConnectedEndpoint struct { // ref keeps track of references to a connectedEndpoint. ref refs.AtomicRefCount - // mu protects fd, readClosed and writeClosed. - mu sync.RWMutex `state:"nosave"` - - // file is an *fd.FD containing the FD backing this endpoint. It must be - // set to nil if it has been closed. - file *fd.FD `state:"nosave"` - - // readClosed is true if the FD has read shutdown or if it has been closed. - readClosed bool - - // writeClosed is true if the FD has write shutdown or if it has been - // closed. - writeClosed bool - // If srfd >= 0, it is the host FD that file was imported from. srfd int `state:"wait"` @@ -78,6 +65,13 @@ type ConnectedEndpoint struct { // prevent lots of small messages from filling the real send buffer // size on the host. sndbuf int `state:"nosave"` + + // mu protects the fields below. + mu sync.RWMutex `state:"nosave"` + + // file is an *fd.FD containing the FD backing this endpoint. It must be + // set to nil if it has been closed. + file *fd.FD `state:"nosave"` } // init performs initialization required for creating new ConnectedEndpoints and @@ -208,9 +202,6 @@ func newSocket(ctx context.Context, orgfd int, saveable bool) (*fs.File, error) func (c *ConnectedEndpoint) Send(data [][]byte, controlMessages transport.ControlMessages, from tcpip.FullAddress) (uintptr, bool, *syserr.Error) { c.mu.RLock() defer c.mu.RUnlock() - if c.writeClosed { - return 0, false, syserr.ErrClosedForSend - } if !controlMessages.Empty() { return 0, false, syserr.ErrInvalidEndpointState @@ -244,8 +235,13 @@ func (c *ConnectedEndpoint) SendNotify() {} // CloseSend implements transport.ConnectedEndpoint.CloseSend. func (c *ConnectedEndpoint) CloseSend() { c.mu.Lock() - c.writeClosed = true - c.mu.Unlock() + defer c.mu.Unlock() + + if err := syscall.Shutdown(c.file.FD(), syscall.SHUT_WR); err != nil { + // A well-formed UDS shutdown can't fail. See + // net/unix/af_unix.c:unix_shutdown. + panic(fmt.Sprintf("failed write shutdown on host socket %+v: %v", c, err)) + } } // CloseNotify implements transport.ConnectedEndpoint.CloseNotify. 
@@ -255,9 +251,7 @@ func (c *ConnectedEndpoint) CloseNotify() {} func (c *ConnectedEndpoint) Writable() bool { c.mu.RLock() defer c.mu.RUnlock() - if c.writeClosed { - return true - } + return fdnotifier.NonBlockingPoll(int32(c.file.FD()), waiter.EventOut)&waiter.EventOut != 0 } @@ -285,9 +279,6 @@ func (c *ConnectedEndpoint) EventUpdate() { func (c *ConnectedEndpoint) Recv(data [][]byte, creds bool, numRights uintptr, peek bool) (uintptr, uintptr, transport.ControlMessages, bool, tcpip.FullAddress, bool, *syserr.Error) { c.mu.RLock() defer c.mu.RUnlock() - if c.readClosed { - return 0, 0, transport.ControlMessages{}, false, tcpip.FullAddress{}, false, syserr.ErrClosedForReceive - } var cm unet.ControlMessage if numRights > 0 { @@ -344,31 +335,34 @@ func (c *ConnectedEndpoint) RecvNotify() {} // CloseRecv implements transport.Receiver.CloseRecv. func (c *ConnectedEndpoint) CloseRecv() { c.mu.Lock() - c.readClosed = true - c.mu.Unlock() + defer c.mu.Unlock() + + if err := syscall.Shutdown(c.file.FD(), syscall.SHUT_RD); err != nil { + // A well-formed UDS shutdown can't fail. See + // net/unix/af_unix.c:unix_shutdown. + panic(fmt.Sprintf("failed read shutdown on host socket %+v: %v", c, err)) + } } // Readable implements transport.Receiver.Readable. func (c *ConnectedEndpoint) Readable() bool { c.mu.RLock() defer c.mu.RUnlock() - if c.readClosed { - return true - } + return fdnotifier.NonBlockingPoll(int32(c.file.FD()), waiter.EventIn)&waiter.EventIn != 0 } // SendQueuedSize implements transport.Receiver.SendQueuedSize. func (c *ConnectedEndpoint) SendQueuedSize() int64 { - // SendQueuedSize isn't supported for host sockets because we don't allow the - // sentry to call ioctl(2). + // TODO(gvisor.dev/issue/273): SendQueuedSize isn't supported for host + // sockets because we don't allow the sentry to call ioctl(2). return -1 } // RecvQueuedSize implements transport.Receiver.RecvQueuedSize. func (c *ConnectedEndpoint) RecvQueuedSize() int64 { - // RecvQueuedSize isn't supported for host sockets because we don't allow the - // sentry to call ioctl(2). + // TODO(gvisor.dev/issue/273): RecvQueuedSize isn't supported for host + // sockets because we don't allow the sentry to call ioctl(2). 
return -1 } diff --git a/pkg/sentry/fs/host/socket_test.go b/pkg/sentry/fs/host/socket_test.go index 06392a65a..bc3ce5627 100644 --- a/pkg/sentry/fs/host/socket_test.go +++ b/pkg/sentry/fs/host/socket_test.go @@ -198,20 +198,6 @@ func TestListen(t *testing.T) { } } -func TestSend(t *testing.T) { - e := ConnectedEndpoint{writeClosed: true} - if _, _, err := e.Send(nil, transport.ControlMessages{}, tcpip.FullAddress{}); err != syserr.ErrClosedForSend { - t.Errorf("Got %#v.Send() = %v, want = %v", e, err, syserr.ErrClosedForSend) - } -} - -func TestRecv(t *testing.T) { - e := ConnectedEndpoint{readClosed: true} - if _, _, _, _, _, _, err := e.Recv(nil, false, 0, false); err != syserr.ErrClosedForReceive { - t.Errorf("Got %#v.Recv() = %v, want = %v", e, err, syserr.ErrClosedForReceive) - } -} - func TestPasscred(t *testing.T) { e := ConnectedEndpoint{} if got, want := e.Passcred(), false; got != want { @@ -244,20 +230,6 @@ func TestQueuedSize(t *testing.T) { } } -func TestReadable(t *testing.T) { - e := ConnectedEndpoint{readClosed: true} - if got, want := e.Readable(), true; got != want { - t.Errorf("Got %#v.Readable() = %t, want = %t", e, got, want) - } -} - -func TestWritable(t *testing.T) { - e := ConnectedEndpoint{writeClosed: true} - if got, want := e.Writable(), true; got != want { - t.Errorf("Got %#v.Writable() = %t, want = %t", e, got, want) - } -} - func TestRelease(t *testing.T) { f, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) if err != nil { @@ -272,131 +244,3 @@ func TestRelease(t *testing.T) { t.Errorf("got = %#v, want = %#v", c, want) } } - -func TestClose(t *testing.T) { - type testCase struct { - name string - cep *ConnectedEndpoint - addFD bool - f func() - want *ConnectedEndpoint - } - - var tests []testCase - - // nil is the value used by ConnectedEndpoint to indicate a closed file. - // Non-nil files are used to check if the file gets closed. 
- - f, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) - if err != nil { - t.Fatal("Creating socket:", err) - } - c := &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f)} - tests = append(tests, testCase{ - name: "First CloseRecv", - cep: c, - addFD: false, - f: c.CloseRecv, - want: &ConnectedEndpoint{queue: c.queue, file: c.file, readClosed: true}, - }) - - f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) - if err != nil { - t.Fatal("Creating socket:", err) - } - c = &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), readClosed: true} - tests = append(tests, testCase{ - name: "Second CloseRecv", - cep: c, - addFD: false, - f: c.CloseRecv, - want: &ConnectedEndpoint{queue: c.queue, file: c.file, readClosed: true}, - }) - - f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) - if err != nil { - t.Fatal("Creating socket:", err) - } - c = &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f)} - tests = append(tests, testCase{ - name: "First CloseSend", - cep: c, - addFD: false, - f: c.CloseSend, - want: &ConnectedEndpoint{queue: c.queue, file: c.file, writeClosed: true}, - }) - - f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) - if err != nil { - t.Fatal("Creating socket:", err) - } - c = &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), writeClosed: true} - tests = append(tests, testCase{ - name: "Second CloseSend", - cep: c, - addFD: false, - f: c.CloseSend, - want: &ConnectedEndpoint{queue: c.queue, file: c.file, writeClosed: true}, - }) - - f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) - if err != nil { - t.Fatal("Creating socket:", err) - } - c = &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), writeClosed: true} - tests = append(tests, testCase{ - name: "CloseSend then CloseRecv", - cep: c, - addFD: true, - f: c.CloseRecv, - want: &ConnectedEndpoint{queue: c.queue, file: c.file, readClosed: true, writeClosed: true}, - }) - - f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) - if err != nil { - t.Fatal("Creating socket:", err) - } - c = &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), readClosed: true} - tests = append(tests, testCase{ - name: "CloseRecv then CloseSend", - cep: c, - addFD: true, - f: c.CloseSend, - want: &ConnectedEndpoint{queue: c.queue, file: c.file, readClosed: true, writeClosed: true}, - }) - - f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) - if err != nil { - t.Fatal("Creating socket:", err) - } - c = &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), readClosed: true, writeClosed: true} - tests = append(tests, testCase{ - name: "Full close then CloseRecv", - cep: c, - addFD: false, - f: c.CloseRecv, - want: &ConnectedEndpoint{queue: c.queue, file: c.file, readClosed: true, writeClosed: true}, - }) - - f, err = syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC, 0) - if err != nil { - t.Fatal("Creating socket:", err) - } - c = &ConnectedEndpoint{queue: &waiter.Queue{}, file: fd.New(f), readClosed: true, writeClosed: true} - tests = append(tests, testCase{ - name: "Full close then CloseSend", - cep: c, - addFD: false, - f: c.CloseSend, - want: 
&ConnectedEndpoint{queue: c.queue, file: c.file, readClosed: true, writeClosed: true}, - }) - - for _, test := range tests { - if test.addFD { - fdnotifier.AddFD(int32(test.cep.file.FD()), nil) - } - if test.f(); !reflect.DeepEqual(test.cep, test.want) { - t.Errorf("%s: got = %#v, want = %#v", test.name, test.cep, test.want) - } - } -} diff --git a/runsc/boot/filter/config.go b/runsc/boot/filter/config.go index 652da1cef..ef2dbfad2 100644 --- a/runsc/boot/filter/config.go +++ b/runsc/boot/filter/config.go @@ -246,6 +246,10 @@ var allowedSyscalls = seccomp.SyscallRules{ }, syscall.SYS_SETITIMER: {}, syscall.SYS_SHUTDOWN: []seccomp.Rule{ + // Used by fs/host to shutdown host sockets. + {seccomp.AllowAny{}, seccomp.AllowValue(syscall.SHUT_RD)}, + {seccomp.AllowAny{}, seccomp.AllowValue(syscall.SHUT_WR)}, + // Used by unet to shutdown connections. {seccomp.AllowAny{}, seccomp.AllowValue(syscall.SHUT_RDWR)}, }, syscall.SYS_SIGALTSTACK: {}, diff --git a/test/syscalls/linux/BUILD b/test/syscalls/linux/BUILD index 7633ab162..0cb7b47b6 100644 --- a/test/syscalls/linux/BUILD +++ b/test/syscalls/linux/BUILD @@ -2096,6 +2096,7 @@ cc_binary( deps = [ ":socket_generic_test_cases", ":socket_test_util", + ":socket_unix_cmsg_test_cases", ":socket_unix_test_cases", ":unix_domain_socket_test_util", "//test/util:test_main", @@ -2369,6 +2370,7 @@ cc_binary( deps = [ ":socket_generic_test_cases", ":socket_test_util", + ":socket_unix_cmsg_test_cases", ":socket_unix_test_cases", ":unix_domain_socket_test_util", "//test/util:test_main", @@ -2490,6 +2492,26 @@ cc_library( alwayslink = 1, ) +cc_library( + name = "socket_unix_cmsg_test_cases", + testonly = 1, + srcs = [ + "socket_unix_cmsg.cc", + ], + hdrs = [ + "socket_unix_cmsg.h", + ], + deps = [ + ":socket_test_util", + ":unix_domain_socket_test_util", + "//test/util:test_util", + "//test/util:thread_util", + "@com_google_absl//absl/strings", + "@com_google_googletest//:gtest", + ], + alwayslink = 1, +) + cc_library( name = "socket_stream_blocking_test_cases", testonly = 1, @@ -2733,6 +2755,7 @@ cc_binary( linkstatic = 1, deps = [ ":socket_test_util", + ":socket_unix_cmsg_test_cases", ":socket_unix_test_cases", ":unix_domain_socket_test_util", "//test/util:test_main", diff --git a/test/syscalls/linux/socket_abstract.cc b/test/syscalls/linux/socket_abstract.cc index 503ba986b..715d87b76 100644 --- a/test/syscalls/linux/socket_abstract.cc +++ b/test/syscalls/linux/socket_abstract.cc @@ -17,6 +17,7 @@ #include "test/syscalls/linux/socket_generic.h" #include "test/syscalls/linux/socket_test_util.h" #include "test/syscalls/linux/socket_unix.h" +#include "test/syscalls/linux/socket_unix_cmsg.h" #include "test/syscalls/linux/unix_domain_socket_test_util.h" #include "test/util/test_util.h" @@ -38,5 +39,9 @@ INSTANTIATE_TEST_SUITE_P( AbstractUnixSockets, UnixSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); +INSTANTIATE_TEST_SUITE_P( + AbstractUnixSockets, UnixSocketPairCmsgTest, + ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); + } // namespace testing } // namespace gvisor diff --git a/test/syscalls/linux/socket_filesystem.cc b/test/syscalls/linux/socket_filesystem.cc index e38a320f6..74e262959 100644 --- a/test/syscalls/linux/socket_filesystem.cc +++ b/test/syscalls/linux/socket_filesystem.cc @@ -17,6 +17,7 @@ #include "test/syscalls/linux/socket_generic.h" #include "test/syscalls/linux/socket_test_util.h" #include "test/syscalls/linux/socket_unix.h" +#include "test/syscalls/linux/socket_unix_cmsg.h" #include 
"test/syscalls/linux/unix_domain_socket_test_util.h" #include "test/util/test_util.h" @@ -38,5 +39,9 @@ INSTANTIATE_TEST_SUITE_P( FilesystemUnixSockets, UnixSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); +INSTANTIATE_TEST_SUITE_P( + FilesystemUnixSockets, UnixSocketPairCmsgTest, + ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); + } // namespace testing } // namespace gvisor diff --git a/test/syscalls/linux/socket_unix.cc b/test/syscalls/linux/socket_unix.cc index 95cf8d2a3..875f0391f 100644 --- a/test/syscalls/linux/socket_unix.cc +++ b/test/syscalls/linux/socket_unix.cc @@ -32,1437 +32,16 @@ #include "test/util/test_util.h" #include "test/util/thread_util.h" -// This file is a generic socket test file. It must be built with another file -// that provides the test types. - -namespace gvisor { -namespace testing { - -namespace { - -TEST_P(UnixSocketPairTest, BasicFDPass) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - char received_data[20]; - int fd = -1; - ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data, - sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); -} - -TEST_P(UnixSocketPairTest, BasicTwoFDPass) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair1 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - auto pair2 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - int sent_fds[] = {pair1->second_fd(), pair2->second_fd()}; - - ASSERT_NO_FATAL_FAILURE( - SendFDs(sockets->first_fd(), sent_fds, 2, sent_data, sizeof(sent_data))); - - char received_data[20]; - int received_fds[] = {-1, -1}; - - ASSERT_NO_FATAL_FAILURE(RecvFDs(sockets->second_fd(), received_fds, 2, - received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[0], pair1->first_fd())); - ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[1], pair2->first_fd())); -} - -TEST_P(UnixSocketPairTest, BasicThreeFDPass) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair1 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - auto pair2 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - auto pair3 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - int sent_fds[] = {pair1->second_fd(), pair2->second_fd(), pair3->second_fd()}; - - ASSERT_NO_FATAL_FAILURE( - SendFDs(sockets->first_fd(), sent_fds, 3, sent_data, sizeof(sent_data))); - - char received_data[20]; - int received_fds[] = {-1, -1, -1}; - - ASSERT_NO_FATAL_FAILURE(RecvFDs(sockets->second_fd(), received_fds, 3, - received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[0], pair1->first_fd())); - ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[1], pair2->first_fd())); - 
ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[2], pair3->first_fd())); -} - -TEST_P(UnixSocketPairTest, BadFDPass) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - int sent_fd = -1; - - struct msghdr msg = {}; - char control[CMSG_SPACE(sizeof(sent_fd))]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_len = CMSG_LEN(sizeof(sent_fd)); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - memcpy(CMSG_DATA(cmsg), &sent_fd, sizeof(sent_fd)); - - struct iovec iov; - iov.iov_base = sent_data; - iov.iov_len = sizeof(sent_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(sendmsg)(sockets->first_fd(), &msg, 0), - SyscallFailsWithErrno(EBADF)); -} - -// BasicFDPassNoSpace starts off by sending a single FD just like BasicFDPass. -// The difference is that when calling recvmsg, no space for FDs is provided, -// only space for the cmsg header. -TEST_P(UnixSocketPairTest, BasicFDPassNoSpace) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - char received_data[20]; - - struct msghdr msg = {}; - std::vector control(CMSG_SPACE(0)); - msg.msg_control = &control[0]; - msg.msg_controllen = control.size(); - - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(msg.msg_controllen, 0); - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); -} - -// BasicFDPassNoSpaceMsgCtrunc sends an FD, but does not provide any space to -// receive it. It then verifies that the MSG_CTRUNC flag is set in the msghdr. -TEST_P(UnixSocketPairTest, BasicFDPassNoSpaceMsgCtrunc) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - std::vector control(CMSG_SPACE(0)); - msg.msg_control = &control[0]; - msg.msg_controllen = control.size(); - - char received_data[sizeof(sent_data)]; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(msg.msg_controllen, 0); - EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); -} - -// BasicFDPassNullControlMsgCtrunc sends an FD and sets contradictory values for -// msg_controllen and msg_control. msg_controllen is set to the correct size to -// accomidate the FD, but msg_control is set to NULL. In this case, msg_control -// should override msg_controllen. -TEST_P(UnixSocketPairTest, BasicFDPassNullControlMsgCtrunc) { - // FIXME(gvisor.dev/issue/207): Fix handling of NULL msg_control. 
- SKIP_IF(IsRunningOnGvisor()); - - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - msg.msg_controllen = CMSG_SPACE(1); - - char received_data[sizeof(sent_data)]; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(msg.msg_controllen, 0); - EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); -} - -// BasicFDPassNotEnoughSpaceMsgCtrunc sends an FD, but does not provide enough -// space to receive it. It then verifies that the MSG_CTRUNC flag is set in the -// msghdr. -TEST_P(UnixSocketPairTest, BasicFDPassNotEnoughSpaceMsgCtrunc) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - std::vector control(CMSG_SPACE(0) + 1); - msg.msg_control = &control[0]; - msg.msg_controllen = control.size(); - - char received_data[sizeof(sent_data)]; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(msg.msg_controllen, 0); - EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); -} - -// BasicThreeFDPassTruncationMsgCtrunc sends three FDs, but only provides enough -// space to receive two of them. It then verifies that the MSG_CTRUNC flag is -// set in the msghdr. 
-TEST_P(UnixSocketPairTest, BasicThreeFDPassTruncationMsgCtrunc) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair1 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - auto pair2 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - auto pair3 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - int sent_fds[] = {pair1->second_fd(), pair2->second_fd(), pair3->second_fd()}; - - ASSERT_NO_FATAL_FAILURE( - SendFDs(sockets->first_fd(), sent_fds, 3, sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - std::vector control(CMSG_SPACE(2 * sizeof(int))); - msg.msg_control = &control[0]; - msg.msg_controllen = control.size(); - - char received_data[sizeof(sent_data)]; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(2 * sizeof(int))); - EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); - EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS); -} - -// BasicFDPassUnalignedRecv starts off by sending a single FD just like -// BasicFDPass. The difference is that when calling recvmsg, the length of the -// receive data is only aligned on a 4 byte boundry instead of the normal 8. -TEST_P(UnixSocketPairTest, BasicFDPassUnalignedRecv) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - char received_data[20]; - int fd = -1; - ASSERT_NO_FATAL_FAILURE(RecvSingleFDUnaligned( - sockets->second_fd(), &fd, received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); -} - -// BasicFDPassUnalignedRecvNoMsgTrunc sends one FD and only provides enough -// space to receive just it. (Normally the minimum amount of space one would -// provide would be enough space for two FDs.) It then verifies that the -// MSG_CTRUNC flag is not set in the msghdr. 
-TEST_P(UnixSocketPairTest, BasicFDPassUnalignedRecvNoMsgTrunc) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - char control[CMSG_SPACE(sizeof(int)) - sizeof(int)]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - char received_data[sizeof(sent_data)] = {}; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(msg.msg_flags, 0); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int))); - EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); - EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS); -} - -// BasicTwoFDPassUnalignedRecvTruncationMsgTrunc sends two FDs, but only -// provides enough space to receive one of them. It then verifies that the -// MSG_CTRUNC flag is set in the msghdr. -TEST_P(UnixSocketPairTest, BasicTwoFDPassUnalignedRecvTruncationMsgTrunc) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - int sent_fds[] = {pair->first_fd(), pair->second_fd()}; - - ASSERT_NO_FATAL_FAILURE( - SendFDs(sockets->first_fd(), sent_fds, 2, sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - // CMSG_SPACE rounds up to two FDs, we only want one. 
- char control[CMSG_SPACE(sizeof(int)) - sizeof(int)]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - char received_data[sizeof(sent_data)] = {}; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int))); - EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); - EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS); -} - -TEST_P(UnixSocketPairTest, ConcurrentBasicFDPass) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - int sockfd1 = sockets->first_fd(); - auto recv_func = [sockfd1, sent_data]() { - char received_data[20]; - int fd = -1; - RecvSingleFD(sockfd1, &fd, received_data, sizeof(received_data)); - ASSERT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - char buf[20]; - ASSERT_THAT(ReadFd(fd, buf, sizeof(buf)), - SyscallSucceedsWithValue(sizeof(buf))); - ASSERT_THAT(WriteFd(fd, buf, sizeof(buf)), - SyscallSucceedsWithValue(sizeof(buf))); - }; - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->second_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - ScopedThread t(recv_func); - - RandomizeBuffer(sent_data, sizeof(sent_data)); - ASSERT_THAT(WriteFd(pair->first_fd(), sent_data, sizeof(sent_data)), - SyscallSucceedsWithValue(sizeof(sent_data))); - - char received_data[20]; - ASSERT_THAT(ReadFd(pair->first_fd(), received_data, sizeof(received_data)), - SyscallSucceedsWithValue(sizeof(received_data))); - - t.Join(); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); -} - -// FDPassNoRecv checks that the control message can be safely ignored by using -// read(2) instead of recvmsg(2). -TEST_P(UnixSocketPairTest, FDPassNoRecv) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - // Read while ignoring the passed FD. - char received_data[20]; - ASSERT_THAT( - ReadFd(sockets->second_fd(), received_data, sizeof(received_data)), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - // Check that the socket still works for reads and writes. - ASSERT_NO_FATAL_FAILURE( - TransferTest(sockets->first_fd(), sockets->second_fd())); -} - -// FDPassInterspersed1 checks that sent control messages cannot be read before -// their associated data has been read. 
-TEST_P(UnixSocketPairTest, FDPassInterspersed1) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char written_data[20]; - RandomizeBuffer(written_data, sizeof(written_data)); - - ASSERT_THAT(WriteFd(sockets->first_fd(), written_data, sizeof(written_data)), - SyscallSucceedsWithValue(sizeof(written_data))); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - // Check that we don't get a control message, but do get the data. - char received_data[20]; - RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)); - EXPECT_EQ(0, memcmp(written_data, received_data, sizeof(written_data))); -} - -// FDPassInterspersed2 checks that sent control messages cannot be read after -// their assocated data has been read while ignoring the control message by -// using read(2) instead of recvmsg(2). -TEST_P(UnixSocketPairTest, FDPassInterspersed2) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - char written_data[20]; - RandomizeBuffer(written_data, sizeof(written_data)); - ASSERT_THAT(WriteFd(sockets->first_fd(), written_data, sizeof(written_data)), - SyscallSucceedsWithValue(sizeof(written_data))); - - char received_data[20]; - ASSERT_THAT( - ReadFd(sockets->second_fd(), received_data, sizeof(received_data)), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - ASSERT_NO_FATAL_FAILURE( - RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); - EXPECT_EQ(0, memcmp(written_data, received_data, sizeof(written_data))); -} - -TEST_P(UnixSocketPairTest, FDPassNotCoalesced) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data1[20]; - RandomizeBuffer(sent_data1, sizeof(sent_data1)); - - auto pair1 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair1->second_fd(), - sent_data1, sizeof(sent_data1))); - - char sent_data2[20]; - RandomizeBuffer(sent_data2, sizeof(sent_data2)); - - auto pair2 = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair2->second_fd(), - sent_data2, sizeof(sent_data2))); - - char received_data1[sizeof(sent_data1) + sizeof(sent_data2)]; - int received_fd1 = -1; - - RecvSingleFD(sockets->second_fd(), &received_fd1, received_data1, - sizeof(received_data1), sizeof(sent_data1)); - - EXPECT_EQ(0, memcmp(sent_data1, received_data1, sizeof(sent_data1))); - TransferTest(pair1->first_fd(), pair1->second_fd()); - - char received_data2[sizeof(sent_data1) + sizeof(sent_data2)]; - int received_fd2 = -1; - - RecvSingleFD(sockets->second_fd(), &received_fd2, received_data2, - sizeof(received_data2), sizeof(sent_data2)); - - EXPECT_EQ(0, memcmp(sent_data2, received_data2, sizeof(sent_data2))); - TransferTest(pair2->first_fd(), pair2->second_fd()); -} - -TEST_P(UnixSocketPairTest, FDPassPeek) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char 
sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - char peek_data[20]; - int peek_fd = -1; - PeekSingleFD(sockets->second_fd(), &peek_fd, peek_data, sizeof(peek_data)); - EXPECT_EQ(0, memcmp(sent_data, peek_data, sizeof(sent_data))); - TransferTest(peek_fd, pair->first_fd()); - EXPECT_THAT(close(peek_fd), SyscallSucceeds()); - - char received_data[20]; - int received_fd = -1; - RecvSingleFD(sockets->second_fd(), &received_fd, received_data, - sizeof(received_data)); - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - TransferTest(received_fd, pair->first_fd()); - EXPECT_THAT(close(received_fd), SyscallSucceeds()); -} - -TEST_P(UnixSocketPairTest, BasicCredPass) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - struct ucred sent_creds; - - ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); - - ASSERT_NO_FATAL_FAILURE( - SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - char received_data[20]; - struct ucred received_creds; - ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, - received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - EXPECT_EQ(sent_creds.pid, received_creds.pid); - EXPECT_EQ(sent_creds.uid, received_creds.uid); - EXPECT_EQ(sent_creds.gid, received_creds.gid); -} - -TEST_P(UnixSocketPairTest, SendNullCredsBeforeSoPassCredRecvEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - ASSERT_NO_FATAL_FAILURE( - SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - char received_data[20]; - struct ucred received_creds; - ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, - received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - struct ucred want_creds { - 0, 65534, 65534 - }; - - EXPECT_EQ(want_creds.pid, received_creds.pid); - EXPECT_EQ(want_creds.uid, received_creds.uid); - EXPECT_EQ(want_creds.gid, received_creds.gid); -} - -TEST_P(UnixSocketPairTest, SendNullCredsAfterSoPassCredRecvEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - SetSoPassCred(sockets->second_fd()); - - ASSERT_NO_FATAL_FAILURE( - SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); - - char received_data[20]; - struct ucred received_creds; - ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, - received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - struct ucred want_creds; - ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); - - EXPECT_EQ(want_creds.pid, received_creds.pid); - EXPECT_EQ(want_creds.uid, received_creds.uid); - EXPECT_EQ(want_creds.gid, received_creds.gid); -} - 
-TEST_P(UnixSocketPairTest, SendNullCredsBeforeSoPassCredSendEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - ASSERT_NO_FATAL_FAILURE( - SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); - - SetSoPassCred(sockets->first_fd()); - - char received_data[20]; - ASSERT_NO_FATAL_FAILURE( - RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); -} - -TEST_P(UnixSocketPairTest, SendNullCredsAfterSoPassCredSendEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - SetSoPassCred(sockets->first_fd()); - - ASSERT_NO_FATAL_FAILURE( - SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); - - char received_data[20]; - ASSERT_NO_FATAL_FAILURE( - RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); -} - -TEST_P(UnixSocketPairTest, SendNullCredsBeforeSoPassCredRecvEndAfterSendEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - SetSoPassCred(sockets->first_fd()); - - ASSERT_NO_FATAL_FAILURE( - SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - char received_data[20]; - struct ucred received_creds; - ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, - received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - struct ucred want_creds; - ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); - - EXPECT_EQ(want_creds.pid, received_creds.pid); - EXPECT_EQ(want_creds.uid, received_creds.uid); - EXPECT_EQ(want_creds.gid, received_creds.gid); -} - -TEST_P(UnixSocketPairTest, WriteBeforeSoPassCredRecvEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), - SyscallSucceedsWithValue(sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - char received_data[20]; - - struct ucred received_creds; - ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, - received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - struct ucred want_creds { - 0, 65534, 65534 - }; - - EXPECT_EQ(want_creds.pid, received_creds.pid); - EXPECT_EQ(want_creds.uid, received_creds.uid); - EXPECT_EQ(want_creds.gid, received_creds.gid); -} - -TEST_P(UnixSocketPairTest, WriteAfterSoPassCredRecvEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - SetSoPassCred(sockets->second_fd()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), - SyscallSucceedsWithValue(sizeof(sent_data))); - - char received_data[20]; - - struct ucred received_creds; - ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, - received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - struct ucred want_creds; - 
ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); - - EXPECT_EQ(want_creds.pid, received_creds.pid); - EXPECT_EQ(want_creds.uid, received_creds.uid); - EXPECT_EQ(want_creds.gid, received_creds.gid); -} - -TEST_P(UnixSocketPairTest, WriteBeforeSoPassCredSendEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), - SyscallSucceedsWithValue(sizeof(sent_data))); - - SetSoPassCred(sockets->first_fd()); - - char received_data[20]; - ASSERT_NO_FATAL_FAILURE( - RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); -} - -TEST_P(UnixSocketPairTest, WriteAfterSoPassCredSendEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - SetSoPassCred(sockets->first_fd()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), - SyscallSucceedsWithValue(sizeof(sent_data))); - - char received_data[20]; - ASSERT_NO_FATAL_FAILURE( - RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); -} - -TEST_P(UnixSocketPairTest, WriteBeforeSoPassCredRecvEndAfterSendEnd) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - SetSoPassCred(sockets->first_fd()); - - ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), - SyscallSucceedsWithValue(sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - char received_data[20]; - - struct ucred received_creds; - ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, - received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - struct ucred want_creds; - ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); - - EXPECT_EQ(want_creds.pid, received_creds.pid); - EXPECT_EQ(want_creds.uid, received_creds.uid); - EXPECT_EQ(want_creds.gid, received_creds.gid); -} - -TEST_P(UnixSocketPairTest, CredPassTruncated) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - struct ucred sent_creds; - - ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); - - ASSERT_NO_FATAL_FAILURE( - SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - struct msghdr msg = {}; - char control[CMSG_SPACE(0) + sizeof(pid_t)]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - char received_data[sizeof(sent_data)] = {}; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - 
EXPECT_EQ(msg.msg_controllen, sizeof(control)); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - EXPECT_EQ(cmsg->cmsg_len, sizeof(control)); - EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); - EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); - - pid_t pid = 0; - memcpy(&pid, CMSG_DATA(cmsg), sizeof(pid)); - EXPECT_EQ(pid, sent_creds.pid); -} - -// CredPassNoMsgCtrunc passes a full set of credentials. It then verifies that -// receiving the full set does not result in MSG_CTRUNC being set in the msghdr. -TEST_P(UnixSocketPairTest, CredPassNoMsgCtrunc) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - struct ucred sent_creds; - - ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); - - ASSERT_NO_FATAL_FAILURE( - SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - struct msghdr msg = {}; - char control[CMSG_SPACE(sizeof(struct ucred))]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - char received_data[sizeof(sent_data)] = {}; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - // The control message should not be truncated. - EXPECT_EQ(msg.msg_flags, 0); - EXPECT_EQ(msg.msg_controllen, sizeof(control)); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(struct ucred))); - EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); - EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); -} - -// CredPassNoSpaceMsgCtrunc passes a full set of credentials. It then receives -// the data without providing space for any credentials and verifies that -// MSG_CTRUNC is set in the msghdr. -TEST_P(UnixSocketPairTest, CredPassNoSpaceMsgCtrunc) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - struct ucred sent_creds; - - ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); - - ASSERT_NO_FATAL_FAILURE( - SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - struct msghdr msg = {}; - char control[CMSG_SPACE(0)]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - char received_data[sizeof(sent_data)] = {}; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - // The control message should be truncated. 
- EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); - EXPECT_EQ(msg.msg_controllen, sizeof(control)); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - EXPECT_EQ(cmsg->cmsg_len, sizeof(control)); - EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); - EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); -} - -// CredPassTruncatedMsgCtrunc passes a full set of credentials. It then receives -// the data while providing enough space for only the first field of the -// credentials and verifies that MSG_CTRUNC is set in the msghdr. -TEST_P(UnixSocketPairTest, CredPassTruncatedMsgCtrunc) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - struct ucred sent_creds; - - ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); - - ASSERT_NO_FATAL_FAILURE( - SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - struct msghdr msg = {}; - char control[CMSG_SPACE(0) + sizeof(pid_t)]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - char received_data[sizeof(sent_data)] = {}; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - // The control message should be truncated. - EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); - EXPECT_EQ(msg.msg_controllen, sizeof(control)); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - EXPECT_EQ(cmsg->cmsg_len, sizeof(control)); - EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); - EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); -} - -TEST_P(UnixSocketPairTest, SoPassCred) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - int opt; - socklen_t optLen = sizeof(opt); - EXPECT_THAT( - getsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), - SyscallSucceeds()); - EXPECT_FALSE(opt); - - optLen = sizeof(opt); - EXPECT_THAT( - getsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), - SyscallSucceeds()); - EXPECT_FALSE(opt); - - SetSoPassCred(sockets->first_fd()); - - optLen = sizeof(opt); - EXPECT_THAT( - getsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), - SyscallSucceeds()); - EXPECT_TRUE(opt); - - optLen = sizeof(opt); - EXPECT_THAT( - getsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), - SyscallSucceeds()); - EXPECT_FALSE(opt); - - int zero = 0; - EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &zero, - sizeof(zero)), - SyscallSucceeds()); - - optLen = sizeof(opt); - EXPECT_THAT( - getsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), - SyscallSucceeds()); - EXPECT_FALSE(opt); - - optLen = sizeof(opt); - EXPECT_THAT( - getsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), - SyscallSucceeds()); - EXPECT_FALSE(opt); -} - -TEST_P(UnixSocketPairTest, NoDataCredPass) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - struct msghdr msg = {}; - - struct iovec iov; - iov.iov_base = sent_data; - iov.iov_len = sizeof(sent_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - 
- char control[CMSG_SPACE(0)]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_CREDENTIALS; - cmsg->cmsg_len = CMSG_LEN(0); - - ASSERT_THAT(RetryEINTR(sendmsg)(sockets->first_fd(), &msg, 0), - SyscallFailsWithErrno(EINVAL)); -} - -TEST_P(UnixSocketPairTest, NoPassCred) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - struct ucred sent_creds; - - ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); - - ASSERT_NO_FATAL_FAILURE( - SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); - - char received_data[20]; - - ASSERT_NO_FATAL_FAILURE( - RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); -} - -TEST_P(UnixSocketPairTest, CredAndFDPass) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - struct ucred sent_creds; - - ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendCredsAndFD(sockets->first_fd(), sent_creds, - pair->second_fd(), sent_data, - sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - char received_data[20]; - struct ucred received_creds; - int fd = -1; - ASSERT_NO_FATAL_FAILURE(RecvCredsAndFD(sockets->second_fd(), &received_creds, - &fd, received_data, - sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - EXPECT_EQ(sent_creds.pid, received_creds.pid); - EXPECT_EQ(sent_creds.uid, received_creds.uid); - EXPECT_EQ(sent_creds.gid, received_creds.gid); - - ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); -} - -TEST_P(UnixSocketPairTest, FDPassBeforeSoPassCred) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - SetSoPassCred(sockets->second_fd()); - - char received_data[20]; - struct ucred received_creds; - int fd = -1; - ASSERT_NO_FATAL_FAILURE(RecvCredsAndFD(sockets->second_fd(), &received_creds, - &fd, received_data, - sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - struct ucred want_creds { - 0, 65534, 65534 - }; - - EXPECT_EQ(want_creds.pid, received_creds.pid); - EXPECT_EQ(want_creds.uid, received_creds.uid); - EXPECT_EQ(want_creds.gid, received_creds.gid); - - ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); -} - -TEST_P(UnixSocketPairTest, FDPassAfterSoPassCred) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - SetSoPassCred(sockets->second_fd()); - - 
ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - char received_data[20]; - struct ucred received_creds; - int fd = -1; - ASSERT_NO_FATAL_FAILURE(RecvCredsAndFD(sockets->second_fd(), &received_creds, - &fd, received_data, - sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - - struct ucred want_creds; - ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); - ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); - - EXPECT_EQ(want_creds.pid, received_creds.pid); - EXPECT_EQ(want_creds.uid, received_creds.uid); - EXPECT_EQ(want_creds.gid, received_creds.gid); - - ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); -} - -TEST_P(UnixSocketPairTest, CloexecDroppedWhenFDPassed) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = ASSERT_NO_ERRNO_AND_VALUE( - UnixDomainSocketPair(SOCK_SEQPACKET | SOCK_CLOEXEC).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - char received_data[20]; - int fd = -1; - ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data, - sizeof(received_data))); - - EXPECT_THAT(fcntl(fd, F_GETFD), SyscallSucceedsWithValue(0)); -} - -TEST_P(UnixSocketPairTest, CloexecRecvFDPass) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - char control[CMSG_SPACE(sizeof(int))]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - struct iovec iov; - char received_data[20]; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_CMSG_CLOEXEC), - SyscallSucceedsWithValue(sizeof(received_data))); - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int))); - ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET); - ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS); - - int fd = -1; - memcpy(&fd, CMSG_DATA(cmsg), sizeof(int)); - - EXPECT_THAT(fcntl(fd, F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC)); -} - -TEST_P(UnixSocketPairTest, FDPassAfterSoPassCredWithoutCredSpace) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - SetSoPassCred(sockets->second_fd()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - char control[CMSG_LEN(0)]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - char received_data[20]; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); - - EXPECT_EQ(0, memcmp(sent_data, received_data, 
sizeof(sent_data))); - - EXPECT_EQ(msg.msg_controllen, sizeof(control)); - - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - EXPECT_EQ(cmsg->cmsg_len, sizeof(control)); - EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); - EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); -} - -// This test will validate that MSG_CTRUNC as an input flag to recvmsg will -// not appear as an output flag on the control message when truncation doesn't -// happen. -TEST_P(UnixSocketPairTest, MsgCtruncInputIsNoop) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - char control[CMSG_SPACE(sizeof(int)) /* we're passing a single fd */]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - struct iovec iov; - char received_data[20]; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_CTRUNC), - SyscallSucceedsWithValue(sizeof(received_data))); - struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); - ASSERT_NE(cmsg, nullptr); - ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int))); - ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET); - ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS); - - // Now we should verify that MSG_CTRUNC wasn't set as an output flag. - EXPECT_EQ(msg.msg_flags & MSG_CTRUNC, 0); -} - -TEST_P(UnixSocketPairTest, FDPassAfterSoPassCredWithoutCredHeaderSpace) { - auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - - char sent_data[20]; - RandomizeBuffer(sent_data, sizeof(sent_data)); - - auto pair = - ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); - - SetSoPassCred(sockets->second_fd()); - - ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), - sent_data, sizeof(sent_data))); - - struct msghdr msg = {}; - char control[CMSG_LEN(0) / 2]; - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - char received_data[20]; - struct iovec iov; - iov.iov_base = received_data; - iov.iov_len = sizeof(received_data); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; +// This file contains tests specific to Unix domain sockets. It does not contain +// tests for UDS control messages. Those belong in socket_unix_cmsg.cc. +// +// This file is a generic socket test file. It must be built with another file +// that provides the test types. - ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), - SyscallSucceedsWithValue(sizeof(received_data))); +namespace gvisor { +namespace testing { - EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); - EXPECT_EQ(msg.msg_controllen, 0); -} +namespace { TEST_P(UnixSocketPairTest, InvalidGetSockOpt) { auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); @@ -1519,6 +98,14 @@ TEST_P(UnixSocketPairTest, RecvmmsgTimeoutAfterRecv) { TEST_P(UnixSocketPairTest, TIOCINQSucceeds) { auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + if (IsRunningOnGvisor()) { + // TODO(gvisor.dev/issue/273): Inherited host UDS don't support TIOCINQ. + // Skip the test. 
+ int size = -1; + int ret = ioctl(sockets->first_fd(), TIOCINQ, &size); + SKIP_IF(ret == -1 && errno == ENOTTY); + } + int size = -1; EXPECT_THAT(ioctl(sockets->first_fd(), TIOCINQ, &size), SyscallSucceeds()); EXPECT_EQ(size, 0); @@ -1544,6 +131,14 @@ TEST_P(UnixSocketPairTest, TIOCINQSucceeds) { TEST_P(UnixSocketPairTest, TIOCOUTQSucceeds) { auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + if (IsRunningOnGvisor()) { + // TODO(gvisor.dev/issue/273): Inherited host UDS don't support TIOCOUTQ. + // Skip the test. + int size = -1; + int ret = ioctl(sockets->second_fd(), TIOCOUTQ, &size); + SKIP_IF(ret == -1 && errno == ENOTTY); + } + int size = -1; EXPECT_THAT(ioctl(sockets->second_fd(), TIOCOUTQ, &size), SyscallSucceeds()); EXPECT_EQ(size, 0); @@ -1580,19 +175,70 @@ TEST_P(UnixSocketPairTest, NetdeviceIoctlsSucceed) { } } -TEST_P(UnixSocketPairTest, SocketShutdown) { +TEST_P(UnixSocketPairTest, Shutdown) { auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - char buf[20]; + const std::string data = "abc"; - ASSERT_THAT(WriteFd(sockets->first_fd(), data.c_str(), 3), - SyscallSucceedsWithValue(3)); + ASSERT_THAT(WriteFd(sockets->first_fd(), data.c_str(), data.size()), + SyscallSucceedsWithValue(data.size())); + ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_RDWR), SyscallSucceeds()); ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_RDWR), SyscallSucceeds()); // Shutting down a socket does not clear the buffer. - ASSERT_THAT(ReadFd(sockets->second_fd(), buf, 3), - SyscallSucceedsWithValue(3)); - EXPECT_EQ(data, absl::string_view(buf, 3)); + char buf[3]; + ASSERT_THAT(ReadFd(sockets->second_fd(), buf, data.size()), + SyscallSucceedsWithValue(data.size())); + EXPECT_EQ(data, absl::string_view(buf, data.size())); +} + +TEST_P(UnixSocketPairTest, ShutdownRead) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_RD), SyscallSucceeds()); + + // When the socket is shutdown for read, read behavior varies between + // different socket types. This is covered by the various ReadOneSideClosed + // test cases. + + // ... and the peer cannot write. + const std::string data = "abc"; + EXPECT_THAT(WriteFd(sockets->second_fd(), data.c_str(), data.size()), + SyscallFailsWithErrno(EPIPE)); + + // ... but the socket can still write. + ASSERT_THAT(WriteFd(sockets->first_fd(), data.c_str(), data.size()), + SyscallSucceedsWithValue(data.size())); + + // ... and the peer can still read. + char buf[3]; + EXPECT_THAT(ReadFd(sockets->second_fd(), buf, data.size()), + SyscallSucceedsWithValue(data.size())); + EXPECT_EQ(data, absl::string_view(buf, data.size())); +} + +TEST_P(UnixSocketPairTest, ShutdownWrite) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_WR), SyscallSucceeds()); + + // When the socket is shutdown for write, it cannot write. + const std::string data = "abc"; + EXPECT_THAT(WriteFd(sockets->first_fd(), data.c_str(), data.size()), + SyscallFailsWithErrno(EPIPE)); + + // ... and the peer read behavior varies between different socket types. This + // is covered by the various ReadOneSideClosed test cases. + + // ... but the peer can still write. + char buf[3]; + ASSERT_THAT(WriteFd(sockets->second_fd(), data.c_str(), data.size()), + SyscallSucceedsWithValue(data.size())); + + // ... and the socket can still read. 
+ EXPECT_THAT(ReadFd(sockets->first_fd(), buf, data.size()), + SyscallSucceedsWithValue(data.size())); + EXPECT_EQ(data, absl::string_view(buf, data.size())); } TEST_P(UnixSocketPairTest, SocketReopenFromProcfs) { diff --git a/test/syscalls/linux/socket_unix_cmsg.cc b/test/syscalls/linux/socket_unix_cmsg.cc new file mode 100644 index 000000000..b0ab26847 --- /dev/null +++ b/test/syscalls/linux/socket_unix_cmsg.cc @@ -0,0 +1,1473 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "test/syscalls/linux/socket_unix_cmsg.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "gtest/gtest.h" +#include "gtest/gtest.h" +#include "absl/strings/string_view.h" +#include "test/syscalls/linux/socket_test_util.h" +#include "test/syscalls/linux/unix_domain_socket_test_util.h" +#include "test/util/test_util.h" +#include "test/util/thread_util.h" + +// This file contains tests for control message in Unix domain sockets. +// +// This file is a generic socket test file. It must be built with another file +// that provides the test types. + +namespace gvisor { +namespace testing { + +namespace { + +TEST_P(UnixSocketPairCmsgTest, BasicFDPass) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + char received_data[20]; + int fd = -1; + ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data, + sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); +} + +TEST_P(UnixSocketPairCmsgTest, BasicTwoFDPass) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair1 = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + auto pair2 = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + int sent_fds[] = {pair1->second_fd(), pair2->second_fd()}; + + ASSERT_NO_FATAL_FAILURE( + SendFDs(sockets->first_fd(), sent_fds, 2, sent_data, sizeof(sent_data))); + + char received_data[20]; + int received_fds[] = {-1, -1}; + + ASSERT_NO_FATAL_FAILURE(RecvFDs(sockets->second_fd(), received_fds, 2, + received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[0], pair1->first_fd())); + ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[1], pair2->first_fd())); +} + +TEST_P(UnixSocketPairCmsgTest, BasicThreeFDPass) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair1 = + 
ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + auto pair2 = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + auto pair3 = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + int sent_fds[] = {pair1->second_fd(), pair2->second_fd(), pair3->second_fd()}; + + ASSERT_NO_FATAL_FAILURE( + SendFDs(sockets->first_fd(), sent_fds, 3, sent_data, sizeof(sent_data))); + + char received_data[20]; + int received_fds[] = {-1, -1, -1}; + + ASSERT_NO_FATAL_FAILURE(RecvFDs(sockets->second_fd(), received_fds, 3, + received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[0], pair1->first_fd())); + ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[1], pair2->first_fd())); + ASSERT_NO_FATAL_FAILURE(TransferTest(received_fds[2], pair3->first_fd())); +} + +TEST_P(UnixSocketPairCmsgTest, BadFDPass) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + int sent_fd = -1; + + struct msghdr msg = {}; + char control[CMSG_SPACE(sizeof(sent_fd))]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_len = CMSG_LEN(sizeof(sent_fd)); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + memcpy(CMSG_DATA(cmsg), &sent_fd, sizeof(sent_fd)); + + struct iovec iov; + iov.iov_base = sent_data; + iov.iov_len = sizeof(sent_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(sendmsg)(sockets->first_fd(), &msg, 0), + SyscallFailsWithErrno(EBADF)); +} + +// BasicFDPassNoSpace starts off by sending a single FD just like BasicFDPass. +// The difference is that when calling recvmsg, no space for FDs is provided, +// only space for the cmsg header. +TEST_P(UnixSocketPairCmsgTest, BasicFDPassNoSpace) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + char received_data[20]; + + struct msghdr msg = {}; + std::vector control(CMSG_SPACE(0)); + msg.msg_control = &control[0]; + msg.msg_controllen = control.size(); + + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(msg.msg_controllen, 0); + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); +} + +// BasicFDPassNoSpaceMsgCtrunc sends an FD, but does not provide any space to +// receive it. It then verifies that the MSG_CTRUNC flag is set in the msghdr. 
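The FD-passing cases in this file, including the MSG_CTRUNC case described above, go through the SendSingleFD/RecvSingleFD helpers from unix_domain_socket_test_util, whose bodies are not part of this patch. For reference, a minimal standalone sketch of the sendmsg/recvmsg + SCM_RIGHTS pattern those helpers wrap; the names and the single-byte payload here are illustrative only, not the actual test utilities:

#include <assert.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

// Sends one byte of payload plus a single descriptor over a Unix socket.
static ssize_t SendOneFD(int sock, int fd_to_send) {
  char data = 'x';
  struct iovec iov;
  iov.iov_base = &data;
  iov.iov_len = sizeof(data);

  char control[CMSG_SPACE(sizeof(int))] = {};
  struct msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control;
  msg.msg_controllen = sizeof(control);

  struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

  return sendmsg(sock, &msg, 0);
}

// Receives one byte of payload and returns the passed descriptor, or -1.
static int RecvOneFD(int sock) {
  char data;
  struct iovec iov;
  iov.iov_base = &data;
  iov.iov_len = sizeof(data);

  char control[CMSG_SPACE(sizeof(int))] = {};
  struct msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control;
  msg.msg_controllen = sizeof(control);

  if (recvmsg(sock, &msg, 0) < 0) return -1;
  struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
  if (cmsg == nullptr || cmsg->cmsg_level != SOL_SOCKET ||
      cmsg->cmsg_type != SCM_RIGHTS) {
    return -1;
  }
  int fd = -1;
  memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
  return fd;  // A new descriptor number in the receiving process.
}

int main() {
  int socks[2];
  assert(socketpair(AF_UNIX, SOCK_SEQPACKET, 0, socks) == 0);
  int pipe_fds[2];
  assert(pipe(pipe_fds) == 0);

  assert(SendOneFD(socks[0], pipe_fds[1]) == 1);
  int received = RecvOneFD(socks[1]);
  assert(received >= 0);

  // The received descriptor refers to the same pipe write end.
  assert(write(received, "!", 1) == 1);
  char c;
  assert(read(pipe_fds[0], &c, 1) == 1 && c == '!');
  return 0;
}

The tests below use the same shape, only with 20-byte randomized payloads and gtest assertions in place of the single byte and asserts here.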
+TEST_P(UnixSocketPairCmsgTest, BasicFDPassNoSpaceMsgCtrunc) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + std::vector control(CMSG_SPACE(0)); + msg.msg_control = &control[0]; + msg.msg_controllen = control.size(); + + char received_data[sizeof(sent_data)]; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(msg.msg_controllen, 0); + EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); +} + +// BasicFDPassNullControlMsgCtrunc sends an FD and sets contradictory values for +// msg_controllen and msg_control. msg_controllen is set to the correct size to +// accomidate the FD, but msg_control is set to NULL. In this case, msg_control +// should override msg_controllen. +TEST_P(UnixSocketPairCmsgTest, BasicFDPassNullControlMsgCtrunc) { + // FIXME(gvisor.dev/issue/207): Fix handling of NULL msg_control. + SKIP_IF(IsRunningOnGvisor()); + + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + msg.msg_controllen = CMSG_SPACE(1); + + char received_data[sizeof(sent_data)]; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(msg.msg_controllen, 0); + EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); +} + +// BasicFDPassNotEnoughSpaceMsgCtrunc sends an FD, but does not provide enough +// space to receive it. It then verifies that the MSG_CTRUNC flag is set in the +// msghdr. +TEST_P(UnixSocketPairCmsgTest, BasicFDPassNotEnoughSpaceMsgCtrunc) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + std::vector control(CMSG_SPACE(0) + 1); + msg.msg_control = &control[0]; + msg.msg_controllen = control.size(); + + char received_data[sizeof(sent_data)]; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(msg.msg_controllen, 0); + EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); +} + +// BasicThreeFDPassTruncationMsgCtrunc sends three FDs, but only provides enough +// space to receive two of them. It then verifies that the MSG_CTRUNC flag is +// set in the msghdr. 
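The truncation cases around here hinge on cmsg_len: when the receive-side control buffer cannot hold everything that was sent, the kernel delivers only what fits, sets MSG_CTRUNC, and cmsg_len describes what was actually delivered rather than what the sender attached. A hedged sketch of how a receiver recovers the delivered descriptors from a filled-in msghdr; ExtractFDs is an illustrative name, not one of the test helpers:

#include <string.h>
#include <sys/socket.h>

#include <vector>

// Pulls out whatever file descriptors actually arrived in the first
// SCM_RIGHTS control message of a msghdr that recvmsg() has already filled.
std::vector<int> ExtractFDs(struct msghdr* msg) {
  std::vector<int> fds;
  for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(msg); cmsg != nullptr;
       cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
      continue;
    }
    // CMSG_LEN(0) is the size of the aligned header alone, so the delivered
    // FD count falls out of cmsg_len; after truncation it is smaller than
    // the number of descriptors the sender passed.
    const size_t payload = cmsg->cmsg_len - CMSG_LEN(0);
    const size_t count = payload / sizeof(int);
    fds.resize(count);
    memcpy(fds.data(), CMSG_DATA(cmsg), count * sizeof(int));
    break;  // These tests see at most one SCM_RIGHTS cmsg per message.
  }
  return fds;
}

This is why the three-FDs-into-two-slots case below can assert cmsg_len == CMSG_LEN(2 * sizeof(int)) alongside MSG_CTRUNC.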
+TEST_P(UnixSocketPairCmsgTest, BasicThreeFDPassTruncationMsgCtrunc) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair1 = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + auto pair2 = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + auto pair3 = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + int sent_fds[] = {pair1->second_fd(), pair2->second_fd(), pair3->second_fd()}; + + ASSERT_NO_FATAL_FAILURE( + SendFDs(sockets->first_fd(), sent_fds, 3, sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + std::vector control(CMSG_SPACE(2 * sizeof(int))); + msg.msg_control = &control[0]; + msg.msg_controllen = control.size(); + + char received_data[sizeof(sent_data)]; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(2 * sizeof(int))); + EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); + EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS); +} + +// BasicFDPassUnalignedRecv starts off by sending a single FD just like +// BasicFDPass. The difference is that when calling recvmsg, the length of the +// receive data is only aligned on a 4 byte boundry instead of the normal 8. +TEST_P(UnixSocketPairCmsgTest, BasicFDPassUnalignedRecv) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + char received_data[20]; + int fd = -1; + ASSERT_NO_FATAL_FAILURE(RecvSingleFDUnaligned( + sockets->second_fd(), &fd, received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); +} + +// BasicFDPassUnalignedRecvNoMsgTrunc sends one FD and only provides enough +// space to receive just it. (Normally the minimum amount of space one would +// provide would be enough space for two FDs.) It then verifies that the +// MSG_CTRUNC flag is not set in the msghdr. 
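The "unaligned" buffer sizes used by the next two tests fall directly out of the cmsg macros. A quick standalone check of the arithmetic; the printed values assume a typical 64-bit Linux/glibc ABI (16-byte cmsghdr, 8-byte cmsg alignment), which is exactly why CMSG_SPACE for a single int already leaves room for two descriptors:

#include <stdio.h>
#include <sys/socket.h>

int main() {
  printf("sizeof(struct cmsghdr)    = %zu\n", sizeof(struct cmsghdr));
  printf("CMSG_LEN(sizeof(int))     = %zu\n", (size_t)CMSG_LEN(sizeof(int)));
  printf("CMSG_LEN(2 * sizeof(int)) = %zu\n", (size_t)CMSG_LEN(2 * sizeof(int)));
  printf("CMSG_SPACE(sizeof(int))   = %zu\n", (size_t)CMSG_SPACE(sizeof(int)));
  // Typically prints 16, 20, 24, 24: CMSG_SPACE(sizeof(int)) equals
  // CMSG_LEN(2 * sizeof(int)), so a buffer of
  // CMSG_SPACE(sizeof(int)) - sizeof(int) bytes (20) is the smallest one
  // that carries exactly one passed descriptor without tripping MSG_CTRUNC.
  return 0;
}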
+TEST_P(UnixSocketPairCmsgTest, BasicFDPassUnalignedRecvNoMsgTrunc) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + char control[CMSG_SPACE(sizeof(int)) - sizeof(int)]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + char received_data[sizeof(sent_data)] = {}; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(msg.msg_flags, 0); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int))); + EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); + EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS); +} + +// BasicTwoFDPassUnalignedRecvTruncationMsgTrunc sends two FDs, but only +// provides enough space to receive one of them. It then verifies that the +// MSG_CTRUNC flag is set in the msghdr. +TEST_P(UnixSocketPairCmsgTest, BasicTwoFDPassUnalignedRecvTruncationMsgTrunc) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + int sent_fds[] = {pair->first_fd(), pair->second_fd()}; + + ASSERT_NO_FATAL_FAILURE( + SendFDs(sockets->first_fd(), sent_fds, 2, sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + // CMSG_SPACE rounds up to two FDs, we only want one. 
+ char control[CMSG_SPACE(sizeof(int)) - sizeof(int)]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + char received_data[sizeof(sent_data)] = {}; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int))); + EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); + EXPECT_EQ(cmsg->cmsg_type, SCM_RIGHTS); +} + +TEST_P(UnixSocketPairCmsgTest, ConcurrentBasicFDPass) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + int sockfd1 = sockets->first_fd(); + auto recv_func = [sockfd1, sent_data]() { + char received_data[20]; + int fd = -1; + RecvSingleFD(sockfd1, &fd, received_data, sizeof(received_data)); + ASSERT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + char buf[20]; + ASSERT_THAT(ReadFd(fd, buf, sizeof(buf)), + SyscallSucceedsWithValue(sizeof(buf))); + ASSERT_THAT(WriteFd(fd, buf, sizeof(buf)), + SyscallSucceedsWithValue(sizeof(buf))); + }; + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->second_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + ScopedThread t(recv_func); + + RandomizeBuffer(sent_data, sizeof(sent_data)); + ASSERT_THAT(WriteFd(pair->first_fd(), sent_data, sizeof(sent_data)), + SyscallSucceedsWithValue(sizeof(sent_data))); + + char received_data[20]; + ASSERT_THAT(ReadFd(pair->first_fd(), received_data, sizeof(received_data)), + SyscallSucceedsWithValue(sizeof(received_data))); + + t.Join(); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); +} + +// FDPassNoRecv checks that the control message can be safely ignored by using +// read(2) instead of recvmsg(2). +TEST_P(UnixSocketPairCmsgTest, FDPassNoRecv) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + // Read while ignoring the passed FD. + char received_data[20]; + ASSERT_THAT( + ReadFd(sockets->second_fd(), received_data, sizeof(received_data)), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + // Check that the socket still works for reads and writes. + ASSERT_NO_FATAL_FAILURE( + TransferTest(sockets->first_fd(), sockets->second_fd())); +} + +// FDPassInterspersed1 checks that sent control messages cannot be read before +// their associated data has been read. 
+TEST_P(UnixSocketPairCmsgTest, FDPassInterspersed1) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char written_data[20]; + RandomizeBuffer(written_data, sizeof(written_data)); + + ASSERT_THAT(WriteFd(sockets->first_fd(), written_data, sizeof(written_data)), + SyscallSucceedsWithValue(sizeof(written_data))); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + // Check that we don't get a control message, but do get the data. + char received_data[20]; + RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data)); + EXPECT_EQ(0, memcmp(written_data, received_data, sizeof(written_data))); +} + +// FDPassInterspersed2 checks that sent control messages cannot be read after +// their assocated data has been read while ignoring the control message by +// using read(2) instead of recvmsg(2). +TEST_P(UnixSocketPairCmsgTest, FDPassInterspersed2) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + char written_data[20]; + RandomizeBuffer(written_data, sizeof(written_data)); + ASSERT_THAT(WriteFd(sockets->first_fd(), written_data, sizeof(written_data)), + SyscallSucceedsWithValue(sizeof(written_data))); + + char received_data[20]; + ASSERT_THAT( + ReadFd(sockets->second_fd(), received_data, sizeof(received_data)), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + ASSERT_NO_FATAL_FAILURE( + RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); + EXPECT_EQ(0, memcmp(written_data, received_data, sizeof(written_data))); +} + +TEST_P(UnixSocketPairCmsgTest, FDPassNotCoalesced) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data1[20]; + RandomizeBuffer(sent_data1, sizeof(sent_data1)); + + auto pair1 = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair1->second_fd(), + sent_data1, sizeof(sent_data1))); + + char sent_data2[20]; + RandomizeBuffer(sent_data2, sizeof(sent_data2)); + + auto pair2 = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair2->second_fd(), + sent_data2, sizeof(sent_data2))); + + char received_data1[sizeof(sent_data1) + sizeof(sent_data2)]; + int received_fd1 = -1; + + RecvSingleFD(sockets->second_fd(), &received_fd1, received_data1, + sizeof(received_data1), sizeof(sent_data1)); + + EXPECT_EQ(0, memcmp(sent_data1, received_data1, sizeof(sent_data1))); + TransferTest(pair1->first_fd(), pair1->second_fd()); + + char received_data2[sizeof(sent_data1) + sizeof(sent_data2)]; + int received_fd2 = -1; + + RecvSingleFD(sockets->second_fd(), &received_fd2, received_data2, + sizeof(received_data2), sizeof(sent_data2)); + + EXPECT_EQ(0, memcmp(sent_data2, received_data2, sizeof(sent_data2))); + TransferTest(pair2->first_fd(), pair2->second_fd()); +} + +TEST_P(UnixSocketPairCmsgTest, FDPassPeek) { + auto sockets = 
ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + char peek_data[20]; + int peek_fd = -1; + PeekSingleFD(sockets->second_fd(), &peek_fd, peek_data, sizeof(peek_data)); + EXPECT_EQ(0, memcmp(sent_data, peek_data, sizeof(sent_data))); + TransferTest(peek_fd, pair->first_fd()); + EXPECT_THAT(close(peek_fd), SyscallSucceeds()); + + char received_data[20]; + int received_fd = -1; + RecvSingleFD(sockets->second_fd(), &received_fd, received_data, + sizeof(received_data)); + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + TransferTest(received_fd, pair->first_fd()); + EXPECT_THAT(close(received_fd), SyscallSucceeds()); +} + +TEST_P(UnixSocketPairCmsgTest, BasicCredPass) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + struct ucred sent_creds; + + ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); + + ASSERT_NO_FATAL_FAILURE( + SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + char received_data[20]; + struct ucred received_creds; + ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, + received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + EXPECT_EQ(sent_creds.pid, received_creds.pid); + EXPECT_EQ(sent_creds.uid, received_creds.uid); + EXPECT_EQ(sent_creds.gid, received_creds.gid); +} + +TEST_P(UnixSocketPairCmsgTest, SendNullCredsBeforeSoPassCredRecvEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + ASSERT_NO_FATAL_FAILURE( + SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + char received_data[20]; + struct ucred received_creds; + ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, + received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + struct ucred want_creds { + 0, 65534, 65534 + }; + + EXPECT_EQ(want_creds.pid, received_creds.pid); + EXPECT_EQ(want_creds.uid, received_creds.uid); + EXPECT_EQ(want_creds.gid, received_creds.gid); +} + +TEST_P(UnixSocketPairCmsgTest, SendNullCredsAfterSoPassCredRecvEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + SetSoPassCred(sockets->second_fd()); + + ASSERT_NO_FATAL_FAILURE( + SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); + + char received_data[20]; + struct ucred received_creds; + ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, + received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + struct ucred want_creds; + ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); + + EXPECT_EQ(want_creds.pid, received_creds.pid); + EXPECT_EQ(want_creds.uid, 
received_creds.uid); + EXPECT_EQ(want_creds.gid, received_creds.gid); +} + +TEST_P(UnixSocketPairCmsgTest, SendNullCredsBeforeSoPassCredSendEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + ASSERT_NO_FATAL_FAILURE( + SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); + + SetSoPassCred(sockets->first_fd()); + + char received_data[20]; + ASSERT_NO_FATAL_FAILURE( + RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); +} + +TEST_P(UnixSocketPairCmsgTest, SendNullCredsAfterSoPassCredSendEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + SetSoPassCred(sockets->first_fd()); + + ASSERT_NO_FATAL_FAILURE( + SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); + + char received_data[20]; + ASSERT_NO_FATAL_FAILURE( + RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); +} + +TEST_P(UnixSocketPairCmsgTest, + SendNullCredsBeforeSoPassCredRecvEndAfterSendEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + SetSoPassCred(sockets->first_fd()); + + ASSERT_NO_FATAL_FAILURE( + SendNullCmsg(sockets->first_fd(), sent_data, sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + char received_data[20]; + struct ucred received_creds; + ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, + received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + struct ucred want_creds; + ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); + + EXPECT_EQ(want_creds.pid, received_creds.pid); + EXPECT_EQ(want_creds.uid, received_creds.uid); + EXPECT_EQ(want_creds.gid, received_creds.gid); +} + +TEST_P(UnixSocketPairCmsgTest, WriteBeforeSoPassCredRecvEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), + SyscallSucceedsWithValue(sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + char received_data[20]; + + struct ucred received_creds; + ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, + received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + struct ucred want_creds { + 0, 65534, 65534 + }; + + EXPECT_EQ(want_creds.pid, received_creds.pid); + EXPECT_EQ(want_creds.uid, received_creds.uid); + EXPECT_EQ(want_creds.gid, received_creds.gid); +} + +TEST_P(UnixSocketPairCmsgTest, WriteAfterSoPassCredRecvEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + SetSoPassCred(sockets->second_fd()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), + SyscallSucceedsWithValue(sizeof(sent_data))); + + char received_data[20]; + + struct ucred received_creds; + ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, + received_data, sizeof(received_data))); + + 
EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + struct ucred want_creds; + ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); + + EXPECT_EQ(want_creds.pid, received_creds.pid); + EXPECT_EQ(want_creds.uid, received_creds.uid); + EXPECT_EQ(want_creds.gid, received_creds.gid); +} + +TEST_P(UnixSocketPairCmsgTest, WriteBeforeSoPassCredSendEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), + SyscallSucceedsWithValue(sizeof(sent_data))); + + SetSoPassCred(sockets->first_fd()); + + char received_data[20]; + ASSERT_NO_FATAL_FAILURE( + RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); +} + +TEST_P(UnixSocketPairCmsgTest, WriteAfterSoPassCredSendEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + SetSoPassCred(sockets->first_fd()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), + SyscallSucceedsWithValue(sizeof(sent_data))); + + char received_data[20]; + ASSERT_NO_FATAL_FAILURE( + RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); +} + +TEST_P(UnixSocketPairCmsgTest, WriteBeforeSoPassCredRecvEndAfterSendEnd) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + SetSoPassCred(sockets->first_fd()); + + ASSERT_THAT(WriteFd(sockets->first_fd(), sent_data, sizeof(sent_data)), + SyscallSucceedsWithValue(sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + char received_data[20]; + + struct ucred received_creds; + ASSERT_NO_FATAL_FAILURE(RecvCreds(sockets->second_fd(), &received_creds, + received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + struct ucred want_creds; + ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); + + EXPECT_EQ(want_creds.pid, received_creds.pid); + EXPECT_EQ(want_creds.uid, received_creds.uid); + EXPECT_EQ(want_creds.gid, received_creds.gid); +} + +TEST_P(UnixSocketPairCmsgTest, CredPassTruncated) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + struct ucred sent_creds; + + ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); + + ASSERT_NO_FATAL_FAILURE( + SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + struct msghdr msg = {}; + char control[CMSG_SPACE(0) + sizeof(pid_t)]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + char received_data[sizeof(sent_data)] = {}; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + 
SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + EXPECT_EQ(msg.msg_controllen, sizeof(control)); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + EXPECT_EQ(cmsg->cmsg_len, sizeof(control)); + EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); + EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); + + pid_t pid = 0; + memcpy(&pid, CMSG_DATA(cmsg), sizeof(pid)); + EXPECT_EQ(pid, sent_creds.pid); +} + +// CredPassNoMsgCtrunc passes a full set of credentials. It then verifies that +// receiving the full set does not result in MSG_CTRUNC being set in the msghdr. +TEST_P(UnixSocketPairCmsgTest, CredPassNoMsgCtrunc) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + struct ucred sent_creds; + + ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); + + ASSERT_NO_FATAL_FAILURE( + SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + struct msghdr msg = {}; + char control[CMSG_SPACE(sizeof(struct ucred))]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + char received_data[sizeof(sent_data)] = {}; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + // The control message should not be truncated. + EXPECT_EQ(msg.msg_flags, 0); + EXPECT_EQ(msg.msg_controllen, sizeof(control)); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(struct ucred))); + EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); + EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); +} + +// CredPassNoSpaceMsgCtrunc passes a full set of credentials. It then receives +// the data without providing space for any credentials and verifies that +// MSG_CTRUNC is set in the msghdr. +TEST_P(UnixSocketPairCmsgTest, CredPassNoSpaceMsgCtrunc) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + struct ucred sent_creds; + + ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); + + ASSERT_NO_FATAL_FAILURE( + SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + struct msghdr msg = {}; + char control[CMSG_SPACE(0)]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + char received_data[sizeof(sent_data)] = {}; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + // The control message should be truncated. 
+ EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); + EXPECT_EQ(msg.msg_controllen, sizeof(control)); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + EXPECT_EQ(cmsg->cmsg_len, sizeof(control)); + EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); + EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); +} + +// CredPassTruncatedMsgCtrunc passes a full set of credentials. It then receives +// the data while providing enough space for only the first field of the +// credentials and verifies that MSG_CTRUNC is set in the msghdr. +TEST_P(UnixSocketPairCmsgTest, CredPassTruncatedMsgCtrunc) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + struct ucred sent_creds; + + ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); + + ASSERT_NO_FATAL_FAILURE( + SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + struct msghdr msg = {}; + char control[CMSG_SPACE(0) + sizeof(pid_t)]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + char received_data[sizeof(sent_data)] = {}; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + // The control message should be truncated. + EXPECT_EQ(msg.msg_flags, MSG_CTRUNC); + EXPECT_EQ(msg.msg_controllen, sizeof(control)); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + EXPECT_EQ(cmsg->cmsg_len, sizeof(control)); + EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); + EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); +} + +TEST_P(UnixSocketPairCmsgTest, SoPassCred) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + int opt; + socklen_t optLen = sizeof(opt); + EXPECT_THAT( + getsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), + SyscallSucceeds()); + EXPECT_FALSE(opt); + + optLen = sizeof(opt); + EXPECT_THAT( + getsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), + SyscallSucceeds()); + EXPECT_FALSE(opt); + + SetSoPassCred(sockets->first_fd()); + + optLen = sizeof(opt); + EXPECT_THAT( + getsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), + SyscallSucceeds()); + EXPECT_TRUE(opt); + + optLen = sizeof(opt); + EXPECT_THAT( + getsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), + SyscallSucceeds()); + EXPECT_FALSE(opt); + + int zero = 0; + EXPECT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &zero, + sizeof(zero)), + SyscallSucceeds()); + + optLen = sizeof(opt); + EXPECT_THAT( + getsockopt(sockets->first_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), + SyscallSucceeds()); + EXPECT_FALSE(opt); + + optLen = sizeof(opt); + EXPECT_THAT( + getsockopt(sockets->second_fd(), SOL_SOCKET, SO_PASSCRED, &opt, &optLen), + SyscallSucceeds()); + EXPECT_FALSE(opt); +} + +TEST_P(UnixSocketPairCmsgTest, NoDataCredPass) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + struct msghdr msg = {}; + + struct iovec iov; + iov.iov_base = sent_data; + iov.iov_len = sizeof(sent_data); + msg.msg_iov = &iov; + 
msg.msg_iovlen = 1; + + char control[CMSG_SPACE(0)]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_CREDENTIALS; + cmsg->cmsg_len = CMSG_LEN(0); + + ASSERT_THAT(RetryEINTR(sendmsg)(sockets->first_fd(), &msg, 0), + SyscallFailsWithErrno(EINVAL)); +} + +TEST_P(UnixSocketPairCmsgTest, NoPassCred) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + struct ucred sent_creds; + + ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); + + ASSERT_NO_FATAL_FAILURE( + SendCreds(sockets->first_fd(), sent_creds, sent_data, sizeof(sent_data))); + + char received_data[20]; + + ASSERT_NO_FATAL_FAILURE( + RecvNoCmsg(sockets->second_fd(), received_data, sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); +} + +TEST_P(UnixSocketPairCmsgTest, CredAndFDPass) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + struct ucred sent_creds; + + ASSERT_THAT(sent_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(sent_creds.gid = getgid(), SyscallSucceeds()); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendCredsAndFD(sockets->first_fd(), sent_creds, + pair->second_fd(), sent_data, + sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + char received_data[20]; + struct ucred received_creds; + int fd = -1; + ASSERT_NO_FATAL_FAILURE(RecvCredsAndFD(sockets->second_fd(), &received_creds, + &fd, received_data, + sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + EXPECT_EQ(sent_creds.pid, received_creds.pid); + EXPECT_EQ(sent_creds.uid, received_creds.uid); + EXPECT_EQ(sent_creds.gid, received_creds.gid); + + ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); +} + +TEST_P(UnixSocketPairCmsgTest, FDPassBeforeSoPassCred) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + SetSoPassCred(sockets->second_fd()); + + char received_data[20]; + struct ucred received_creds; + int fd = -1; + ASSERT_NO_FATAL_FAILURE(RecvCredsAndFD(sockets->second_fd(), &received_creds, + &fd, received_data, + sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + struct ucred want_creds { + 0, 65534, 65534 + }; + + EXPECT_EQ(want_creds.pid, received_creds.pid); + EXPECT_EQ(want_creds.uid, received_creds.uid); + EXPECT_EQ(want_creds.gid, received_creds.gid); + + ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); +} + +TEST_P(UnixSocketPairCmsgTest, FDPassAfterSoPassCred) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + SetSoPassCred(sockets->second_fd()); + + 
ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + char received_data[20]; + struct ucred received_creds; + int fd = -1; + ASSERT_NO_FATAL_FAILURE(RecvCredsAndFD(sockets->second_fd(), &received_creds, + &fd, received_data, + sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + + struct ucred want_creds; + ASSERT_THAT(want_creds.pid = getpid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.uid = getuid(), SyscallSucceeds()); + ASSERT_THAT(want_creds.gid = getgid(), SyscallSucceeds()); + + EXPECT_EQ(want_creds.pid, received_creds.pid); + EXPECT_EQ(want_creds.uid, received_creds.uid); + EXPECT_EQ(want_creds.gid, received_creds.gid); + + ASSERT_NO_FATAL_FAILURE(TransferTest(fd, pair->first_fd())); +} + +TEST_P(UnixSocketPairCmsgTest, CloexecDroppedWhenFDPassed) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = ASSERT_NO_ERRNO_AND_VALUE( + UnixDomainSocketPair(SOCK_SEQPACKET | SOCK_CLOEXEC).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + char received_data[20]; + int fd = -1; + ASSERT_NO_FATAL_FAILURE(RecvSingleFD(sockets->second_fd(), &fd, received_data, + sizeof(received_data))); + + EXPECT_THAT(fcntl(fd, F_GETFD), SyscallSucceedsWithValue(0)); +} + +TEST_P(UnixSocketPairCmsgTest, CloexecRecvFDPass) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + char control[CMSG_SPACE(sizeof(int))]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + struct iovec iov; + char received_data[20]; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_CMSG_CLOEXEC), + SyscallSucceedsWithValue(sizeof(received_data))); + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int))); + ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET); + ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS); + + int fd = -1; + memcpy(&fd, CMSG_DATA(cmsg), sizeof(int)); + + EXPECT_THAT(fcntl(fd, F_GETFD), SyscallSucceedsWithValue(FD_CLOEXEC)); +} + +TEST_P(UnixSocketPairCmsgTest, FDPassAfterSoPassCredWithoutCredSpace) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + SetSoPassCred(sockets->second_fd()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + char control[CMSG_LEN(0)]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + char received_data[20]; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, 
sizeof(sent_data))); + + EXPECT_EQ(msg.msg_controllen, sizeof(control)); + + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + EXPECT_EQ(cmsg->cmsg_len, sizeof(control)); + EXPECT_EQ(cmsg->cmsg_level, SOL_SOCKET); + EXPECT_EQ(cmsg->cmsg_type, SCM_CREDENTIALS); +} + +// This test will validate that MSG_CTRUNC as an input flag to recvmsg will +// not appear as an output flag on the control message when truncation doesn't +// happen. +TEST_P(UnixSocketPairCmsgTest, MsgCtruncInputIsNoop) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + char control[CMSG_SPACE(sizeof(int)) /* we're passing a single fd */]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + struct iovec iov; + char received_data[20]; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, MSG_CTRUNC), + SyscallSucceedsWithValue(sizeof(received_data))); + struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); + ASSERT_NE(cmsg, nullptr); + ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int))); + ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET); + ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS); + + // Now we should verify that MSG_CTRUNC wasn't set as an output flag. + EXPECT_EQ(msg.msg_flags & MSG_CTRUNC, 0); +} + +TEST_P(UnixSocketPairCmsgTest, FDPassAfterSoPassCredWithoutCredHeaderSpace) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + + char sent_data[20]; + RandomizeBuffer(sent_data, sizeof(sent_data)); + + auto pair = + ASSERT_NO_ERRNO_AND_VALUE(UnixDomainSocketPair(SOCK_SEQPACKET).Create()); + + SetSoPassCred(sockets->second_fd()); + + ASSERT_NO_FATAL_FAILURE(SendSingleFD(sockets->first_fd(), pair->second_fd(), + sent_data, sizeof(sent_data))); + + struct msghdr msg = {}; + char control[CMSG_LEN(0) / 2]; + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + char received_data[20]; + struct iovec iov; + iov.iov_base = received_data; + iov.iov_len = sizeof(received_data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + ASSERT_THAT(RetryEINTR(recvmsg)(sockets->second_fd(), &msg, 0), + SyscallSucceedsWithValue(sizeof(received_data))); + + EXPECT_EQ(0, memcmp(sent_data, received_data, sizeof(sent_data))); + EXPECT_EQ(msg.msg_controllen, 0); +} + +} // namespace + +} // namespace testing +} // namespace gvisor diff --git a/test/syscalls/linux/socket_unix_cmsg.h b/test/syscalls/linux/socket_unix_cmsg.h new file mode 100644 index 000000000..431606903 --- /dev/null +++ b/test/syscalls/linux/socket_unix_cmsg.h @@ -0,0 +1,30 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
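The credential cases in the test file above (BasicCredPass, the SoPassCred variants, CredAndFDPass) drive SO_PASSCRED and SCM_CREDENTIALS through fixture helpers such as SetSoPassCred, SendCreds and RecvCreds, which live in the shared test utilities rather than in this patch. For orientation, a minimal standalone sketch of that mechanism using only the raw socket API, with error handling trimmed:

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1  // For struct ucred and SCM_CREDENTIALS on glibc.
#endif
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main() {
  int socks[2];
  if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, socks) != 0) return 1;

  // Ask the receiving end to attach sender credentials to each message.
  int one = 1;
  if (setsockopt(socks[1], SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)) != 0) {
    return 1;
  }

  // A plain write on the peer is enough; the kernel supplies the credentials.
  char data = 'x';
  if (write(socks[0], &data, sizeof(data)) != sizeof(data)) return 1;

  char buf;
  struct iovec iov;
  iov.iov_base = &buf;
  iov.iov_len = sizeof(buf);
  char control[CMSG_SPACE(sizeof(struct ucred))] = {};
  struct msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control;
  msg.msg_controllen = sizeof(control);
  if (recvmsg(socks[1], &msg, 0) < 0) return 1;

  for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg); cmsg != nullptr;
       cmsg = CMSG_NXTHDR(&msg, cmsg)) {
    if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_CREDENTIALS) {
      struct ucred creds;
      memcpy(&creds, CMSG_DATA(cmsg), sizeof(creds));
      printf("pid=%d uid=%u gid=%u\n", (int)creds.pid, (unsigned)creds.uid,
             (unsigned)creds.gid);
    }
  }
  return 0;
}

The want_creds {0, 65534, 65534} checks in several tests above correspond to the other ordering: messages queued before SO_PASSCRED was enabled carry no credentials, so Linux reports pid 0 and the overflow uid/gid.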
+ +#ifndef GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_CMSG_H_ +#define GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_CMSG_H_ + +#include "test/syscalls/linux/socket_test_util.h" + +namespace gvisor { +namespace testing { + +// Test fixture for tests that apply to pairs of connected unix sockets about +// control messages. +using UnixSocketPairCmsgTest = SocketPairTest; + +} // namespace testing +} // namespace gvisor + +#endif // GVISOR_TEST_SYSCALLS_LINUX_SOCKET_UNIX_CMSG_H_ diff --git a/test/syscalls/linux/socket_unix_pair.cc b/test/syscalls/linux/socket_unix_pair.cc index bacfc11e4..411fb4518 100644 --- a/test/syscalls/linux/socket_unix_pair.cc +++ b/test/syscalls/linux/socket_unix_pair.cc @@ -16,6 +16,7 @@ #include "test/syscalls/linux/socket_test_util.h" #include "test/syscalls/linux/socket_unix.h" +#include "test/syscalls/linux/socket_unix_cmsg.h" #include "test/syscalls/linux/unix_domain_socket_test_util.h" #include "test/util/test_util.h" @@ -33,5 +34,9 @@ INSTANTIATE_TEST_SUITE_P( AllUnixDomainSockets, UnixSocketPairTest, ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); +INSTANTIATE_TEST_SUITE_P( + AllUnixDomainSockets, UnixSocketPairCmsgTest, + ::testing::ValuesIn(IncludeReversals(GetSocketPairs()))); + } // namespace testing } // namespace gvisor -- cgit v1.2.3 From 81eafb2c5e6242251618456f7e2a5657133a103c Mon Sep 17 00:00:00 2001 From: Googler Date: Thu, 6 Jun 2019 12:26:01 -0700 Subject: Internal change. PiperOrigin-RevId: 251902567 --- test/syscalls/linux/pipe.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'test/syscalls/linux') diff --git a/test/syscalls/linux/pipe.cc b/test/syscalls/linux/pipe.cc index bce351e08..67b93ecf5 100644 --- a/test/syscalls/linux/pipe.cc +++ b/test/syscalls/linux/pipe.cc @@ -55,7 +55,7 @@ class PipeTest : public ::testing::TestWithParam { FileDescriptor wfd; public: - static void SetUpTestCase() { + static void SetUpTestSuite() { // Tests intentionally generate SIGPIPE. TEST_PCHECK(signal(SIGPIPE, SIG_IGN) != SIG_ERR); } @@ -82,7 +82,7 @@ class PipeTest : public ::testing::TestWithParam { return s1; } - static void TearDownTestCase() { + static void TearDownTestSuite() { TEST_PCHECK(signal(SIGPIPE, SIG_DFL) != SIG_ERR); } -- cgit v1.2.3 From 8b8bd8d5b28a8e41f59fc3465c38964986bfb084 Mon Sep 17 00:00:00 2001 From: Rahat Mahmood Date: Thu, 6 Jun 2019 14:30:50 -0700 Subject: Try increase listen backlog. PiperOrigin-RevId: 251928000 --- test/syscalls/linux/socket_inet_loopback.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'test/syscalls/linux') diff --git a/test/syscalls/linux/socket_inet_loopback.cc b/test/syscalls/linux/socket_inet_loopback.cc index b216d14cb..9b3b70b01 100644 --- a/test/syscalls/linux/socket_inet_loopback.cc +++ b/test/syscalls/linux/socket_inet_loopback.cc @@ -198,7 +198,7 @@ TEST_P(SocketInetReusePortTest, TcpPortReuseMultiThread) { ASSERT_THAT( bind(fd, reinterpret_cast(&listen_addr), listener.addr_len), SyscallSucceeds()); - ASSERT_THAT(listen(fd, 40), SyscallSucceeds()); + ASSERT_THAT(listen(fd, 512), SyscallSucceeds()); // On the first bind we need to determine which port was bound. if (i != 0) { -- cgit v1.2.3 From 2d2831e3541c8ae3c84f17cfd1bf0a26f2027044 Mon Sep 17 00:00:00 2001 From: Rahat Mahmood Date: Thu, 6 Jun 2019 15:03:44 -0700 Subject: Track and export socket state. This is necessary for implementing network diagnostic interfaces like /proc/net/{tcp,udp,unix} and sock_diag(7). For pass-through endpoints such as hostinet, we obtain the socket state from the backend. 
For netstack, we add explicit tracking of TCP states. PiperOrigin-RevId: 251934850 --- pkg/abi/linux/socket.go | 16 ++ pkg/sentry/fs/proc/net.go | 20 +-- pkg/sentry/socket/epsocket/epsocket.go | 44 +++++ pkg/sentry/socket/hostinet/socket.go | 24 +++ pkg/sentry/socket/netlink/socket.go | 5 + pkg/sentry/socket/rpcinet/socket.go | 6 + pkg/sentry/socket/socket.go | 4 + pkg/sentry/socket/unix/transport/BUILD | 1 + pkg/sentry/socket/unix/transport/connectioned.go | 9 ++ pkg/sentry/socket/unix/transport/connectionless.go | 16 ++ pkg/sentry/socket/unix/transport/unix.go | 4 + pkg/sentry/socket/unix/unix.go | 5 + pkg/tcpip/stack/transport_test.go | 4 + pkg/tcpip/tcpip.go | 4 + pkg/tcpip/transport/icmp/BUILD | 1 + pkg/tcpip/transport/icmp/endpoint.go | 6 + pkg/tcpip/transport/raw/endpoint.go | 5 + pkg/tcpip/transport/tcp/accept.go | 12 +- pkg/tcpip/transport/tcp/connect.go | 26 ++- pkg/tcpip/transport/tcp/endpoint.go | 174 ++++++++++++++------ pkg/tcpip/transport/tcp/endpoint_state.go | 42 ++--- pkg/tcpip/transport/tcp/rcv.go | 37 +++++ pkg/tcpip/transport/tcp/snd.go | 4 + pkg/tcpip/transport/tcp/tcp_test.go | 131 ++++++++++++--- pkg/tcpip/transport/tcp/testing/context/context.go | 39 ++++- pkg/tcpip/transport/udp/endpoint.go | 6 + test/syscalls/linux/proc_net_unix.cc | 178 +++++++++++++++++++++ 27 files changed, 696 insertions(+), 127 deletions(-) (limited to 'test/syscalls/linux') diff --git a/pkg/abi/linux/socket.go b/pkg/abi/linux/socket.go index 417840731..44bd69df6 100644 --- a/pkg/abi/linux/socket.go +++ b/pkg/abi/linux/socket.go @@ -200,6 +200,22 @@ const ( SS_DISCONNECTING = 4 // In process of disconnecting. ) +// TCP protocol states, from include/net/tcp_states.h. +const ( + TCP_ESTABLISHED uint32 = iota + 1 + TCP_SYN_SENT + TCP_SYN_RECV + TCP_FIN_WAIT1 + TCP_FIN_WAIT2 + TCP_TIME_WAIT + TCP_CLOSE + TCP_CLOSE_WAIT + TCP_LAST_ACK + TCP_LISTEN + TCP_CLOSING + TCP_NEW_SYN_RECV +) + // SockAddrMax is the maximum size of a struct sockaddr, from // uapi/linux/socket.h. const SockAddrMax = 128 diff --git a/pkg/sentry/fs/proc/net.go b/pkg/sentry/fs/proc/net.go index 4a107c739..3daaa962c 100644 --- a/pkg/sentry/fs/proc/net.go +++ b/pkg/sentry/fs/proc/net.go @@ -240,24 +240,6 @@ func (n *netUnix) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s } } - var sockState int - switch sops.Endpoint().Type() { - case linux.SOCK_DGRAM: - sockState = linux.SS_CONNECTING - // Unlike Linux, we don't have unbound connection-less sockets, - // so no SS_DISCONNECTING. - - case linux.SOCK_SEQPACKET: - fallthrough - case linux.SOCK_STREAM: - // Connectioned. - if sops.Endpoint().(transport.ConnectingEndpoint).Connected() { - sockState = linux.SS_CONNECTED - } else { - sockState = linux.SS_UNCONNECTED - } - } - // In the socket entry below, the value for the 'Num' field requires // some consideration. Linux prints the address to the struct // unix_sock representing a socket in the kernel, but may redact the @@ -282,7 +264,7 @@ func (n *netUnix) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]s 0, // Protocol, always 0 for UDS. sockFlags, // Flags. sops.Endpoint().Type(), // Type. - sockState, // State. + sops.State(), // State. sfile.InodeID(), // Inode. 
) diff --git a/pkg/sentry/socket/epsocket/epsocket.go b/pkg/sentry/socket/epsocket/epsocket.go index de4b963da..f91c5127a 100644 --- a/pkg/sentry/socket/epsocket/epsocket.go +++ b/pkg/sentry/socket/epsocket/epsocket.go @@ -52,6 +52,7 @@ import ( "gvisor.googlesource.com/gvisor/pkg/tcpip" "gvisor.googlesource.com/gvisor/pkg/tcpip/buffer" "gvisor.googlesource.com/gvisor/pkg/tcpip/stack" + "gvisor.googlesource.com/gvisor/pkg/tcpip/transport/tcp" "gvisor.googlesource.com/gvisor/pkg/waiter" ) @@ -2281,3 +2282,46 @@ func nicStateFlagsToLinux(f stack.NICStateFlags) uint32 { } return rv } + +// State implements socket.Socket.State. State translates the internal state +// returned by netstack to values defined by Linux. +func (s *SocketOperations) State() uint32 { + if s.family != linux.AF_INET && s.family != linux.AF_INET6 { + // States not implemented for this socket's family. + return 0 + } + + if !s.isPacketBased() { + // TCP socket. + switch tcp.EndpointState(s.Endpoint.State()) { + case tcp.StateEstablished: + return linux.TCP_ESTABLISHED + case tcp.StateSynSent: + return linux.TCP_SYN_SENT + case tcp.StateSynRecv: + return linux.TCP_SYN_RECV + case tcp.StateFinWait1: + return linux.TCP_FIN_WAIT1 + case tcp.StateFinWait2: + return linux.TCP_FIN_WAIT2 + case tcp.StateTimeWait: + return linux.TCP_TIME_WAIT + case tcp.StateClose, tcp.StateInitial, tcp.StateBound, tcp.StateConnecting, tcp.StateError: + return linux.TCP_CLOSE + case tcp.StateCloseWait: + return linux.TCP_CLOSE_WAIT + case tcp.StateLastAck: + return linux.TCP_LAST_ACK + case tcp.StateListen: + return linux.TCP_LISTEN + case tcp.StateClosing: + return linux.TCP_CLOSING + default: + // Internal or unknown state. + return 0 + } + } + + // TODO(b/112063468): Export states for UDP, ICMP, and raw sockets. + return 0 +} diff --git a/pkg/sentry/socket/hostinet/socket.go b/pkg/sentry/socket/hostinet/socket.go index 41f9693bb..0d75580a3 100644 --- a/pkg/sentry/socket/hostinet/socket.go +++ b/pkg/sentry/socket/hostinet/socket.go @@ -19,7 +19,9 @@ import ( "syscall" "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/binary" "gvisor.googlesource.com/gvisor/pkg/fdnotifier" + "gvisor.googlesource.com/gvisor/pkg/log" "gvisor.googlesource.com/gvisor/pkg/sentry/context" "gvisor.googlesource.com/gvisor/pkg/sentry/fs" "gvisor.googlesource.com/gvisor/pkg/sentry/fs/fsutil" @@ -519,6 +521,28 @@ func translateIOSyscallError(err error) error { return err } +// State implements socket.Socket.State. +func (s *socketOperations) State() uint32 { + info := linux.TCPInfo{} + buf, err := getsockopt(s.fd, syscall.SOL_TCP, syscall.TCP_INFO, linux.SizeOfTCPInfo) + if err != nil { + if err != syscall.ENOPROTOOPT { + log.Warningf("Failed to get TCP socket info from %+v: %v", s, err) + } + // For non-TCP sockets, silently ignore the failure. + return 0 + } + if len(buf) != linux.SizeOfTCPInfo { + // Unmarshal below will panic if getsockopt returns a buffer of + // unexpected size. 
+ log.Warningf("Failed to get TCP socket info from %+v: getsockopt(2) returned %d bytes, expecting %d bytes.", s, len(buf), linux.SizeOfTCPInfo) + return 0 + } + + binary.Unmarshal(buf, usermem.ByteOrder, &info) + return uint32(info.State) +} + type socketProvider struct { family int } diff --git a/pkg/sentry/socket/netlink/socket.go b/pkg/sentry/socket/netlink/socket.go index afd06ca33..16c79aa33 100644 --- a/pkg/sentry/socket/netlink/socket.go +++ b/pkg/sentry/socket/netlink/socket.go @@ -616,3 +616,8 @@ func (s *Socket) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, n, err := s.sendMsg(ctx, src, nil, 0, socket.ControlMessages{}) return int64(n), err.ToError() } + +// State implements socket.Socket.State. +func (s *Socket) State() uint32 { + return s.ep.State() +} diff --git a/pkg/sentry/socket/rpcinet/socket.go b/pkg/sentry/socket/rpcinet/socket.go index 55e0b6665..bf42bdf69 100644 --- a/pkg/sentry/socket/rpcinet/socket.go +++ b/pkg/sentry/socket/rpcinet/socket.go @@ -830,6 +830,12 @@ func (s *socketOperations) SendMsg(t *kernel.Task, src usermem.IOSequence, to [] } } +// State implements socket.Socket.State. +func (s *socketOperations) State() uint32 { + // TODO(b/127845868): Define a new rpc to query the socket state. + return 0 +} + type socketProvider struct { family int } diff --git a/pkg/sentry/socket/socket.go b/pkg/sentry/socket/socket.go index 9393acd28..a99423365 100644 --- a/pkg/sentry/socket/socket.go +++ b/pkg/sentry/socket/socket.go @@ -116,6 +116,10 @@ type Socket interface { // SendTimeout gets the current timeout (in ns) for send operations. Zero // means no timeout, and negative means DONTWAIT. SendTimeout() int64 + + // State returns the current state of the socket, as represented by Linux in + // procfs. The returned state value is protocol-specific. + State() uint32 } // Provider is the interface implemented by providers of sockets for specific diff --git a/pkg/sentry/socket/unix/transport/BUILD b/pkg/sentry/socket/unix/transport/BUILD index 5a2de0c4c..52f324eed 100644 --- a/pkg/sentry/socket/unix/transport/BUILD +++ b/pkg/sentry/socket/unix/transport/BUILD @@ -28,6 +28,7 @@ go_library( importpath = "gvisor.googlesource.com/gvisor/pkg/sentry/socket/unix/transport", visibility = ["//:sandbox"], deps = [ + "//pkg/abi/linux", "//pkg/ilist", "//pkg/refs", "//pkg/syserr", diff --git a/pkg/sentry/socket/unix/transport/connectioned.go b/pkg/sentry/socket/unix/transport/connectioned.go index 18e492862..9c8ec0365 100644 --- a/pkg/sentry/socket/unix/transport/connectioned.go +++ b/pkg/sentry/socket/unix/transport/connectioned.go @@ -17,6 +17,7 @@ package transport import ( "sync" + "gvisor.googlesource.com/gvisor/pkg/abi/linux" "gvisor.googlesource.com/gvisor/pkg/syserr" "gvisor.googlesource.com/gvisor/pkg/tcpip" "gvisor.googlesource.com/gvisor/pkg/waiter" @@ -458,3 +459,11 @@ func (e *connectionedEndpoint) Readiness(mask waiter.EventMask) waiter.EventMask return ready } + +// State implements socket.Socket.State. 
+func (e *connectionedEndpoint) State() uint32 { + if e.Connected() { + return linux.SS_CONNECTED + } + return linux.SS_UNCONNECTED +} diff --git a/pkg/sentry/socket/unix/transport/connectionless.go b/pkg/sentry/socket/unix/transport/connectionless.go index 43ff875e4..c034cf984 100644 --- a/pkg/sentry/socket/unix/transport/connectionless.go +++ b/pkg/sentry/socket/unix/transport/connectionless.go @@ -15,6 +15,7 @@ package transport import ( + "gvisor.googlesource.com/gvisor/pkg/abi/linux" "gvisor.googlesource.com/gvisor/pkg/syserr" "gvisor.googlesource.com/gvisor/pkg/tcpip" "gvisor.googlesource.com/gvisor/pkg/waiter" @@ -194,3 +195,18 @@ func (e *connectionlessEndpoint) Readiness(mask waiter.EventMask) waiter.EventMa return ready } + +// State implements socket.Socket.State. +func (e *connectionlessEndpoint) State() uint32 { + e.Lock() + defer e.Unlock() + + switch { + case e.isBound(): + return linux.SS_UNCONNECTED + case e.Connected(): + return linux.SS_CONNECTING + default: + return linux.SS_DISCONNECTING + } +} diff --git a/pkg/sentry/socket/unix/transport/unix.go b/pkg/sentry/socket/unix/transport/unix.go index 37d82bb6b..5fc09af55 100644 --- a/pkg/sentry/socket/unix/transport/unix.go +++ b/pkg/sentry/socket/unix/transport/unix.go @@ -191,6 +191,10 @@ type Endpoint interface { // GetSockOpt gets a socket option. opt should be a pointer to one of the // tcpip.*Option types. GetSockOpt(opt interface{}) *tcpip.Error + + // State returns the current state of the socket, as represented by Linux in + // procfs. + State() uint32 } // A Credentialer is a socket or endpoint that supports the SO_PASSCRED socket diff --git a/pkg/sentry/socket/unix/unix.go b/pkg/sentry/socket/unix/unix.go index 388cc0d8b..375542350 100644 --- a/pkg/sentry/socket/unix/unix.go +++ b/pkg/sentry/socket/unix/unix.go @@ -596,6 +596,11 @@ func (s *SocketOperations) RecvMsg(t *kernel.Task, dst usermem.IOSequence, flags } } +// State implements socket.Socket.State. +func (s *SocketOperations) State() uint32 { + return s.ep.State() +} + // provider is a unix domain socket provider. type provider struct{} diff --git a/pkg/tcpip/stack/transport_test.go b/pkg/tcpip/stack/transport_test.go index 8d74f1543..e8a9392b5 100644 --- a/pkg/tcpip/stack/transport_test.go +++ b/pkg/tcpip/stack/transport_test.go @@ -188,6 +188,10 @@ func (f *fakeTransportEndpoint) HandleControlPacket(stack.TransportEndpointID, s f.proto.controlCount++ } +func (f *fakeTransportEndpoint) State() uint32 { + return 0 +} + type fakeTransportGoodOption bool type fakeTransportBadOption bool diff --git a/pkg/tcpip/tcpip.go b/pkg/tcpip/tcpip.go index f9886c6e4..85ef014d0 100644 --- a/pkg/tcpip/tcpip.go +++ b/pkg/tcpip/tcpip.go @@ -377,6 +377,10 @@ type Endpoint interface { // GetSockOpt gets a socket option. opt should be a pointer to one of the // *Option types. GetSockOpt(opt interface{}) *Error + + // State returns a socket's lifecycle state. The returned value is + // protocol-specific and is primarily used for diagnostics. + State() uint32 } // WriteOptions contains options for Endpoint.Write. 
diff --git a/pkg/tcpip/transport/icmp/BUILD b/pkg/tcpip/transport/icmp/BUILD index 9aa6f3978..84a2b53b7 100644 --- a/pkg/tcpip/transport/icmp/BUILD +++ b/pkg/tcpip/transport/icmp/BUILD @@ -33,6 +33,7 @@ go_library( "//pkg/tcpip/header", "//pkg/tcpip/stack", "//pkg/tcpip/transport/raw", + "//pkg/tcpip/transport/tcp", "//pkg/waiter", ], ) diff --git a/pkg/tcpip/transport/icmp/endpoint.go b/pkg/tcpip/transport/icmp/endpoint.go index e2b90ef10..b8005093a 100644 --- a/pkg/tcpip/transport/icmp/endpoint.go +++ b/pkg/tcpip/transport/icmp/endpoint.go @@ -708,3 +708,9 @@ func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv // HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket. func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, vv buffer.VectorisedView) { } + +// State implements tcpip.Endpoint.State. The ICMP endpoint currently doesn't +// expose internal socket state. +func (e *endpoint) State() uint32 { + return 0 +} diff --git a/pkg/tcpip/transport/raw/endpoint.go b/pkg/tcpip/transport/raw/endpoint.go index 1daf5823f..e4ff50c91 100644 --- a/pkg/tcpip/transport/raw/endpoint.go +++ b/pkg/tcpip/transport/raw/endpoint.go @@ -519,3 +519,8 @@ func (ep *endpoint) HandlePacket(route *stack.Route, netHeader buffer.View, vv b ep.waiterQueue.Notify(waiter.EventIn) } } + +// State implements socket.Socket.State. +func (ep *endpoint) State() uint32 { + return 0 +} diff --git a/pkg/tcpip/transport/tcp/accept.go b/pkg/tcpip/transport/tcp/accept.go index 31e365ae5..a32e20b06 100644 --- a/pkg/tcpip/transport/tcp/accept.go +++ b/pkg/tcpip/transport/tcp/accept.go @@ -226,7 +226,6 @@ func (l *listenContext) createConnectingEndpoint(s *segment, iss seqnum.Value, i } n.isRegistered = true - n.state = stateConnecting // Create sender and receiver. // @@ -258,8 +257,9 @@ func (l *listenContext) createEndpointAndPerformHandshake(s *segment, opts *head ep.Close() return nil, err } - - ep.state = stateConnected + ep.mu.Lock() + ep.state = StateEstablished + ep.mu.Unlock() // Update the receive window scaling. We can't do it before the // handshake because it's possible that the peer doesn't support window @@ -276,7 +276,7 @@ func (e *endpoint) deliverAccepted(n *endpoint) { e.mu.RLock() state := e.state e.mu.RUnlock() - if state == stateListen { + if state == StateListen { e.acceptedChan <- n e.waiterQueue.Notify(waiter.EventIn) } else { @@ -406,7 +406,7 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) { n.tsOffset = 0 // Switch state to connected. - n.state = stateConnected + n.state = StateEstablished // Do the delivery in a separate goroutine so // that we don't block the listen loop in case @@ -429,7 +429,7 @@ func (e *endpoint) protocolListenLoop(rcvWnd seqnum.Size) *tcpip.Error { // handleSynSegment() from attempting to queue new connections // to the endpoint. e.mu.Lock() - e.state = stateClosed + e.state = StateClose // Do cleanup if needed. 
e.completeWorkerLocked() diff --git a/pkg/tcpip/transport/tcp/connect.go b/pkg/tcpip/transport/tcp/connect.go index 371d2ed29..0ad7bfb38 100644 --- a/pkg/tcpip/transport/tcp/connect.go +++ b/pkg/tcpip/transport/tcp/connect.go @@ -151,6 +151,9 @@ func (h *handshake) resetToSynRcvd(iss seqnum.Value, irs seqnum.Value, opts *hea h.mss = opts.MSS h.sndWndScale = opts.WS h.listenEP = listenEP + h.ep.mu.Lock() + h.ep.state = StateSynRecv + h.ep.mu.Unlock() } // checkAck checks if the ACK number, if present, of a segment received during @@ -219,6 +222,9 @@ func (h *handshake) synSentState(s *segment) *tcpip.Error { // but resend our own SYN and wait for it to be acknowledged in the // SYN-RCVD state. h.state = handshakeSynRcvd + h.ep.mu.Lock() + h.ep.state = StateSynRecv + h.ep.mu.Unlock() synOpts := header.TCPSynOptions{ WS: h.rcvWndScale, TS: rcvSynOpts.TS, @@ -668,7 +674,7 @@ func (e *endpoint) makeOptions(sackBlocks []header.SACKBlock) []byte { // sendRaw sends a TCP segment to the endpoint's peer. func (e *endpoint) sendRaw(data buffer.VectorisedView, flags byte, seq, ack seqnum.Value, rcvWnd seqnum.Size) *tcpip.Error { var sackBlocks []header.SACKBlock - if e.state == stateConnected && e.rcv.pendingBufSize > 0 && (flags&header.TCPFlagAck != 0) { + if e.state == StateEstablished && e.rcv.pendingBufSize > 0 && (flags&header.TCPFlagAck != 0) { sackBlocks = e.sack.Blocks[:e.sack.NumBlocks] } options := e.makeOptions(sackBlocks) @@ -719,8 +725,7 @@ func (e *endpoint) handleClose() *tcpip.Error { // protocol goroutine. func (e *endpoint) resetConnectionLocked(err *tcpip.Error) { e.sendRaw(buffer.VectorisedView{}, header.TCPFlagAck|header.TCPFlagRst, e.snd.sndUna, e.rcv.rcvNxt, 0) - - e.state = stateError + e.state = StateError e.hardError = err } @@ -876,14 +881,19 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error { // handshake, and then inform potential waiters about its // completion. h := newHandshake(e, seqnum.Size(e.receiveBufferAvailable())) + e.mu.Lock() + h.ep.state = StateSynSent + e.mu.Unlock() + if err := h.execute(); err != nil { e.lastErrorMu.Lock() e.lastError = err e.lastErrorMu.Unlock() e.mu.Lock() - e.state = stateError + e.state = StateError e.hardError = err + // Lock released below. epilogue() @@ -905,7 +915,7 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error { // Tell waiters that the endpoint is connected and writable. e.mu.Lock() - e.state = stateConnected + e.state = StateEstablished drained := e.drainDone != nil e.mu.Unlock() if drained { @@ -1005,7 +1015,7 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error { return err } } - if e.state != stateError { + if e.state != StateError { close(e.drainDone) <-e.undrain } @@ -1061,8 +1071,8 @@ func (e *endpoint) protocolMainLoop(handshake bool) *tcpip.Error { // Mark endpoint as closed. e.mu.Lock() - if e.state != stateError { - e.state = stateClosed + if e.state != StateError { + e.state = StateClose } // Lock released below. epilogue() diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go index fd697402e..23422ca5e 100644 --- a/pkg/tcpip/transport/tcp/endpoint.go +++ b/pkg/tcpip/transport/tcp/endpoint.go @@ -32,18 +32,81 @@ import ( "gvisor.googlesource.com/gvisor/pkg/waiter" ) -type endpointState int +// EndpointState represents the state of a TCP endpoint. +type EndpointState uint32 +// Endpoint states. Note that are represented in a netstack-specific manner and +// may not be meaningful externally. 
Specifically, they need to be translated to +// Linux's representation for these states if presented to userspace. const ( - stateInitial endpointState = iota - stateBound - stateListen - stateConnecting - stateConnected - stateClosed - stateError + // Endpoint states internal to netstack. These map to the TCP state CLOSED. + StateInitial EndpointState = iota + StateBound + StateConnecting // Connect() called, but the initial SYN hasn't been sent. + StateError + + // TCP protocol states. + StateEstablished + StateSynSent + StateSynRecv + StateFinWait1 + StateFinWait2 + StateTimeWait + StateClose + StateCloseWait + StateLastAck + StateListen + StateClosing ) +// connected is the set of states where an endpoint is connected to a peer. +func (s EndpointState) connected() bool { + switch s { + case StateEstablished, StateFinWait1, StateFinWait2, StateTimeWait, StateCloseWait, StateLastAck, StateClosing: + return true + default: + return false + } +} + +// String implements fmt.Stringer.String. +func (s EndpointState) String() string { + switch s { + case StateInitial: + return "INITIAL" + case StateBound: + return "BOUND" + case StateConnecting: + return "CONNECTING" + case StateError: + return "ERROR" + case StateEstablished: + return "ESTABLISHED" + case StateSynSent: + return "SYN-SENT" + case StateSynRecv: + return "SYN-RCVD" + case StateFinWait1: + return "FIN-WAIT1" + case StateFinWait2: + return "FIN-WAIT2" + case StateTimeWait: + return "TIME-WAIT" + case StateClose: + return "CLOSED" + case StateCloseWait: + return "CLOSE-WAIT" + case StateLastAck: + return "LAST-ACK" + case StateListen: + return "LISTEN" + case StateClosing: + return "CLOSING" + default: + panic("unreachable") + } +} + // Reasons for notifying the protocol goroutine. const ( notifyNonZeroReceiveWindow = 1 << iota @@ -108,10 +171,14 @@ type endpoint struct { rcvBufUsed int // The following fields are protected by the mutex. - mu sync.RWMutex `state:"nosave"` - id stack.TransportEndpointID - state endpointState `state:".(endpointState)"` - isPortReserved bool `state:"manual"` + mu sync.RWMutex `state:"nosave"` + id stack.TransportEndpointID + + // state endpointState `state:".(endpointState)"` + // pState ProtocolState + state EndpointState `state:".(EndpointState)"` + + isPortReserved bool `state:"manual"` isRegistered bool boundNICID tcpip.NICID `state:"manual"` route stack.Route `state:"manual"` @@ -304,6 +371,7 @@ func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waite stack: stack, netProto: netProto, waiterQueue: waiterQueue, + state: StateInitial, rcvBufSize: DefaultBufferSize, sndBufSize: DefaultBufferSize, sndMTU: int(math.MaxInt32), @@ -351,14 +419,14 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { defer e.mu.RUnlock() switch e.state { - case stateInitial, stateBound, stateConnecting: + case StateInitial, StateBound, StateConnecting, StateSynSent, StateSynRecv: // Ready for nothing. - case stateClosed, stateError: + case StateClose, StateError: // Ready for anything. result = mask - case stateListen: + case StateListen: // Check if there's anything in the accepted channel. if (mask & waiter.EventIn) != 0 { if len(e.acceptedChan) > 0 { @@ -366,7 +434,7 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask { } } - case stateConnected: + case StateEstablished, StateFinWait1, StateFinWait2, StateTimeWait, StateCloseWait, StateLastAck, StateClosing: // Determine if the endpoint is writable if requested. 
if (mask & waiter.EventOut) != 0 { e.sndBufMu.Lock() @@ -427,7 +495,7 @@ func (e *endpoint) Close() { // are immediately available for reuse after Close() is called. If also // registered, we unregister as well otherwise the next user would fail // in Listen() when trying to register. - if e.state == stateListen && e.isPortReserved { + if e.state == StateListen && e.isPortReserved { if e.isRegistered { e.stack.UnregisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.id, e) e.isRegistered = false @@ -487,15 +555,15 @@ func (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, e.mu.RLock() // The endpoint can be read if it's connected, or if it's already closed // but has some pending unread data. Also note that a RST being received - // would cause the state to become stateError so we should allow the + // would cause the state to become StateError so we should allow the // reads to proceed before returning a ECONNRESET. e.rcvListMu.Lock() bufUsed := e.rcvBufUsed - if s := e.state; s != stateConnected && s != stateClosed && bufUsed == 0 { + if s := e.state; !s.connected() && s != StateClose && bufUsed == 0 { e.rcvListMu.Unlock() he := e.hardError e.mu.RUnlock() - if s == stateError { + if s == StateError { return buffer.View{}, tcpip.ControlMessages{}, he } return buffer.View{}, tcpip.ControlMessages{}, tcpip.ErrInvalidEndpointState @@ -511,7 +579,7 @@ func (e *endpoint) Read(*tcpip.FullAddress) (buffer.View, tcpip.ControlMessages, func (e *endpoint) readLocked() (buffer.View, *tcpip.Error) { if e.rcvBufUsed == 0 { - if e.rcvClosed || e.state != stateConnected { + if e.rcvClosed || !e.state.connected() { return buffer.View{}, tcpip.ErrClosedForReceive } return buffer.View{}, tcpip.ErrWouldBlock @@ -547,9 +615,9 @@ func (e *endpoint) Write(p tcpip.Payload, opts tcpip.WriteOptions) (uintptr, <-c defer e.mu.RUnlock() // The endpoint cannot be written to if it's not connected. - if e.state != stateConnected { + if !e.state.connected() { switch e.state { - case stateError: + case StateError: return 0, nil, e.hardError default: return 0, nil, tcpip.ErrClosedForSend @@ -612,8 +680,8 @@ func (e *endpoint) Peek(vec [][]byte) (uintptr, tcpip.ControlMessages, *tcpip.Er // The endpoint can be read if it's connected, or if it's already closed // but has some pending unread data. - if s := e.state; s != stateConnected && s != stateClosed { - if s == stateError { + if s := e.state; !s.connected() && s != StateClose { + if s == StateError { return 0, tcpip.ControlMessages{}, e.hardError } return 0, tcpip.ControlMessages{}, tcpip.ErrInvalidEndpointState @@ -623,7 +691,7 @@ func (e *endpoint) Peek(vec [][]byte) (uintptr, tcpip.ControlMessages, *tcpip.Er defer e.rcvListMu.Unlock() if e.rcvBufUsed == 0 { - if e.rcvClosed || e.state != stateConnected { + if e.rcvClosed || !e.state.connected() { return 0, tcpip.ControlMessages{}, tcpip.ErrClosedForReceive } return 0, tcpip.ControlMessages{}, tcpip.ErrWouldBlock @@ -789,7 +857,7 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error { defer e.mu.Unlock() // We only allow this to be set when we're in the initial state. - if e.state != stateInitial { + if e.state != StateInitial { return tcpip.ErrInvalidEndpointState } @@ -841,7 +909,7 @@ func (e *endpoint) readyReceiveSize() (int, *tcpip.Error) { defer e.mu.RUnlock() // The endpoint cannot be in listen state. 
- if e.state == stateListen { + if e.state == StateListen { return 0, tcpip.ErrInvalidEndpointState } @@ -1057,7 +1125,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) (er nicid := addr.NIC switch e.state { - case stateBound: + case StateBound: // If we're already bound to a NIC but the caller is requesting // that we use a different one now, we cannot proceed. if e.boundNICID == 0 { @@ -1070,16 +1138,16 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) (er nicid = e.boundNICID - case stateInitial: - // Nothing to do. We'll eventually fill-in the gaps in the ID - // (if any) when we find a route. + case StateInitial: + // Nothing to do. We'll eventually fill-in the gaps in the ID (if any) + // when we find a route. - case stateConnecting: - // A connection request has already been issued but hasn't - // completed yet. + case StateConnecting, StateSynSent, StateSynRecv: + // A connection request has already been issued but hasn't completed + // yet. return tcpip.ErrAlreadyConnecting - case stateConnected: + case StateEstablished: // The endpoint is already connected. If caller hasn't been notified yet, return success. if !e.isConnectNotified { e.isConnectNotified = true @@ -1088,7 +1156,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) (er // Otherwise return that it's already connected. return tcpip.ErrAlreadyConnected - case stateError: + case StateError: return e.hardError default: @@ -1154,7 +1222,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) (er } e.isRegistered = true - e.state = stateConnecting + e.state = StateConnecting e.route = r.Clone() e.boundNICID = nicid e.effectiveNetProtos = netProtos @@ -1175,7 +1243,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) (er } e.segmentQueue.mu.Unlock() e.snd.updateMaxPayloadSize(int(e.route.MTU()), 0) - e.state = stateConnected + e.state = StateEstablished } if run { @@ -1199,8 +1267,8 @@ func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error { defer e.mu.Unlock() e.shutdownFlags |= flags - switch e.state { - case stateConnected: + switch { + case e.state.connected(): // Close for read. if (e.shutdownFlags & tcpip.ShutdownRead) != 0 { // Mark read side as closed. @@ -1241,7 +1309,7 @@ func (e *endpoint) Shutdown(flags tcpip.ShutdownFlags) *tcpip.Error { e.sndCloseWaker.Assert() } - case stateListen: + case e.state == StateListen: // Tell protocolListenLoop to stop. if flags&tcpip.ShutdownRead != 0 { e.notifyProtocolGoroutine(notifyClose) @@ -1269,7 +1337,7 @@ func (e *endpoint) Listen(backlog int) (err *tcpip.Error) { // When the endpoint shuts down, it sets workerCleanup to true, and from // that point onward, acceptedChan is the responsibility of the cleanup() // method (and should not be touched anywhere else, including here). - if e.state == stateListen && !e.workerCleanup { + if e.state == StateListen && !e.workerCleanup { // Adjust the size of the channel iff we can fix existing // pending connections into the new one. if len(e.acceptedChan) > backlog { @@ -1288,7 +1356,7 @@ func (e *endpoint) Listen(backlog int) (err *tcpip.Error) { } // Endpoint must be bound before it can transition to listen mode. 
- if e.state != stateBound { + if e.state != StateBound { return tcpip.ErrInvalidEndpointState } @@ -1298,7 +1366,7 @@ func (e *endpoint) Listen(backlog int) (err *tcpip.Error) { } e.isRegistered = true - e.state = stateListen + e.state = StateListen if e.acceptedChan == nil { e.acceptedChan = make(chan *endpoint, backlog) } @@ -1325,7 +1393,7 @@ func (e *endpoint) Accept() (tcpip.Endpoint, *waiter.Queue, *tcpip.Error) { defer e.mu.RUnlock() // Endpoint must be in listen state before it can accept connections. - if e.state != stateListen { + if e.state != StateListen { return nil, nil, tcpip.ErrInvalidEndpointState } @@ -1353,7 +1421,7 @@ func (e *endpoint) Bind(addr tcpip.FullAddress) (err *tcpip.Error) { // Don't allow binding once endpoint is not in the initial state // anymore. This is because once the endpoint goes into a connected or // listen state, it is already bound. - if e.state != stateInitial { + if e.state != StateInitial { return tcpip.ErrAlreadyBound } @@ -1408,7 +1476,7 @@ func (e *endpoint) Bind(addr tcpip.FullAddress) (err *tcpip.Error) { } // Mark endpoint as bound. - e.state = stateBound + e.state = StateBound return nil } @@ -1430,7 +1498,7 @@ func (e *endpoint) GetRemoteAddress() (tcpip.FullAddress, *tcpip.Error) { e.mu.RLock() defer e.mu.RUnlock() - if e.state != stateConnected { + if !e.state.connected() { return tcpip.FullAddress{}, tcpip.ErrNotConnected } @@ -1739,3 +1807,11 @@ func (e *endpoint) initGSO() { gso.MaxSize = e.route.GSOMaxSize() e.gso = gso } + +// State implements tcpip.Endpoint.State. It exports the endpoint's protocol +// state for diagnostics. +func (e *endpoint) State() uint32 { + e.mu.Lock() + defer e.mu.Unlock() + return uint32(e.state) +} diff --git a/pkg/tcpip/transport/tcp/endpoint_state.go b/pkg/tcpip/transport/tcp/endpoint_state.go index e8aed2875..5f30c2374 100644 --- a/pkg/tcpip/transport/tcp/endpoint_state.go +++ b/pkg/tcpip/transport/tcp/endpoint_state.go @@ -49,8 +49,8 @@ func (e *endpoint) beforeSave() { defer e.mu.Unlock() switch e.state { - case stateInitial, stateBound: - case stateConnected: + case StateInitial, StateBound: + case StateEstablished, StateSynSent, StateSynRecv, StateFinWait1, StateFinWait2, StateTimeWait, StateCloseWait, StateLastAck, StateClosing: if e.route.Capabilities()&stack.CapabilitySaveRestore == 0 { if e.route.Capabilities()&stack.CapabilityDisconnectOk == 0 { panic(tcpip.ErrSaveRejection{fmt.Errorf("endpoint cannot be saved in connected state: local %v:%d, remote %v:%d", e.id.LocalAddress, e.id.LocalPort, e.id.RemoteAddress, e.id.RemotePort)}) @@ -66,17 +66,17 @@ func (e *endpoint) beforeSave() { break } fallthrough - case stateListen, stateConnecting: + case StateListen, StateConnecting: e.drainSegmentLocked() - if e.state != stateClosed && e.state != stateError { + if e.state != StateClose && e.state != StateError { if !e.workerRunning { panic("endpoint has no worker running in listen, connecting, or connected state") } break } fallthrough - case stateError, stateClosed: - for e.state == stateError && e.workerRunning { + case StateError, StateClose: + for e.state == StateError && e.workerRunning { e.mu.Unlock() time.Sleep(100 * time.Millisecond) e.mu.Lock() @@ -92,7 +92,7 @@ func (e *endpoint) beforeSave() { panic("endpoint still has waiters upon save") } - if e.state != stateClosed && !((e.state == stateBound || e.state == stateListen) == e.isPortReserved) { + if e.state != StateClose && !((e.state == StateBound || e.state == StateListen) == e.isPortReserved) { panic("endpoints which are not in the 
closed state must have a reserved port IFF they are in bound or listen state") } } @@ -132,7 +132,7 @@ func (e *endpoint) loadAcceptedChan(acceptedEndpoints []*endpoint) { } // saveState is invoked by stateify. -func (e *endpoint) saveState() endpointState { +func (e *endpoint) saveState() EndpointState { return e.state } @@ -146,15 +146,15 @@ var connectingLoading sync.WaitGroup // Bound endpoint loading happens last. // loadState is invoked by stateify. -func (e *endpoint) loadState(state endpointState) { +func (e *endpoint) loadState(state EndpointState) { // This is to ensure that the loading wait groups include all applicable // endpoints before any asynchronous calls to the Wait() methods. switch state { - case stateConnected: + case StateEstablished, StateFinWait1, StateFinWait2, StateTimeWait, StateCloseWait, StateLastAck, StateClosing: connectedLoading.Add(1) - case stateListen: + case StateListen: listenLoading.Add(1) - case stateConnecting: + case StateConnecting, StateSynSent, StateSynRecv: connectingLoading.Add(1) } e.state = state @@ -168,7 +168,7 @@ func (e *endpoint) afterLoad() { state := e.state switch state { - case stateInitial, stateBound, stateListen, stateConnecting, stateConnected: + case StateInitial, StateBound, StateListen, StateConnecting, StateEstablished: var ss SendBufferSizeOption if err := e.stack.TransportProtocolOption(ProtocolNumber, &ss); err == nil { if e.sndBufSize < ss.Min || e.sndBufSize > ss.Max { @@ -181,7 +181,7 @@ func (e *endpoint) afterLoad() { } bind := func() { - e.state = stateInitial + e.state = StateInitial if len(e.bindAddress) == 0 { e.bindAddress = e.id.LocalAddress } @@ -191,7 +191,7 @@ func (e *endpoint) afterLoad() { } switch state { - case stateConnected: + case StateEstablished, StateFinWait1, StateFinWait2, StateTimeWait, StateCloseWait, StateLastAck, StateClosing: bind() if len(e.connectingAddress) == 0 { // This endpoint is accepted by netstack but not yet by @@ -211,7 +211,7 @@ func (e *endpoint) afterLoad() { panic("endpoint connecting failed: " + err.String()) } connectedLoading.Done() - case stateListen: + case StateListen: tcpip.AsyncLoading.Add(1) go func() { connectedLoading.Wait() @@ -223,7 +223,7 @@ func (e *endpoint) afterLoad() { listenLoading.Done() tcpip.AsyncLoading.Done() }() - case stateConnecting: + case StateConnecting, StateSynSent, StateSynRecv: tcpip.AsyncLoading.Add(1) go func() { connectedLoading.Wait() @@ -235,7 +235,7 @@ func (e *endpoint) afterLoad() { connectingLoading.Done() tcpip.AsyncLoading.Done() }() - case stateBound: + case StateBound: tcpip.AsyncLoading.Add(1) go func() { connectedLoading.Wait() @@ -244,7 +244,7 @@ func (e *endpoint) afterLoad() { bind() tcpip.AsyncLoading.Done() }() - case stateClosed: + case StateClose: if e.isPortReserved { tcpip.AsyncLoading.Add(1) go func() { @@ -252,12 +252,12 @@ func (e *endpoint) afterLoad() { listenLoading.Wait() connectingLoading.Wait() bind() - e.state = stateClosed + e.state = StateClose tcpip.AsyncLoading.Done() }() } fallthrough - case stateError: + case StateError: tcpip.DeleteDanglingEndpoint(e) } } diff --git a/pkg/tcpip/transport/tcp/rcv.go b/pkg/tcpip/transport/tcp/rcv.go index b08a0e356..f02fa6105 100644 --- a/pkg/tcpip/transport/tcp/rcv.go +++ b/pkg/tcpip/transport/tcp/rcv.go @@ -134,6 +134,7 @@ func (r *receiver) consumeSegment(s *segment, segSeq seqnum.Value, segLen seqnum // sequence numbers that have been consumed. TrimSACKBlockList(&r.ep.sack, r.rcvNxt) + // Handle FIN or FIN-ACK. 
if s.flagIsSet(header.TCPFlagFin) { r.rcvNxt++ @@ -144,6 +145,25 @@ func (r *receiver) consumeSegment(s *segment, segSeq seqnum.Value, segLen seqnum r.closed = true r.ep.readyToRead(nil) + // We just received a FIN, our next state depends on whether we sent a + // FIN already or not. + r.ep.mu.Lock() + switch r.ep.state { + case StateEstablished: + r.ep.state = StateCloseWait + case StateFinWait1: + if s.flagIsSet(header.TCPFlagAck) { + // FIN-ACK, transition to TIME-WAIT. + r.ep.state = StateTimeWait + } else { + // Simultaneous close, expecting a final ACK. + r.ep.state = StateClosing + } + case StateFinWait2: + r.ep.state = StateTimeWait + } + r.ep.mu.Unlock() + // Flush out any pending segments, except the very first one if // it happens to be the one we're handling now because the // caller is using it. @@ -156,6 +176,23 @@ func (r *receiver) consumeSegment(s *segment, segSeq seqnum.Value, segLen seqnum r.pendingRcvdSegments[i].decRef() } r.pendingRcvdSegments = r.pendingRcvdSegments[:first] + + return true + } + + // Handle ACK (not FIN-ACK, which we handled above) during one of the + // shutdown states. + if s.flagIsSet(header.TCPFlagAck) { + r.ep.mu.Lock() + switch r.ep.state { + case StateFinWait1: + r.ep.state = StateFinWait2 + case StateClosing: + r.ep.state = StateTimeWait + case StateLastAck: + r.ep.state = StateClose + } + r.ep.mu.Unlock() } return true diff --git a/pkg/tcpip/transport/tcp/snd.go b/pkg/tcpip/transport/tcp/snd.go index 3464e4be7..b236d7af2 100644 --- a/pkg/tcpip/transport/tcp/snd.go +++ b/pkg/tcpip/transport/tcp/snd.go @@ -632,6 +632,10 @@ func (s *sender) maybeSendSegment(seg *segment, limit int, end seqnum.Value) (se } seg.flags = header.TCPFlagAck | header.TCPFlagFin segEnd = seg.sequenceNumber.Add(1) + // Transition to FIN-WAIT1 state since we're initiating an active close. + s.ep.mu.Lock() + s.ep.state = StateFinWait1 + s.ep.mu.Unlock() } else { // We're sending a non-FIN segment. if seg.flags&header.TCPFlagFin != 0 { diff --git a/pkg/tcpip/transport/tcp/tcp_test.go b/pkg/tcpip/transport/tcp/tcp_test.go index b8f0ccaf1..56b490aaa 100644 --- a/pkg/tcpip/transport/tcp/tcp_test.go +++ b/pkg/tcpip/transport/tcp/tcp_test.go @@ -168,8 +168,8 @@ func TestTCPResetsSentIncrement(t *testing.T) { // Receive the SYN-ACK reply. b := c.GetPacket() - tcp := header.TCP(header.IPv4(b).Payload()) - c.IRS = seqnum.Value(tcp.SequenceNumber()) + tcpHdr := header.TCP(header.IPv4(b).Payload()) + c.IRS = seqnum.Value(tcpHdr.SequenceNumber()) ackHeaders := &context.Headers{ SrcPort: context.TestPort, @@ -269,8 +269,8 @@ func TestConnectResetAfterClose(t *testing.T) { time.Sleep(3 * time.Second) for { b := c.GetPacket() - tcp := header.TCP(header.IPv4(b).Payload()) - if tcp.Flags() == header.TCPFlagAck|header.TCPFlagFin { + tcpHdr := header.TCP(header.IPv4(b).Payload()) + if tcpHdr.Flags() == header.TCPFlagAck|header.TCPFlagFin { // This is a retransmit of the FIN, ignore it. continue } @@ -553,9 +553,13 @@ func TestRstOnCloseWithUnreadData(t *testing.T) { // We shouldn't consume a sequence number on RST. checker.SeqNum(uint32(c.IRS)+1), )) + // The RST puts the endpoint into an error state. + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateError; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } - // This final should be ignored because an ACK on a reset doesn't - // mean anything. + // This final ACK should be ignored because an ACK on a reset doesn't mean + // anything. 
c.SendPacket(nil, &context.Headers{ SrcPort: context.TestPort, DstPort: c.Port, @@ -618,6 +622,10 @@ func TestRstOnCloseWithUnreadDataFinConvertRst(t *testing.T) { checker.SeqNum(uint32(c.IRS)+1), )) + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateFinWait1; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + // Cause a RST to be generated by closing the read end now since we have // unread data. c.EP.Shutdown(tcpip.ShutdownRead) @@ -630,6 +638,10 @@ func TestRstOnCloseWithUnreadDataFinConvertRst(t *testing.T) { // We shouldn't consume a sequence number on RST. checker.SeqNum(uint32(c.IRS)+1), )) + // The RST puts the endpoint into an error state. + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateError; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } // The ACK to the FIN should now be rejected since the connection has been // closed by a RST. @@ -1510,8 +1522,8 @@ func testBrokenUpWrite(t *testing.T, c *context.Context, maxPayload int) { for bytesReceived != dataLen { b := c.GetPacket() numPackets++ - tcp := header.TCP(header.IPv4(b).Payload()) - payloadLen := len(tcp.Payload()) + tcpHdr := header.TCP(header.IPv4(b).Payload()) + payloadLen := len(tcpHdr.Payload()) checker.IPv4(t, b, checker.TCP( checker.DstPort(context.TestPort), @@ -1522,7 +1534,7 @@ func testBrokenUpWrite(t *testing.T, c *context.Context, maxPayload int) { ) pdata := data[bytesReceived : bytesReceived+payloadLen] - if p := tcp.Payload(); !bytes.Equal(pdata, p) { + if p := tcpHdr.Payload(); !bytes.Equal(pdata, p) { t.Fatalf("got data = %v, want = %v", p, pdata) } bytesReceived += payloadLen @@ -1530,7 +1542,7 @@ func testBrokenUpWrite(t *testing.T, c *context.Context, maxPayload int) { if c.TimeStampEnabled { // If timestamp option is enabled, echo back the timestamp and increment // the TSEcr value included in the packet and send that back as the TSVal. - parsedOpts := tcp.ParsedOptions() + parsedOpts := tcpHdr.ParsedOptions() tsOpt := [12]byte{header.TCPOptionNOP, header.TCPOptionNOP} header.EncodeTSOption(parsedOpts.TSEcr+1, parsedOpts.TSVal, tsOpt[2:]) options = tsOpt[:] @@ -1757,8 +1769,8 @@ func TestSynOptionsOnActiveConnect(t *testing.T) { ), ) - tcp := header.TCP(header.IPv4(b).Payload()) - c.IRS = seqnum.Value(tcp.SequenceNumber()) + tcpHdr := header.TCP(header.IPv4(b).Payload()) + c.IRS = seqnum.Value(tcpHdr.SequenceNumber()) // Wait for retransmit. time.Sleep(1 * time.Second) @@ -1766,8 +1778,8 @@ func TestSynOptionsOnActiveConnect(t *testing.T) { checker.TCP( checker.DstPort(context.TestPort), checker.TCPFlags(header.TCPFlagSyn), - checker.SrcPort(tcp.SourcePort()), - checker.SeqNum(tcp.SequenceNumber()), + checker.SrcPort(tcpHdr.SourcePort()), + checker.SeqNum(tcpHdr.SequenceNumber()), checker.TCPSynOptions(header.TCPSynOptions{MSS: mss, WS: wndScale}), ), ) @@ -1775,8 +1787,8 @@ func TestSynOptionsOnActiveConnect(t *testing.T) { // Send SYN-ACK. 
iss := seqnum.Value(789) c.SendPacket(nil, &context.Headers{ - SrcPort: tcp.DestinationPort(), - DstPort: tcp.SourcePort(), + SrcPort: tcpHdr.DestinationPort(), + DstPort: tcpHdr.SourcePort(), Flags: header.TCPFlagSyn | header.TCPFlagAck, SeqNum: iss, AckNum: c.IRS.Add(1), @@ -2523,8 +2535,8 @@ func TestReceivedSegmentQueuing(t *testing.T) { checker.TCPFlags(header.TCPFlagAck), ), ) - tcp := header.TCP(header.IPv4(b).Payload()) - ack := seqnum.Value(tcp.AckNumber()) + tcpHdr := header.TCP(header.IPv4(b).Payload()) + ack := seqnum.Value(tcpHdr.AckNumber()) if ack == last { break } @@ -2568,6 +2580,10 @@ func TestReadAfterClosedState(t *testing.T) { ), ) + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateFinWait1; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + // Send some data and acknowledge the FIN. data := []byte{1, 2, 3} c.SendPacket(data, &context.Headers{ @@ -2589,9 +2605,15 @@ func TestReadAfterClosedState(t *testing.T) { ), ) - // Give the stack the chance to transition to closed state. + // Give the stack the chance to transition to closed state. Note that since + // both the sender and receiver are now closed, we effectively skip the + // TIME-WAIT state. time.Sleep(1 * time.Second) + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateClose; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + // Wait for receive to be notified. select { case <-ch: @@ -3680,9 +3702,15 @@ func TestPassiveConnectionAttemptIncrement(t *testing.T) { if err := ep.Bind(tcpip.FullAddress{Addr: context.StackAddr, Port: context.StackPort}); err != nil { t.Fatalf("Bind failed: %v", err) } + if got, want := tcp.EndpointState(ep.State()), tcp.StateBound; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } if err := c.EP.Listen(1); err != nil { t.Fatalf("Listen failed: %v", err) } + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateListen; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } stats := c.Stack().Stats() want := stats.TCP.PassiveConnectionOpenings.Value() + 1 @@ -3826,3 +3854,68 @@ func TestPassiveFailedConnectionAttemptIncrement(t *testing.T) { } } } + +func TestEndpointBindListenAcceptState(t *testing.T) { + c := context.New(t, defaultMTU) + defer c.Cleanup() + wq := &waiter.Queue{} + ep, err := c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + if err := ep.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil { + t.Fatalf("Bind failed: %v", err) + } + if got, want := tcp.EndpointState(ep.State()), tcp.StateBound; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + + if err := ep.Listen(10); err != nil { + t.Fatalf("Listen failed: %v", err) + } + if got, want := tcp.EndpointState(ep.State()), tcp.StateListen; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + + c.PassiveConnectWithOptions(100, 5, header.TCPSynOptions{MSS: defaultIPv4MSS}) + + // Try to accept the connection. + we, ch := waiter.NewChannelEntry(nil) + wq.EventRegister(&we, waiter.EventIn) + defer wq.EventUnregister(&we) + + aep, _, err := ep.Accept() + if err == tcpip.ErrWouldBlock { + // Wait for connection to be established. 
+ select { + case <-ch: + aep, _, err = ep.Accept() + if err != nil { + t.Fatalf("Accept failed: %v", err) + } + + case <-time.After(1 * time.Second): + t.Fatalf("Timed out waiting for accept") + } + } + if got, want := tcp.EndpointState(aep.State()), tcp.StateEstablished; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + // Listening endpoint remains in listen state. + if got, want := tcp.EndpointState(ep.State()), tcp.StateListen; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + + ep.Close() + // Give worker goroutines time to receive the close notification. + time.Sleep(1 * time.Second) + if got, want := tcp.EndpointState(ep.State()), tcp.StateClose; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + // Accepted endpoint remains open when the listen endpoint is closed. + if got, want := tcp.EndpointState(aep.State()), tcp.StateEstablished; got != want { + t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + +} diff --git a/pkg/tcpip/transport/tcp/testing/context/context.go b/pkg/tcpip/transport/tcp/testing/context/context.go index 6e12413c6..69a43b6f4 100644 --- a/pkg/tcpip/transport/tcp/testing/context/context.go +++ b/pkg/tcpip/transport/tcp/testing/context/context.go @@ -532,6 +532,9 @@ func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum. if err != nil { c.t.Fatalf("NewEndpoint failed: %v", err) } + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateInitial; got != want { + c.t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } if epRcvBuf != nil { if err := c.EP.SetSockOpt(*epRcvBuf); err != nil { @@ -557,13 +560,16 @@ func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum. checker.TCPFlags(header.TCPFlagSyn), ), ) + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateSynSent; got != want { + c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got) + } - tcp := header.TCP(header.IPv4(b).Payload()) - c.IRS = seqnum.Value(tcp.SequenceNumber()) + tcpHdr := header.TCP(header.IPv4(b).Payload()) + c.IRS = seqnum.Value(tcpHdr.SequenceNumber()) c.SendPacket(nil, &Headers{ - SrcPort: tcp.DestinationPort(), - DstPort: tcp.SourcePort(), + SrcPort: tcpHdr.DestinationPort(), + DstPort: tcpHdr.SourcePort(), Flags: header.TCPFlagSyn | header.TCPFlagAck, SeqNum: iss, AckNum: c.IRS.Add(1), @@ -591,8 +597,11 @@ func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum. case <-time.After(1 * time.Second): c.t.Fatalf("Timed out waiting for connection") } + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateEstablished; got != want { + c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got) + } - c.Port = tcp.SourcePort() + c.Port = tcpHdr.SourcePort() } // RawEndpoint is just a small wrapper around a TCP endpoint's state to make @@ -690,6 +699,9 @@ func (c *Context) CreateConnectedWithOptions(wantOptions header.TCPSynOptions) * if err != nil { c.t.Fatalf("c.s.NewEndpoint(tcp, ipv4...) = %v", err) } + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateInitial; got != want { + c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got) + } // Start connection attempt. 
waitEntry, notifyCh := waiter.NewChannelEntry(nil) @@ -719,6 +731,10 @@ func (c *Context) CreateConnectedWithOptions(wantOptions header.TCPSynOptions) * }), ), ) + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateSynSent; got != want { + c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got) + } + tcpSeg := header.TCP(header.IPv4(b).Payload()) synOptions := header.ParseSynOptions(tcpSeg.Options(), false) @@ -782,6 +798,9 @@ func (c *Context) CreateConnectedWithOptions(wantOptions header.TCPSynOptions) * case <-time.After(1 * time.Second): c.t.Fatalf("Timed out waiting for connection") } + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateEstablished; got != want { + c.t.Fatalf("Unexpected endpoint state: want %v, got %v", want, got) + } // Store the source port in use by the endpoint. c.Port = tcpSeg.SourcePort() @@ -821,10 +840,16 @@ func (c *Context) AcceptWithOptions(wndScale int, synOptions header.TCPSynOption if err := ep.Bind(tcpip.FullAddress{Port: StackPort}); err != nil { c.t.Fatalf("Bind failed: %v", err) } + if got, want := tcp.EndpointState(ep.State()), tcp.StateBound; got != want { + c.t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } if err := ep.Listen(10); err != nil { c.t.Fatalf("Listen failed: %v", err) } + if got, want := tcp.EndpointState(ep.State()), tcp.StateListen; got != want { + c.t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } rep := c.PassiveConnectWithOptions(100, wndScale, synOptions) @@ -847,6 +872,10 @@ func (c *Context) AcceptWithOptions(wndScale int, synOptions header.TCPSynOption c.t.Fatalf("Timed out waiting for accept") } } + if got, want := tcp.EndpointState(c.EP.State()), tcp.StateEstablished; got != want { + c.t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) + } + return rep } diff --git a/pkg/tcpip/transport/udp/endpoint.go b/pkg/tcpip/transport/udp/endpoint.go index 3d52a4f31..fa7278286 100644 --- a/pkg/tcpip/transport/udp/endpoint.go +++ b/pkg/tcpip/transport/udp/endpoint.go @@ -1000,3 +1000,9 @@ func (e *endpoint) HandlePacket(r *stack.Route, id stack.TransportEndpointID, vv // HandleControlPacket implements stack.TransportEndpoint.HandleControlPacket. func (e *endpoint) HandleControlPacket(id stack.TransportEndpointID, typ stack.ControlType, extra uint32, vv buffer.VectorisedView) { } + +// State implements socket.Socket.State. +func (e *endpoint) State() uint32 { + // TODO(b/112063468): Translate internal state to values returned by Linux. + return 0 +} diff --git a/test/syscalls/linux/proc_net_unix.cc b/test/syscalls/linux/proc_net_unix.cc index 6d745f728..82d325c17 100644 --- a/test/syscalls/linux/proc_net_unix.cc +++ b/test/syscalls/linux/proc_net_unix.cc @@ -34,6 +34,16 @@ using absl::StrFormat; constexpr char kProcNetUnixHeader[] = "Num RefCount Protocol Flags Type St Inode Path"; +// Possible values of the "st" field in a /proc/net/unix entry. Source: Linux +// kernel, include/uapi/linux/net.h. +enum { + SS_FREE = 0, // Not allocated + SS_UNCONNECTED, // Unconnected to any socket + SS_CONNECTING, // In process of connecting + SS_CONNECTED, // Connected to socket + SS_DISCONNECTING // In process of disconnecting +}; + // UnixEntry represents a single entry from /proc/net/unix. 
struct UnixEntry { uintptr_t addr; @@ -71,7 +81,12 @@ PosixErrorOr> ProcNetUnixEntries() { bool skipped_header = false; std::vector entries; std::vector lines = absl::StrSplit(content, absl::ByAnyChar("\n")); + std::cerr << "" << std::endl; for (std::string line : lines) { + // Emit the proc entry to the test output to provide context for the test + // results. + std::cerr << line << std::endl; + if (!skipped_header) { EXPECT_EQ(line, kProcNetUnixHeader); skipped_header = true; @@ -139,6 +154,7 @@ PosixErrorOr> ProcNetUnixEntries() { entries.push_back(entry); } + std::cerr << "" << std::endl; return entries; } @@ -241,6 +257,168 @@ TEST(ProcNetUnix, SocketPair) { EXPECT_EQ(entries.size(), 2); } +TEST(ProcNetUnix, StreamSocketStateUnconnectedOnBind) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE( + AbstractUnboundUnixDomainSocketPair(SOCK_STREAM).Create()); + + ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(), + sockets->first_addr_size()), + SyscallSucceeds()); + + std::vector entries = + ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + + const std::string address = ExtractPath(sockets->first_addr()); + UnixEntry bind_entry; + ASSERT_TRUE(FindByPath(entries, &bind_entry, address)); + EXPECT_EQ(bind_entry.state, SS_UNCONNECTED); +} + +TEST(ProcNetUnix, StreamSocketStateStateUnconnectedOnListen) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE( + AbstractUnboundUnixDomainSocketPair(SOCK_STREAM).Create()); + + ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(), + sockets->first_addr_size()), + SyscallSucceeds()); + + std::vector entries = + ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + + const std::string address = ExtractPath(sockets->first_addr()); + UnixEntry bind_entry; + ASSERT_TRUE(FindByPath(entries, &bind_entry, address)); + EXPECT_EQ(bind_entry.state, SS_UNCONNECTED); + + ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds()); + + entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + UnixEntry listen_entry; + ASSERT_TRUE( + FindByPath(entries, &listen_entry, ExtractPath(sockets->first_addr()))); + EXPECT_EQ(listen_entry.state, SS_UNCONNECTED); + // The bind and listen entries should refer to the same socket. + EXPECT_EQ(listen_entry.inode, bind_entry.inode); +} + +TEST(ProcNetUnix, StreamSocketStateStateConnectedOnAccept) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE( + AbstractUnboundUnixDomainSocketPair(SOCK_STREAM).Create()); + const std::string address = ExtractPath(sockets->first_addr()); + ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(), + sockets->first_addr_size()), + SyscallSucceeds()); + ASSERT_THAT(listen(sockets->first_fd(), 5), SyscallSucceeds()); + std::vector entries = + ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + UnixEntry listen_entry; + ASSERT_TRUE( + FindByPath(entries, &listen_entry, ExtractPath(sockets->first_addr()))); + + ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(), + sockets->first_addr_size()), + SyscallSucceeds()); + + int clientfd; + ASSERT_THAT(clientfd = accept(sockets->first_fd(), nullptr, nullptr), + SyscallSucceeds()); + + // Find the entry for the accepted socket. UDS proc entries don't have a + // remote address, so we distinguish the accepted socket from the listen + // socket by checking for a different inode. 
+ entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + UnixEntry accept_entry; + ASSERT_TRUE(FindBy( + entries, &accept_entry, [address, listen_entry](const UnixEntry& e) { + return e.path == address && e.inode != listen_entry.inode; + })); + EXPECT_EQ(accept_entry.state, SS_CONNECTED); + // Listen entry should still be in SS_UNCONNECTED state. + ASSERT_TRUE(FindBy(entries, &listen_entry, + [&sockets, listen_entry](const UnixEntry& e) { + return e.path == ExtractPath(sockets->first_addr()) && + e.inode == listen_entry.inode; + })); + EXPECT_EQ(listen_entry.state, SS_UNCONNECTED); +} + +TEST(ProcNetUnix, DgramSocketStateDisconnectingOnBind) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE( + AbstractUnboundUnixDomainSocketPair(SOCK_DGRAM).Create()); + + std::vector entries = + ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + + // On gVisor, the only two UDS on the system are the ones we just created and + // we rely on this to locate the test socket entries in the remainder of the + // test. On a generic Linux system, we have no easy way to locate the + // corresponding entries, as they don't have an address yet. + if (IsRunningOnGvisor()) { + ASSERT_EQ(entries.size(), 2); + for (auto e : entries) { + ASSERT_EQ(e.state, SS_DISCONNECTING); + } + } + + ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(), + sockets->first_addr_size()), + SyscallSucceeds()); + + entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + const std::string address = ExtractPath(sockets->first_addr()); + UnixEntry bind_entry; + ASSERT_TRUE(FindByPath(entries, &bind_entry, address)); + EXPECT_EQ(bind_entry.state, SS_UNCONNECTED); +} + +TEST(ProcNetUnix, DgramSocketStateConnectingOnConnect) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE( + AbstractUnboundUnixDomainSocketPair(SOCK_DGRAM).Create()); + + std::vector entries = + ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + + // On gVisor, the only two UDS on the system are the ones we just created and + // we rely on this to locate the test socket entries in the remainder of the + // test. On a generic Linux system, we have no easy way to locate the + // corresponding entries, as they don't have an address yet. + if (IsRunningOnGvisor()) { + ASSERT_EQ(entries.size(), 2); + for (auto e : entries) { + ASSERT_EQ(e.state, SS_DISCONNECTING); + } + } + + ASSERT_THAT(bind(sockets->first_fd(), sockets->first_addr(), + sockets->first_addr_size()), + SyscallSucceeds()); + + entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + const std::string address = ExtractPath(sockets->first_addr()); + UnixEntry bind_entry; + ASSERT_TRUE(FindByPath(entries, &bind_entry, address)); + + ASSERT_THAT(connect(sockets->second_fd(), sockets->first_addr(), + sockets->first_addr_size()), + SyscallSucceeds()); + + entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUnixEntries()); + + // Once again, we have no easy way to identify the connecting socket as it has + // no listed address. We can only identify the entry as the "non-bind socket + // entry" on gVisor, where we're guaranteed to have only the two entries we + // create during this test. 
+ if (IsRunningOnGvisor()) { + ASSERT_EQ(entries.size(), 2); + UnixEntry connect_entry; + ASSERT_TRUE( + FindBy(entries, &connect_entry, [bind_entry](const UnixEntry& e) { + return e.inode != bind_entry.inode; + })); + EXPECT_EQ(connect_entry.state, SS_CONNECTING); + } +} + } // namespace } // namespace testing } // namespace gvisor -- cgit v1.2.3 From b3f104507d7a04c0ca058cbcacc5ff78d853f4ba Mon Sep 17 00:00:00 2001 From: Jamie Liu Date: Thu, 6 Jun 2019 16:27:09 -0700 Subject: "Implement" mbind(2). We still only advertise a single NUMA node, and ignore mempolicy accordingly, but mbind() at least now succeeds and has effects reflected by get_mempolicy(). Also fix handling of nodemasks: round sizes to unsigned long (as documented and done by Linux), and zero trailing bits when copying them out. PiperOrigin-RevId: 251950859 --- pkg/abi/linux/mm.go | 9 + pkg/sentry/kernel/task.go | 7 +- pkg/sentry/kernel/task_sched.go | 4 +- pkg/sentry/mm/mm.go | 6 + pkg/sentry/mm/syscalls.go | 53 +++++ pkg/sentry/mm/vma.go | 3 + pkg/sentry/syscalls/linux/BUILD | 1 + pkg/sentry/syscalls/linux/linux64.go | 3 +- pkg/sentry/syscalls/linux/sys_mempolicy.go | 312 +++++++++++++++++++++++++++++ pkg/sentry/syscalls/linux/sys_mmap.go | 145 -------------- test/syscalls/linux/BUILD | 1 + test/syscalls/linux/mempolicy.cc | 37 +++- 12 files changed, 426 insertions(+), 155 deletions(-) create mode 100644 pkg/sentry/syscalls/linux/sys_mempolicy.go (limited to 'test/syscalls/linux') diff --git a/pkg/abi/linux/mm.go b/pkg/abi/linux/mm.go index 0b02f938a..cd043dac3 100644 --- a/pkg/abi/linux/mm.go +++ b/pkg/abi/linux/mm.go @@ -114,3 +114,12 @@ const ( MPOL_MODE_FLAGS = (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES) ) + +// Flags for mbind(2). +const ( + MPOL_MF_STRICT = 1 << 0 + MPOL_MF_MOVE = 1 << 1 + MPOL_MF_MOVE_ALL = 1 << 2 + + MPOL_MF_VALID = MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL +) diff --git a/pkg/sentry/kernel/task.go b/pkg/sentry/kernel/task.go index f9378c2de..4d889422f 100644 --- a/pkg/sentry/kernel/task.go +++ b/pkg/sentry/kernel/task.go @@ -455,12 +455,13 @@ type Task struct { // single numa node, all policies are no-ops. We only track this information // so that we can return reasonable values if the application calls // get_mempolicy(2) after setting a non-default policy. Note that in the - // real syscall, nodemask can be longer than 4 bytes, but we always report a - // single node so never need to save more than a single bit. + // real syscall, nodemask can be longer than a single unsigned long, but we + // always report a single node so never need to save more than a single + // bit. // // numaPolicy and numaNodeMask are protected by mu. numaPolicy int32 - numaNodeMask uint32 + numaNodeMask uint64 // If netns is true, the task is in a non-root network namespace. Network // namespaces aren't currently implemented in full; being in a network diff --git a/pkg/sentry/kernel/task_sched.go b/pkg/sentry/kernel/task_sched.go index 5455f6ea9..1c94ab11b 100644 --- a/pkg/sentry/kernel/task_sched.go +++ b/pkg/sentry/kernel/task_sched.go @@ -622,14 +622,14 @@ func (t *Task) SetNiceness(n int) { } // NumaPolicy returns t's current numa policy. -func (t *Task) NumaPolicy() (policy int32, nodeMask uint32) { +func (t *Task) NumaPolicy() (policy int32, nodeMask uint64) { t.mu.Lock() defer t.mu.Unlock() return t.numaPolicy, t.numaNodeMask } // SetNumaPolicy sets t's numa policy. 
-func (t *Task) SetNumaPolicy(policy int32, nodeMask uint32) { +func (t *Task) SetNumaPolicy(policy int32, nodeMask uint64) { t.mu.Lock() defer t.mu.Unlock() t.numaPolicy = policy diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go index 0a026ff8c..604866d04 100644 --- a/pkg/sentry/mm/mm.go +++ b/pkg/sentry/mm/mm.go @@ -276,6 +276,12 @@ type vma struct { mlockMode memmap.MLockMode + // numaPolicy is the NUMA policy for this vma set by mbind(). + numaPolicy int32 + + // numaNodemask is the NUMA nodemask for this vma set by mbind(). + numaNodemask uint64 + // If id is not nil, it controls the lifecycle of mappable and provides vma // metadata shown in /proc/[pid]/maps, and the vma holds a reference. id memmap.MappingIdentity diff --git a/pkg/sentry/mm/syscalls.go b/pkg/sentry/mm/syscalls.go index af1e53f5d..9cf136532 100644 --- a/pkg/sentry/mm/syscalls.go +++ b/pkg/sentry/mm/syscalls.go @@ -973,6 +973,59 @@ func (mm *MemoryManager) MLockAll(ctx context.Context, opts MLockAllOpts) error return nil } +// NumaPolicy implements the semantics of Linux's get_mempolicy(MPOL_F_ADDR). +func (mm *MemoryManager) NumaPolicy(addr usermem.Addr) (int32, uint64, error) { + mm.mappingMu.RLock() + defer mm.mappingMu.RUnlock() + vseg := mm.vmas.FindSegment(addr) + if !vseg.Ok() { + return 0, 0, syserror.EFAULT + } + vma := vseg.ValuePtr() + return vma.numaPolicy, vma.numaNodemask, nil +} + +// SetNumaPolicy implements the semantics of Linux's mbind(). +func (mm *MemoryManager) SetNumaPolicy(addr usermem.Addr, length uint64, policy int32, nodemask uint64) error { + if !addr.IsPageAligned() { + return syserror.EINVAL + } + // Linux allows this to overflow. + la, _ := usermem.Addr(length).RoundUp() + ar, ok := addr.ToRange(uint64(la)) + if !ok { + return syserror.EINVAL + } + if ar.Length() == 0 { + return nil + } + + mm.mappingMu.Lock() + defer mm.mappingMu.Unlock() + defer func() { + mm.vmas.MergeRange(ar) + mm.vmas.MergeAdjacent(ar) + }() + vseg := mm.vmas.LowerBoundSegment(ar.Start) + lastEnd := ar.Start + for { + if !vseg.Ok() || lastEnd < vseg.Start() { + // "EFAULT: ... there was an unmapped hole in the specified memory + // range specified [sic] by addr and len." - mbind(2) + return syserror.EFAULT + } + vseg = mm.vmas.Isolate(vseg, ar) + vma := vseg.ValuePtr() + vma.numaPolicy = policy + vma.numaNodemask = nodemask + lastEnd = vseg.End() + if ar.End <= lastEnd { + return nil + } + vseg, _ = vseg.NextNonEmpty() + } +} + // Decommit implements the semantics of Linux's madvise(MADV_DONTNEED). 
func (mm *MemoryManager) Decommit(addr usermem.Addr, length uint64) error { ar, ok := addr.ToRange(length) diff --git a/pkg/sentry/mm/vma.go b/pkg/sentry/mm/vma.go index 02203f79f..0af8de5b0 100644 --- a/pkg/sentry/mm/vma.go +++ b/pkg/sentry/mm/vma.go @@ -107,6 +107,7 @@ func (mm *MemoryManager) createVMALocked(ctx context.Context, opts memmap.MMapOp private: opts.Private, growsDown: opts.GrowsDown, mlockMode: opts.MLockMode, + numaPolicy: linux.MPOL_DEFAULT, id: opts.MappingIdentity, hint: opts.Hint, } @@ -436,6 +437,8 @@ func (vmaSetFunctions) Merge(ar1 usermem.AddrRange, vma1 vma, ar2 usermem.AddrRa vma1.private != vma2.private || vma1.growsDown != vma2.growsDown || vma1.mlockMode != vma2.mlockMode || + vma1.numaPolicy != vma2.numaPolicy || + vma1.numaNodemask != vma2.numaNodemask || vma1.id != vma2.id || vma1.hint != vma2.hint { return vma{}, false diff --git a/pkg/sentry/syscalls/linux/BUILD b/pkg/sentry/syscalls/linux/BUILD index f76989ae2..1c057526b 100644 --- a/pkg/sentry/syscalls/linux/BUILD +++ b/pkg/sentry/syscalls/linux/BUILD @@ -19,6 +19,7 @@ go_library( "sys_identity.go", "sys_inotify.go", "sys_lseek.go", + "sys_mempolicy.go", "sys_mmap.go", "sys_mount.go", "sys_pipe.go", diff --git a/pkg/sentry/syscalls/linux/linux64.go b/pkg/sentry/syscalls/linux/linux64.go index 3e4d312af..ad88b1391 100644 --- a/pkg/sentry/syscalls/linux/linux64.go +++ b/pkg/sentry/syscalls/linux/linux64.go @@ -360,8 +360,7 @@ var AMD64 = &kernel.SyscallTable{ 235: Utimes, // @Syscall(Vserver, note:Not implemented by Linux) 236: syscalls.Error(syscall.ENOSYS), // Vserver, not implemented by Linux - // @Syscall(Mbind, returns:EPERM or ENOSYS, note:Returns EPERM if the process does not have cap_sys_nice; ENOSYS otherwise), TODO(b/117792295) - 237: syscalls.CapError(linux.CAP_SYS_NICE), // may require cap_sys_nice + 237: Mbind, 238: SetMempolicy, 239: GetMempolicy, // 240: @Syscall(MqOpen), TODO(b/29354921) diff --git a/pkg/sentry/syscalls/linux/sys_mempolicy.go b/pkg/sentry/syscalls/linux/sys_mempolicy.go new file mode 100644 index 000000000..652b2c206 --- /dev/null +++ b/pkg/sentry/syscalls/linux/sys_mempolicy.go @@ -0,0 +1,312 @@ +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package linux + +import ( + "fmt" + + "gvisor.googlesource.com/gvisor/pkg/abi/linux" + "gvisor.googlesource.com/gvisor/pkg/sentry/arch" + "gvisor.googlesource.com/gvisor/pkg/sentry/kernel" + "gvisor.googlesource.com/gvisor/pkg/sentry/usermem" + "gvisor.googlesource.com/gvisor/pkg/syserror" +) + +// We unconditionally report a single NUMA node. This also means that our +// "nodemask_t" is a single unsigned long (uint64). +const ( + maxNodes = 1 + allowedNodemask = (1 << maxNodes) - 1 +) + +func copyInNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32) (uint64, error) { + // "nodemask points to a bit mask of node IDs that contains up to maxnode + // bits. 
The bit mask size is rounded to the next multiple of + // sizeof(unsigned long), but the kernel will use bits only up to maxnode. + // A NULL value of nodemask or a maxnode value of zero specifies the empty + // set of nodes. If the value of maxnode is zero, the nodemask argument is + // ignored." - set_mempolicy(2). Unfortunately, most of this is inaccurate + // because of what appears to be a bug: mm/mempolicy.c:get_nodes() uses + // maxnode-1, not maxnode, as the number of bits. + bits := maxnode - 1 + if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0 + return 0, syserror.EINVAL + } + if bits == 0 { + return 0, nil + } + // Copy in the whole nodemask. + numUint64 := (bits + 63) / 64 + buf := t.CopyScratchBuffer(int(numUint64) * 8) + if _, err := t.CopyInBytes(addr, buf); err != nil { + return 0, err + } + val := usermem.ByteOrder.Uint64(buf) + // Check that only allowed bits in the first unsigned long in the nodemask + // are set. + if val&^allowedNodemask != 0 { + return 0, syserror.EINVAL + } + // Check that all remaining bits in the nodemask are 0. + for i := 8; i < len(buf); i++ { + if buf[i] != 0 { + return 0, syserror.EINVAL + } + } + return val, nil +} + +func copyOutNodemask(t *kernel.Task, addr usermem.Addr, maxnode uint32, val uint64) error { + // mm/mempolicy.c:copy_nodes_to_user() also uses maxnode-1 as the number of + // bits. + bits := maxnode - 1 + if bits > usermem.PageSize*8 { // also handles overflow from maxnode == 0 + return syserror.EINVAL + } + if bits == 0 { + return nil + } + // Copy out the first unsigned long in the nodemask. + buf := t.CopyScratchBuffer(8) + usermem.ByteOrder.PutUint64(buf, val) + if _, err := t.CopyOutBytes(addr, buf); err != nil { + return err + } + // Zero out remaining unsigned longs in the nodemask. + if bits > 64 { + remAddr, ok := addr.AddLength(8) + if !ok { + return syserror.EFAULT + } + remUint64 := (bits - 1) / 64 + if _, err := t.MemoryManager().ZeroOut(t, remAddr, int64(remUint64)*8, usermem.IOOpts{ + AddressSpaceActive: true, + }); err != nil { + return err + } + } + return nil +} + +// GetMempolicy implements the syscall get_mempolicy(2). +func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + mode := args[0].Pointer() + nodemask := args[1].Pointer() + maxnode := args[2].Uint() + addr := args[3].Pointer() + flags := args[4].Uint() + + if flags&^(linux.MPOL_F_NODE|linux.MPOL_F_ADDR|linux.MPOL_F_MEMS_ALLOWED) != 0 { + return 0, nil, syserror.EINVAL + } + nodeFlag := flags&linux.MPOL_F_NODE != 0 + addrFlag := flags&linux.MPOL_F_ADDR != 0 + memsAllowed := flags&linux.MPOL_F_MEMS_ALLOWED != 0 + + // "EINVAL: The value specified by maxnode is less than the number of node + // IDs supported by the system." - get_mempolicy(2) + if nodemask != 0 && maxnode < maxNodes { + return 0, nil, syserror.EINVAL + } + + // "If flags specifies MPOL_F_MEMS_ALLOWED [...], the mode argument is + // ignored and the set of nodes (memories) that the thread is allowed to + // specify in subsequent calls to mbind(2) or set_mempolicy(2) (in the + // absence of any mode flags) is returned in nodemask." + if memsAllowed { + // "It is not permitted to combine MPOL_F_MEMS_ALLOWED with either + // MPOL_F_ADDR or MPOL_F_NODE." 
+ if nodeFlag || addrFlag { + return 0, nil, syserror.EINVAL + } + if err := copyOutNodemask(t, nodemask, maxnode, allowedNodemask); err != nil { + return 0, nil, err + } + return 0, nil, nil + } + + // "If flags specifies MPOL_F_ADDR, then information is returned about the + // policy governing the memory address given in addr. ... If the mode + // argument is not NULL, then get_mempolicy() will store the policy mode + // and any optional mode flags of the requested NUMA policy in the location + // pointed to by this argument. If nodemask is not NULL, then the nodemask + // associated with the policy will be stored in the location pointed to by + // this argument." + if addrFlag { + policy, nodemaskVal, err := t.MemoryManager().NumaPolicy(addr) + if err != nil { + return 0, nil, err + } + if nodeFlag { + // "If flags specifies both MPOL_F_NODE and MPOL_F_ADDR, + // get_mempolicy() will return the node ID of the node on which the + // address addr is allocated into the location pointed to by mode. + // If no page has yet been allocated for the specified address, + // get_mempolicy() will allocate a page as if the thread had + // performed a read (load) access to that address, and return the + // ID of the node where that page was allocated." + buf := t.CopyScratchBuffer(1) + _, err := t.CopyInBytes(addr, buf) + if err != nil { + return 0, nil, err + } + policy = 0 // maxNodes == 1 + } + if mode != 0 { + if _, err := t.CopyOut(mode, policy); err != nil { + return 0, nil, err + } + } + if nodemask != 0 { + if err := copyOutNodemask(t, nodemask, maxnode, nodemaskVal); err != nil { + return 0, nil, err + } + } + return 0, nil, nil + } + + // "EINVAL: ... flags specified MPOL_F_ADDR and addr is NULL, or flags did + // not specify MPOL_F_ADDR and addr is not NULL." This is partially + // inaccurate: if flags specifies MPOL_F_ADDR, + // mm/mempolicy.c:do_get_mempolicy() doesn't special-case NULL; it will + // just (usually) fail to find a VMA at address 0 and return EFAULT. + if addr != 0 { + return 0, nil, syserror.EINVAL + } + + // "If flags is specified as 0, then information about the calling thread's + // default policy (as set by set_mempolicy(2)) is returned, in the buffers + // pointed to by mode and nodemask. ... If flags specifies MPOL_F_NODE, but + // not MPOL_F_ADDR, and the thread's current policy is MPOL_INTERLEAVE, + // then get_mempolicy() will return in the location pointed to by a + // non-NULL mode argument, the node ID of the next node that will be used + // for interleaving of internal kernel pages allocated on behalf of the + // thread." + policy, nodemaskVal := t.NumaPolicy() + if nodeFlag { + if policy&^linux.MPOL_MODE_FLAGS != linux.MPOL_INTERLEAVE { + return 0, nil, syserror.EINVAL + } + policy = 0 // maxNodes == 1 + } + if mode != 0 { + if _, err := t.CopyOut(mode, policy); err != nil { + return 0, nil, err + } + } + if nodemask != 0 { + if err := copyOutNodemask(t, nodemask, maxnode, nodemaskVal); err != nil { + return 0, nil, err + } + } + return 0, nil, nil +} + +// SetMempolicy implements the syscall set_mempolicy(2). 
+func SetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + modeWithFlags := args[0].Int() + nodemask := args[1].Pointer() + maxnode := args[2].Uint() + + modeWithFlags, nodemaskVal, err := copyInMempolicyNodemask(t, modeWithFlags, nodemask, maxnode) + if err != nil { + return 0, nil, err + } + + t.SetNumaPolicy(modeWithFlags, nodemaskVal) + return 0, nil, nil +} + +// Mbind implements the syscall mbind(2). +func Mbind(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { + addr := args[0].Pointer() + length := args[1].Uint64() + mode := args[2].Int() + nodemask := args[3].Pointer() + maxnode := args[4].Uint() + flags := args[5].Uint() + + if flags&^linux.MPOL_MF_VALID != 0 { + return 0, nil, syserror.EINVAL + } + // "If MPOL_MF_MOVE_ALL is passed in flags ... [the] calling thread must be + // privileged (CAP_SYS_NICE) to use this flag." - mbind(2) + if flags&linux.MPOL_MF_MOVE_ALL != 0 && !t.HasCapability(linux.CAP_SYS_NICE) { + return 0, nil, syserror.EPERM + } + + mode, nodemaskVal, err := copyInMempolicyNodemask(t, mode, nodemask, maxnode) + if err != nil { + return 0, nil, err + } + + // Since we claim to have only a single node, all flags can be ignored + // (since all pages must already be on that single node). + err = t.MemoryManager().SetNumaPolicy(addr, length, mode, nodemaskVal) + return 0, nil, err +} + +func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags int32, nodemask usermem.Addr, maxnode uint32) (int32, uint64, error) { + flags := modeWithFlags & linux.MPOL_MODE_FLAGS + mode := modeWithFlags &^ linux.MPOL_MODE_FLAGS + if flags == linux.MPOL_MODE_FLAGS { + // Can't specify both mode flags simultaneously. + return 0, 0, syserror.EINVAL + } + if mode < 0 || mode >= linux.MPOL_MAX { + // Must specify a valid mode. + return 0, 0, syserror.EINVAL + } + + var nodemaskVal uint64 + if nodemask != 0 { + var err error + nodemaskVal, err = copyInNodemask(t, nodemask, maxnode) + if err != nil { + return 0, 0, err + } + } + + switch mode { + case linux.MPOL_DEFAULT: + // "nodemask must be specified as NULL." - set_mempolicy(2). This is inaccurate; + // Linux allows a nodemask to be specified, as long as it is empty. + if nodemaskVal != 0 { + return 0, 0, syserror.EINVAL + } + case linux.MPOL_BIND, linux.MPOL_INTERLEAVE: + // These require a non-empty nodemask. + if nodemaskVal == 0 { + return 0, 0, syserror.EINVAL + } + case linux.MPOL_PREFERRED: + // This permits an empty nodemask, as long as no flags are set. + if nodemaskVal == 0 && flags != 0 { + return 0, 0, syserror.EINVAL + } + case linux.MPOL_LOCAL: + // This requires an empty nodemask and no flags set ... + if nodemaskVal != 0 || flags != 0 { + return 0, 0, syserror.EINVAL + } + // ... and is implemented as MPOL_PREFERRED. + mode = linux.MPOL_PREFERRED + default: + // Unknown mode, which we should have rejected above. + panic(fmt.Sprintf("unknown mode: %v", mode)) + } + + return mode | flags, nodemaskVal, nil +} diff --git a/pkg/sentry/syscalls/linux/sys_mmap.go b/pkg/sentry/syscalls/linux/sys_mmap.go index 64a6e639c..9926f0ac5 100644 --- a/pkg/sentry/syscalls/linux/sys_mmap.go +++ b/pkg/sentry/syscalls/linux/sys_mmap.go @@ -204,151 +204,6 @@ func Madvise(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.Sysca } } -func copyOutIfNotNull(t *kernel.Task, ptr usermem.Addr, val interface{}) (int, error) { - if ptr != 0 { - return t.CopyOut(ptr, val) - } - return 0, nil -} - -// GetMempolicy implements the syscall get_mempolicy(2). 
-func GetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { - mode := args[0].Pointer() - nodemask := args[1].Pointer() - maxnode := args[2].Uint() - addr := args[3].Pointer() - flags := args[4].Uint() - - memsAllowed := flags&linux.MPOL_F_MEMS_ALLOWED != 0 - nodeFlag := flags&linux.MPOL_F_NODE != 0 - addrFlag := flags&linux.MPOL_F_ADDR != 0 - - // TODO(rahat): Once sysfs is implemented, report a single numa node in - // /sys/devices/system/node. - if nodemask != 0 && maxnode < 1 { - return 0, nil, syserror.EINVAL - } - - // 'addr' provided iff 'addrFlag' set. - if addrFlag == (addr == 0) { - return 0, nil, syserror.EINVAL - } - - // Default policy for the thread. - if flags == 0 { - policy, nodemaskVal := t.NumaPolicy() - if _, err := copyOutIfNotNull(t, mode, policy); err != nil { - return 0, nil, syserror.EFAULT - } - if _, err := copyOutIfNotNull(t, nodemask, nodemaskVal); err != nil { - return 0, nil, syserror.EFAULT - } - return 0, nil, nil - } - - // Report all nodes available to caller. - if memsAllowed { - // MPOL_F_NODE and MPOL_F_ADDR not allowed with MPOL_F_MEMS_ALLOWED. - if nodeFlag || addrFlag { - return 0, nil, syserror.EINVAL - } - - // Report a single numa node. - if _, err := copyOutIfNotNull(t, nodemask, uint32(0x1)); err != nil { - return 0, nil, syserror.EFAULT - } - return 0, nil, nil - } - - if addrFlag { - if nodeFlag { - // Return the id for the node where 'addr' resides, via 'mode'. - // - // The real get_mempolicy(2) allocates the page referenced by 'addr' - // by simulating a read, if it is unallocated before the call. It - // then returns the node the page is allocated on through the mode - // pointer. - b := t.CopyScratchBuffer(1) - _, err := t.CopyInBytes(addr, b) - if err != nil { - return 0, nil, syserror.EFAULT - } - if _, err := copyOutIfNotNull(t, mode, int32(0)); err != nil { - return 0, nil, syserror.EFAULT - } - } else { - storedPolicy, _ := t.NumaPolicy() - // Return the policy governing the memory referenced by 'addr'. - if _, err := copyOutIfNotNull(t, mode, int32(storedPolicy)); err != nil { - return 0, nil, syserror.EFAULT - } - } - return 0, nil, nil - } - - storedPolicy, _ := t.NumaPolicy() - if nodeFlag && (storedPolicy&^linux.MPOL_MODE_FLAGS == linux.MPOL_INTERLEAVE) { - // Policy for current thread is to interleave memory between - // nodes. Return the next node we'll allocate on. Since we only have a - // single node, this is always node 0. - if _, err := copyOutIfNotNull(t, mode, int32(0)); err != nil { - return 0, nil, syserror.EFAULT - } - return 0, nil, nil - } - - return 0, nil, syserror.EINVAL -} - -func allowedNodesMask() uint32 { - const maxNodes = 1 - return ^uint32((1 << maxNodes) - 1) -} - -// SetMempolicy implements the syscall set_mempolicy(2). -func SetMempolicy(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { - modeWithFlags := args[0].Int() - nodemask := args[1].Pointer() - maxnode := args[2].Uint() - - if nodemask != 0 && maxnode < 1 { - return 0, nil, syserror.EINVAL - } - - if modeWithFlags&linux.MPOL_MODE_FLAGS == linux.MPOL_MODE_FLAGS { - // Can't specify multiple modes simultaneously. - return 0, nil, syserror.EINVAL - } - - mode := modeWithFlags &^ linux.MPOL_MODE_FLAGS - if mode < 0 || mode >= linux.MPOL_MAX { - // Must specify a valid mode. - return 0, nil, syserror.EINVAL - } - - var nodemaskVal uint32 - // Nodemask may be empty for some policy modes. 
- if nodemask != 0 && maxnode > 0 { - if _, err := t.CopyIn(nodemask, &nodemaskVal); err != nil { - return 0, nil, syserror.EFAULT - } - } - - if (mode == linux.MPOL_INTERLEAVE || mode == linux.MPOL_BIND) && nodemaskVal == 0 { - // Mode requires a non-empty nodemask, but got an empty nodemask. - return 0, nil, syserror.EINVAL - } - - if nodemaskVal&allowedNodesMask() != 0 { - // Invalid node specified. - return 0, nil, syserror.EINVAL - } - - t.SetNumaPolicy(int32(modeWithFlags), nodemaskVal) - - return 0, nil, nil -} - // Mincore implements the syscall mincore(2). func Mincore(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { addr := args[0].Pointer() diff --git a/test/syscalls/linux/BUILD b/test/syscalls/linux/BUILD index 0cb7b47b6..9bafc6e4f 100644 --- a/test/syscalls/linux/BUILD +++ b/test/syscalls/linux/BUILD @@ -999,6 +999,7 @@ cc_binary( linkstatic = 1, deps = [ "//test/util:cleanup", + "//test/util:memory_util", "//test/util:test_main", "//test/util:test_util", "//test/util:thread_util", diff --git a/test/syscalls/linux/mempolicy.cc b/test/syscalls/linux/mempolicy.cc index 4ac4cb88f..9d5f47651 100644 --- a/test/syscalls/linux/mempolicy.cc +++ b/test/syscalls/linux/mempolicy.cc @@ -18,6 +18,7 @@ #include "gtest/gtest.h" #include "absl/memory/memory.h" #include "test/util/cleanup.h" +#include "test/util/memory_util.h" #include "test/util/test_util.h" #include "test/util/thread_util.h" @@ -34,7 +35,7 @@ namespace { #define MPOL_PREFERRED 1 #define MPOL_BIND 2 #define MPOL_INTERLEAVE 3 -#define MPOL_MAX MPOL_INTERLEAVE +#define MPOL_LOCAL 4 #define MPOL_F_NODE (1 << 0) #define MPOL_F_ADDR (1 << 1) #define MPOL_F_MEMS_ALLOWED (1 << 2) @@ -44,11 +45,17 @@ namespace { int get_mempolicy(int *policy, uint64_t *nmask, uint64_t maxnode, void *addr, int flags) { - return syscall(__NR_get_mempolicy, policy, nmask, maxnode, addr, flags); + return syscall(SYS_get_mempolicy, policy, nmask, maxnode, addr, flags); } int set_mempolicy(int mode, uint64_t *nmask, uint64_t maxnode) { - return syscall(__NR_set_mempolicy, mode, nmask, maxnode); + return syscall(SYS_set_mempolicy, mode, nmask, maxnode); +} + +int mbind(void *addr, unsigned long len, int mode, + const unsigned long *nodemask, unsigned long maxnode, + unsigned flags) { + return syscall(SYS_mbind, addr, len, mode, nodemask, maxnode, flags); } // Creates a cleanup object that resets the calling thread's mempolicy to the @@ -252,6 +259,30 @@ TEST(MempolicyTest, GetMempolicyNextInterleaveNode) { EXPECT_EQ(0, mode); } +TEST(MempolicyTest, Mbind) { + // Temporarily set the thread policy to MPOL_PREFERRED. + const auto cleanup_thread_policy = + ASSERT_NO_ERRNO_AND_VALUE(ScopedSetMempolicy(MPOL_PREFERRED, nullptr, 0)); + + const auto mapping = ASSERT_NO_ERRNO_AND_VALUE( + MmapAnon(kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS)); + + // vmas default to MPOL_DEFAULT irrespective of the thread policy (currently + // MPOL_PREFERRED). + int mode; + ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, mapping.ptr(), MPOL_F_ADDR), + SyscallSucceeds()); + EXPECT_EQ(mode, MPOL_DEFAULT); + + // Set MPOL_PREFERRED for the vma and read it back. 
+ ASSERT_THAT( + mbind(mapping.ptr(), mapping.len(), MPOL_PREFERRED, nullptr, 0, 0), + SyscallSucceeds()); + ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, mapping.ptr(), MPOL_F_ADDR), + SyscallSucceeds()); + EXPECT_EQ(mode, MPOL_PREFERRED); +} + } // namespace } // namespace testing -- cgit v1.2.3 From 3933dd5c04c56512eccb38657bb735f375db4de4 Mon Sep 17 00:00:00 2001 From: Bhasker Hariharan Date: Mon, 10 Jun 2019 15:39:35 -0700 Subject: Fixes to listen backlog handling. Changes netstack to confirm to current linux behaviour where if the backlog is full then we drop the SYN and do not send a SYN-ACK. Similarly we allow upto backlog connections to be in SYN-RCVD state as long as the backlog is not full. We also now drop a SYN if syn cookies are in use and the backlog for the listening endpoint is full. Added new tests to confirm the behaviour. Also reverted the change to increase the backlog in TcpPortReuseMultiThread syscall test. Fixes #236 PiperOrigin-RevId: 252500462 --- pkg/tcpip/transport/tcp/accept.go | 33 +++- pkg/tcpip/transport/tcp/connect.go | 31 +--- pkg/tcpip/transport/tcp/tcp_test.go | 267 +++++++++++++--------------- test/syscalls/linux/socket_inet_loopback.cc | 63 ++++++- 4 files changed, 219 insertions(+), 175 deletions(-) (limited to 'test/syscalls/linux') diff --git a/pkg/tcpip/transport/tcp/accept.go b/pkg/tcpip/transport/tcp/accept.go index a32e20b06..d05259c0a 100644 --- a/pkg/tcpip/transport/tcp/accept.go +++ b/pkg/tcpip/transport/tcp/accept.go @@ -251,7 +251,7 @@ func (l *listenContext) createEndpointAndPerformHandshake(s *segment, opts *head // Perform the 3-way handshake. h := newHandshake(ep, l.rcvWnd) - h.resetToSynRcvd(cookie, irs, opts, l.listenEP) + h.resetToSynRcvd(cookie, irs, opts) if err := h.execute(); err != nil { ep.stack.Stats().TCP.FailedConnectionAttempts.Increment() ep.Close() @@ -294,7 +294,6 @@ func (e *endpoint) handleSynSegment(ctx *listenContext, s *segment, opts *header defer decSynRcvdCount() defer e.decSynRcvdCount() defer s.decRef() - n, err := ctx.createEndpointAndPerformHandshake(s, opts) if err != nil { e.stack.Stats().TCP.FailedConnectionAttempts.Increment() @@ -306,7 +305,7 @@ func (e *endpoint) handleSynSegment(ctx *listenContext, s *segment, opts *header func (e *endpoint) incSynRcvdCount() bool { e.mu.Lock() - if l, c := len(e.acceptedChan), cap(e.acceptedChan); l == c && e.synRcvdCount >= c { + if e.synRcvdCount >= cap(e.acceptedChan) { e.mu.Unlock() return false } @@ -321,6 +320,16 @@ func (e *endpoint) decSynRcvdCount() { e.mu.Unlock() } +func (e *endpoint) acceptQueueIsFull() bool { + e.mu.Lock() + if l, c := len(e.acceptedChan)+e.synRcvdCount, cap(e.acceptedChan); l >= c { + e.mu.Unlock() + return true + } + e.mu.Unlock() + return false +} + // handleListenSegment is called when a listening endpoint receives a segment // and needs to handle it. func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) { @@ -328,17 +337,27 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) { case header.TCPFlagSyn: opts := parseSynSegmentOptions(s) if incSynRcvdCount() { - // Drop the SYN if the listen endpoint's accept queue is - // overflowing. - if e.incSynRcvdCount() { + // Only handle the syn if the following conditions hold + // - accept queue is not full. + // - number of connections in synRcvd state is less than the + // backlog. + if !e.acceptQueueIsFull() && e.incSynRcvdCount() { s.incRef() go e.handleSynSegment(ctx, s, &opts) // S/R-SAFE: synRcvdCount is the barrier. 
return } + decSynRcvdCount() e.stack.Stats().TCP.ListenOverflowSynDrop.Increment() e.stack.Stats().DroppedPackets.Increment() return } else { + // If cookies are in use but the endpoint accept queue + // is full then drop the syn. + if e.acceptQueueIsFull() { + e.stack.Stats().TCP.ListenOverflowSynDrop.Increment() + e.stack.Stats().DroppedPackets.Increment() + return + } cookie := ctx.createCookie(s.id, s.sequenceNumber, encodeMSS(opts.MSS)) // Send SYN with window scaling because we currently // dont't encode this information in the cookie. @@ -356,7 +375,7 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) { } case header.TCPFlagAck: - if len(e.acceptedChan) == cap(e.acceptedChan) { + if e.acceptQueueIsFull() { // Silently drop the ack as the application can't accept // the connection at this point. The ack will be // retransmitted by the sender anyway and we can diff --git a/pkg/tcpip/transport/tcp/connect.go b/pkg/tcpip/transport/tcp/connect.go index 0ad7bfb38..dd671f7ce 100644 --- a/pkg/tcpip/transport/tcp/connect.go +++ b/pkg/tcpip/transport/tcp/connect.go @@ -60,12 +60,11 @@ const ( // handshake holds the state used during a TCP 3-way handshake. type handshake struct { - ep *endpoint - listenEP *endpoint // only non nil when doing passive connects. - state handshakeState - active bool - flags uint8 - ackNum seqnum.Value + ep *endpoint + state handshakeState + active bool + flags uint8 + ackNum seqnum.Value // iss is the initial send sequence number, as defined in RFC 793. iss seqnum.Value @@ -142,7 +141,7 @@ func (h *handshake) effectiveRcvWndScale() uint8 { // resetToSynRcvd resets the state of the handshake object to the SYN-RCVD // state. -func (h *handshake) resetToSynRcvd(iss seqnum.Value, irs seqnum.Value, opts *header.TCPSynOptions, listenEP *endpoint) { +func (h *handshake) resetToSynRcvd(iss seqnum.Value, irs seqnum.Value, opts *header.TCPSynOptions) { h.active = false h.state = handshakeSynRcvd h.flags = header.TCPFlagSyn | header.TCPFlagAck @@ -150,7 +149,6 @@ func (h *handshake) resetToSynRcvd(iss seqnum.Value, irs seqnum.Value, opts *hea h.ackNum = irs + 1 h.mss = opts.MSS h.sndWndScale = opts.WS - h.listenEP = listenEP h.ep.mu.Lock() h.ep.state = StateSynRecv h.ep.mu.Unlock() @@ -287,23 +285,6 @@ func (h *handshake) synRcvdState(s *segment) *tcpip.Error { // We have previously received (and acknowledged) the peer's SYN. If the // peer acknowledges our SYN, the handshake is completed. if s.flagIsSet(header.TCPFlagAck) { - // listenContext is also used by a tcp.Forwarder and in that - // context we do not have a listening endpoint to check the - // backlog. So skip this check if listenEP is nil. - if h.listenEP != nil { - h.listenEP.mu.Lock() - if len(h.listenEP.acceptedChan) == cap(h.listenEP.acceptedChan) { - h.listenEP.mu.Unlock() - // If there is no space in the accept queue to accept - // this endpoint then silently drop this ACK. The peer - // will anyway resend the ack and we can complete the - // connection the next time it's retransmitted. - h.ep.stack.Stats().TCP.ListenOverflowAckDrop.Increment() - h.ep.stack.Stats().DroppedPackets.Increment() - return nil - } - h.listenEP.mu.Unlock() - } // If the timestamp option is negotiated and the segment does // not carry a timestamp option then the segment must be dropped // as per https://tools.ietf.org/html/rfc7323#section-3.2. 
diff --git a/pkg/tcpip/transport/tcp/tcp_test.go b/pkg/tcpip/transport/tcp/tcp_test.go index 56b490aaa..779ca8b76 100644 --- a/pkg/tcpip/transport/tcp/tcp_test.go +++ b/pkg/tcpip/transport/tcp/tcp_test.go @@ -3405,7 +3405,7 @@ func executeHandshake(t *testing.T, c *context.Context, srcPort uint16, synCooki RcvWnd: 30000, }) - // Receive the SYN-ACK reply. + // Receive the SYN-ACK reply.w b := c.GetPacket() tcp := header.TCP(header.IPv4(b).Payload()) iss = seqnum.Value(tcp.SequenceNumber()) @@ -3469,12 +3469,18 @@ func TestListenBacklogFull(t *testing.T) { time.Sleep(50 * time.Millisecond) - // Now execute one more handshake. This should not be completed and - // delivered on an Accept() call as the backlog is full at this point. - irs, iss := executeHandshake(t, c, context.TestPort+uint16(listenBacklog), false /* synCookieInUse */) + // Now execute send one more SYN. The stack should not respond as the backlog + // is full at this point. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort + 2, + DstPort: context.StackPort, + Flags: header.TCPFlagSyn, + SeqNum: seqnum.Value(789), + RcvWnd: 30000, + }) + c.CheckNoPacketTimeout("unexpected packet received", 50*time.Millisecond) - time.Sleep(50 * time.Millisecond) - // Try to accept the connection. + // Try to accept the connections in the backlog. we, ch := waiter.NewChannelEntry(nil) c.WQ.EventRegister(&we, waiter.EventIn) defer c.WQ.EventUnregister(&we) @@ -3506,16 +3512,8 @@ func TestListenBacklogFull(t *testing.T) { } } - // Now craft the ACK again and verify that the connection is now ready - // to be accepted. - c.SendPacket(nil, &context.Headers{ - SrcPort: context.TestPort + uint16(listenBacklog), - DstPort: context.StackPort, - Flags: header.TCPFlagAck, - SeqNum: irs + 1, - AckNum: iss + 1, - RcvWnd: 30000, - }) + // Now a new handshake must succeed. + executeHandshake(t, c, context.TestPort+2, false /*synCookieInUse */) newEP, _, err := c.EP.Accept() if err == tcpip.ErrWouldBlock { @@ -3531,6 +3529,7 @@ func TestListenBacklogFull(t *testing.T) { t.Fatalf("Timed out waiting for accept") } } + // Now verify that the TCP socket is usable and in a connected state. data := "Don't panic" newEP.Write(tcpip.SlicePayload(buffer.NewViewFromBytes([]byte(data))), tcpip.WriteOptions{}) @@ -3541,13 +3540,7 @@ func TestListenBacklogFull(t *testing.T) { } } -func TestListenBacklogFullSynCookieInUse(t *testing.T) { - saved := tcp.SynRcvdCountThreshold - defer func() { - tcp.SynRcvdCountThreshold = saved - }() - tcp.SynRcvdCountThreshold = 1 - +func TestListenSynRcvdQueueFull(t *testing.T) { c := context.New(t, defaultMTU) defer c.Cleanup() @@ -3566,48 +3559,72 @@ func TestListenBacklogFullSynCookieInUse(t *testing.T) { // Test acceptance. // Start listening. listenBacklog := 1 - portOffset := uint16(0) if err := c.EP.Listen(listenBacklog); err != nil { t.Fatalf("Listen failed: %v", err) } - executeHandshake(t, c, context.TestPort+portOffset, false) - portOffset++ - // Wait for this to be delivered to the accept queue. - time.Sleep(50 * time.Millisecond) + // Send two SYN's the first one should get a SYN-ACK, the + // second one should not get any response and is dropped as + // the synRcvd count will be equal to backlog. 
+ irs := seqnum.Value(789) + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: context.StackPort, + Flags: header.TCPFlagSyn, + SeqNum: seqnum.Value(789), + RcvWnd: 30000, + }) - nonCookieIRS, nonCookieISS := executeHandshake(t, c, context.TestPort+portOffset, false) + // Receive the SYN-ACK reply. + b := c.GetPacket() + tcp := header.TCP(header.IPv4(b).Payload()) + iss := seqnum.Value(tcp.SequenceNumber()) + tcpCheckers := []checker.TransportChecker{ + checker.SrcPort(context.StackPort), + checker.DstPort(context.TestPort), + checker.TCPFlags(header.TCPFlagAck | header.TCPFlagSyn), + checker.AckNum(uint32(irs) + 1), + } + checker.IPv4(t, b, checker.TCP(tcpCheckers...)) - // Since the backlog is full at this point this connection will not - // transition out of handshake and ignore the ACK. - // - // At this point there should be 1 completed connection in the backlog - // and one incomplete one pending for a final ACK and hence not ready to be - // delivered to the endpoint. + // Now execute send one more SYN. The stack should not respond as the backlog + // is full at this point. // - // Now execute one more handshake. This should not be completed and - // delivered on an Accept() call as the backlog is full at this point - // and there is already 1 pending endpoint. - // - // This one should use a SYN cookie as the synRcvdCount is equal to the - // SynRcvdCountThreshold. - time.Sleep(50 * time.Millisecond) - portOffset++ - irs, iss := executeHandshake(t, c, context.TestPort+portOffset, true) + // NOTE: we did not complete the handshake for the previous one so the + // accept backlog should be empty and there should be one connection in + // synRcvd state. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort + 1, + DstPort: context.StackPort, + Flags: header.TCPFlagSyn, + SeqNum: seqnum.Value(889), + RcvWnd: 30000, + }) + c.CheckNoPacketTimeout("unexpected packet received", 50*time.Millisecond) - time.Sleep(50 * time.Millisecond) + // Now complete the previous connection and verify that there is a connection + // to accept. + // Send ACK. + c.SendPacket(nil, &context.Headers{ + SrcPort: context.TestPort, + DstPort: context.StackPort, + Flags: header.TCPFlagAck, + SeqNum: irs + 1, + AckNum: iss + 1, + RcvWnd: 30000, + }) - // Verify that there is only one acceptable connection at this point. + // Try to accept the connections in the backlog. we, ch := waiter.NewChannelEntry(nil) c.WQ.EventRegister(&we, waiter.EventIn) defer c.WQ.EventUnregister(&we) - _, _, err = c.EP.Accept() + newEP, _, err := c.EP.Accept() if err == tcpip.ErrWouldBlock { // Wait for connection to be established. select { case <-ch: - _, _, err = c.EP.Accept() + newEP, _, err = c.EP.Accept() if err != nil { t.Fatalf("Accept failed: %v", err) } @@ -3617,27 +3634,68 @@ func TestListenBacklogFullSynCookieInUse(t *testing.T) { } } - // Now verify that there are no more connections that can be accepted. - _, _, err = c.EP.Accept() - if err != tcpip.ErrWouldBlock { - select { - case <-ch: - t.Fatalf("unexpected endpoint delivered on Accept: %+v", c.EP) - case <-time.After(1 * time.Second): - } + // Now verify that the TCP socket is usable and in a connected state. 
+ data := "Don't panic" + newEP.Write(tcpip.SlicePayload(buffer.NewViewFromBytes([]byte(data))), tcpip.WriteOptions{}) + pkt := c.GetPacket() + tcp = header.TCP(header.IPv4(pkt).Payload()) + if string(tcp.Payload()) != data { + t.Fatalf("Unexpected data: got %v, want %v", string(tcp.Payload()), data) } +} + +func TestListenBacklogFullSynCookieInUse(t *testing.T) { + saved := tcp.SynRcvdCountThreshold + defer func() { + tcp.SynRcvdCountThreshold = saved + }() + tcp.SynRcvdCountThreshold = 1 + + c := context.New(t, defaultMTU) + defer c.Cleanup() - // Now send an ACK for the half completed connection + // Create TCP endpoint. + var err *tcpip.Error + c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + // Bind to wildcard. + if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil { + t.Fatalf("Bind failed: %v", err) + } + + // Test acceptance. + // Start listening. + listenBacklog := 1 + portOffset := uint16(0) + if err := c.EP.Listen(listenBacklog); err != nil { + t.Fatalf("Listen failed: %v", err) + } + + executeHandshake(t, c, context.TestPort+portOffset, false) + portOffset++ + // Wait for this to be delivered to the accept queue. + time.Sleep(50 * time.Millisecond) + + // Send a SYN request. + irs := seqnum.Value(789) c.SendPacket(nil, &context.Headers{ - SrcPort: context.TestPort + portOffset - 1, + SrcPort: context.TestPort, DstPort: context.StackPort, - Flags: header.TCPFlagAck, - SeqNum: nonCookieIRS + 1, - AckNum: nonCookieISS + 1, + Flags: header.TCPFlagSyn, + SeqNum: irs, RcvWnd: 30000, }) + // The Syn should be dropped as the endpoint's backlog is full. + c.CheckNoPacketTimeout("unexpected packet received", 50*time.Millisecond) + + // Verify that there is only one acceptable connection at this point. + we, ch := waiter.NewChannelEntry(nil) + c.WQ.EventRegister(&we, waiter.EventIn) + defer c.WQ.EventUnregister(&we) - // Verify that the connection is now delivered to the backlog. _, _, err = c.EP.Accept() if err == tcpip.ErrWouldBlock { // Wait for connection to be established. @@ -3653,41 +3711,15 @@ func TestListenBacklogFullSynCookieInUse(t *testing.T) { } } - // Finally send an ACK for the connection that used a cookie and verify that - // it's also completed and delivered. - c.SendPacket(nil, &context.Headers{ - SrcPort: context.TestPort + portOffset, - DstPort: context.StackPort, - Flags: header.TCPFlagAck, - SeqNum: irs, - AckNum: iss, - RcvWnd: 30000, - }) - - time.Sleep(50 * time.Millisecond) - newEP, _, err := c.EP.Accept() - if err == tcpip.ErrWouldBlock { - // Wait for connection to be established. + // Now verify that there are no more connections that can be accepted. + _, _, err = c.EP.Accept() + if err != tcpip.ErrWouldBlock { select { case <-ch: - newEP, _, err = c.EP.Accept() - if err != nil { - t.Fatalf("Accept failed: %v", err) - } - + t.Fatalf("unexpected endpoint delivered on Accept: %+v", c.EP) case <-time.After(1 * time.Second): - t.Fatalf("Timed out waiting for accept") } } - - // Now verify that the TCP socket is usable and in a connected state. 
- data := "Don't panic" - newEP.Write(tcpip.SlicePayload(buffer.NewViewFromBytes([]byte(data))), tcpip.WriteOptions{}) - b := c.GetPacket() - tcp := header.TCP(header.IPv4(b).Payload()) - if string(tcp.Payload()) != data { - t.Fatalf("Unexpected data: got %v, want %v", string(tcp.Payload()), data) - } } func TestPassiveConnectionAttemptIncrement(t *testing.T) { @@ -3761,18 +3793,12 @@ func TestPassiveFailedConnectionAttemptIncrement(t *testing.T) { } srcPort := uint16(context.TestPort) - // Now attempt 3 handshakes, the first two will fill up the accept and the SYN-RCVD - // queue for the endpoint. + // Now attempt a handshakes it will fill up the accept backlog. executeHandshake(t, c, srcPort, false) // Give time for the final ACK to be processed as otherwise the next handshake could // get accepted before the previous one based on goroutine scheduling. time.Sleep(50 * time.Millisecond) - irs, iss := executeHandshake(t, c, srcPort+1, false) - - // Wait for a short while for the accepted connection to be delivered to - // the channel before trying to send the 3rd SYN. - time.Sleep(40 * time.Millisecond) want := stats.TCP.ListenOverflowSynDrop.Value() + 1 @@ -3810,49 +3836,6 @@ func TestPassiveFailedConnectionAttemptIncrement(t *testing.T) { t.Fatalf("Timed out waiting for accept") } } - - // Now complete the next connection in SYN-RCVD state as it should - // have dropped the final ACK to the handshake due to accept queue - // being full. - c.SendPacket(nil, &context.Headers{ - SrcPort: srcPort + 1, - DstPort: context.StackPort, - Flags: header.TCPFlagAck, - SeqNum: irs + 1, - AckNum: iss + 1, - RcvWnd: 30000, - }) - - // Now check that there is one more acceptable connections. - _, _, err = c.EP.Accept() - if err == tcpip.ErrWouldBlock { - // Wait for connection to be established. - select { - case <-ch: - _, _, err = c.EP.Accept() - if err != nil { - t.Fatalf("Accept failed: %v", err) - } - - case <-time.After(1 * time.Second): - t.Fatalf("Timed out waiting for accept") - } - } - - // Try and accept a 3rd one this should fail. - _, _, err = c.EP.Accept() - if err == tcpip.ErrWouldBlock { - // Wait for connection to be established. - select { - case <-ch: - ep, _, err = c.EP.Accept() - if err == nil { - t.Fatalf("Accept succeeded when it should have failed got: %+v", ep) - } - - case <-time.After(1 * time.Second): - } - } } func TestEndpointBindListenAcceptState(t *testing.T) { diff --git a/test/syscalls/linux/socket_inet_loopback.cc b/test/syscalls/linux/socket_inet_loopback.cc index 9b3b70b01..df31d25b5 100644 --- a/test/syscalls/linux/socket_inet_loopback.cc +++ b/test/syscalls/linux/socket_inet_loopback.cc @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -144,6 +145,66 @@ TEST_P(SocketInetLoopbackTest, TCP) { ASSERT_THAT(shutdown(conn_fd.get(), SHUT_RDWR), SyscallSucceeds()); } +TEST_P(SocketInetLoopbackTest, TCPbacklog) { + auto const& param = GetParam(); + + TestAddress const& listener = param.listener; + TestAddress const& connector = param.connector; + + // Create the listening socket. + const FileDescriptor listen_fd = ASSERT_NO_ERRNO_AND_VALUE( + Socket(listener.family(), SOCK_STREAM, IPPROTO_TCP)); + sockaddr_storage listen_addr = listener.addr; + ASSERT_THAT(bind(listen_fd.get(), reinterpret_cast(&listen_addr), + listener.addr_len), + SyscallSucceeds()); + ASSERT_THAT(listen(listen_fd.get(), 2), SyscallSucceeds()); + + // Get the port bound by the listening socket. 
+ socklen_t addrlen = listener.addr_len; + ASSERT_THAT(getsockname(listen_fd.get(), + reinterpret_cast(&listen_addr), &addrlen), + SyscallSucceeds()); + uint16_t const port = + ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); + int i = 0; + while (1) { + int ret; + + // Connect to the listening socket. + const FileDescriptor conn_fd = ASSERT_NO_ERRNO_AND_VALUE( + Socket(connector.family(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP)); + sockaddr_storage conn_addr = connector.addr; + ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port)); + ret = connect(conn_fd.get(), reinterpret_cast(&conn_addr), + connector.addr_len); + if (ret != 0) { + EXPECT_THAT(ret, SyscallFailsWithErrno(EINPROGRESS)); + struct pollfd pfd = { + .fd = conn_fd.get(), + .events = POLLOUT, + }; + ret = poll(&pfd, 1, 3000); + if (ret == 0) break; + EXPECT_THAT(ret, SyscallSucceedsWithValue(1)); + } + EXPECT_THAT(RetryEINTR(send)(conn_fd.get(), &i, sizeof(i), 0), + SyscallSucceedsWithValue(sizeof(i))); + ASSERT_THAT(shutdown(conn_fd.get(), SHUT_RDWR), SyscallSucceeds()); + i++; + } + + for (; i != 0; i--) { + // Accept the connection. + // + // We have to assign a name to the accepted socket, as unamed temporary + // objects are destructed upon full evaluation of the expression it is in, + // potentially causing the connecting socket to fail to shutdown properly. + auto accepted = + ASSERT_NO_ERRNO_AND_VALUE(Accept(listen_fd.get(), nullptr, nullptr)); + } +} + INSTANTIATE_TEST_SUITE_P( All, SocketInetLoopbackTest, ::testing::Values( @@ -198,7 +259,7 @@ TEST_P(SocketInetReusePortTest, TcpPortReuseMultiThread) { ASSERT_THAT( bind(fd, reinterpret_cast(&listen_addr), listener.addr_len), SyscallSucceeds()); - ASSERT_THAT(listen(fd, 512), SyscallSucceeds()); + ASSERT_THAT(listen(fd, 40), SyscallSucceeds()); // On the first bind we need to determine which port was bound. if (i != 0) { -- cgit v1.2.3 From df110ad4fe571721a7eb4a5a1f9ce92584ef7809 Mon Sep 17 00:00:00 2001 From: Adin Scannell Date: Tue, 11 Jun 2019 19:23:27 -0700 Subject: Eat sendfile partial error For sendfile(2), we propagate a TCP error through the system call layer. This should be eaten if there is a partial result. This change also adds a test to ensure that there is no panic in this case, for both TCP sockets and unix domain sockets. PiperOrigin-RevId: 252746192 --- pkg/sentry/syscalls/linux/error.go | 4 + test/syscalls/linux/sendfile_socket.cc | 170 ++++++++++++++++++++++++--------- 2 files changed, 131 insertions(+), 43 deletions(-) (limited to 'test/syscalls/linux') diff --git a/pkg/sentry/syscalls/linux/error.go b/pkg/sentry/syscalls/linux/error.go index 1ba3695fb..72146ea63 100644 --- a/pkg/sentry/syscalls/linux/error.go +++ b/pkg/sentry/syscalls/linux/error.go @@ -92,6 +92,10 @@ func handleIOError(t *kernel.Task, partialResult bool, err, intr error, op strin // TODO(gvisor.dev/issue/161): In some cases SIGPIPE should // also be sent to the application. return nil + case syserror.ECONNRESET: + // For TCP sendfile connections, we may have a reset. But we + // should just return n as the result. + return nil case syserror.ErrWouldBlock: // Syscall would block, but completed a partial read/write. 
// This case should only be returned by IssueIO for nonblocking diff --git a/test/syscalls/linux/sendfile_socket.cc b/test/syscalls/linux/sendfile_socket.cc index 66adda515..1c56540bc 100644 --- a/test/syscalls/linux/sendfile_socket.cc +++ b/test/syscalls/linux/sendfile_socket.cc @@ -33,9 +33,69 @@ namespace gvisor { namespace testing { namespace { +class SendFileTest : public ::testing::TestWithParam { + protected: + PosixErrorOr> Sockets() { + // Bind a server socket. + int family = GetParam(); + struct sockaddr server_addr = {}; + switch (family) { + case AF_INET: { + struct sockaddr_in *server_addr_in = + reinterpret_cast(&server_addr); + server_addr_in->sin_family = family; + server_addr_in->sin_addr.s_addr = INADDR_ANY; + break; + } + case AF_UNIX: { + struct sockaddr_un *server_addr_un = + reinterpret_cast(&server_addr); + server_addr_un->sun_family = family; + server_addr_un->sun_path[0] = '\0'; + break; + } + default: + return PosixError(EINVAL); + } + int server = socket(family, SOCK_STREAM, 0); + if (bind(server, &server_addr, sizeof(server_addr)) < 0) { + return PosixError(errno); + } + if (listen(server, 1) < 0) { + close(server); + return PosixError(errno); + } + + // Fetch the address; both are anonymous. + socklen_t length = sizeof(server_addr); + if (getsockname(server, &server_addr, &length) < 0) { + close(server); + return PosixError(errno); + } + + // Connect the client. + int client = socket(family, SOCK_STREAM, 0); + if (connect(client, &server_addr, length) < 0) { + close(server); + close(client); + return PosixError(errno); + } + + // Accept on the server. + int server_client = accept(server, nullptr, 0); + if (server_client < 0) { + close(server); + close(client); + return PosixError(errno); + } + close(server); + return std::make_tuple(client, server_client); + } +}; + // Sends large file to exercise the path that read and writes data multiple // times, esp. when more data is read than can be written. -TEST(SendFileTest, SendMultiple) { +TEST_P(SendFileTest, SendMultiple) { std::vector data(5 * 1024 * 1024); RandomizeBuffer(data.data(), data.size()); @@ -45,34 +105,20 @@ TEST(SendFileTest, SendMultiple) { TempPath::kDefaultFileMode)); const TempPath out_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile()); - // Use a socket for target file to make the write window small. - const FileDescriptor server(socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)); - ASSERT_THAT(server.get(), SyscallSucceeds()); - - struct sockaddr_in server_addr = {}; - server_addr.sin_family = AF_INET; - server_addr.sin_addr.s_addr = INADDR_ANY; - ASSERT_THAT( - bind(server.get(), reinterpret_cast(&server_addr), - sizeof(server_addr)), - SyscallSucceeds()); - ASSERT_THAT(listen(server.get(), 1), SyscallSucceeds()); + // Create sockets. + std::tuple fds = ASSERT_NO_ERRNO_AND_VALUE(Sockets()); + const FileDescriptor server(std::get<0>(fds)); + FileDescriptor client(std::get<1>(fds)); // non-const, reset is used. // Thread that reads data from socket and dumps to a file. - ScopedThread th([&server, &out_file, &server_addr] { - socklen_t addrlen = sizeof(server_addr); - const FileDescriptor fd(RetryEINTR(accept)( - server.get(), reinterpret_cast(&server_addr), - &addrlen)); - ASSERT_THAT(fd.get(), SyscallSucceeds()); - + ScopedThread th([&] { FileDescriptor outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_WRONLY)); // Read until socket is closed. 
char buf[10240]; for (int cnt = 0;; cnt++) { - int r = RetryEINTR(read)(fd.get(), buf, sizeof(buf)); + int r = RetryEINTR(read)(server.get(), buf, sizeof(buf)); // We cannot afford to save on every read() call. if (cnt % 1000 == 0) { ASSERT_THAT(r, SyscallSucceeds()); @@ -99,25 +145,6 @@ TEST(SendFileTest, SendMultiple) { const FileDescriptor inf = ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY)); - FileDescriptor outf(socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)); - ASSERT_THAT(outf.get(), SyscallSucceeds()); - - // Get the port bound by the listening socket. - socklen_t addrlen = sizeof(server_addr); - ASSERT_THAT(getsockname(server.get(), - reinterpret_cast(&server_addr), &addrlen), - SyscallSucceeds()); - - struct sockaddr_in addr = {}; - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = inet_addr("127.0.0.1"); - addr.sin_port = server_addr.sin_port; - std::cout << "Connecting on port=" << server_addr.sin_port; - ASSERT_THAT( - RetryEINTR(connect)( - outf.get(), reinterpret_cast(&addr), sizeof(addr)), - SyscallSucceeds()); - int cnt = 0; for (size_t sent = 0; sent < data.size(); cnt++) { const size_t remain = data.size() - sent; @@ -125,7 +152,7 @@ TEST(SendFileTest, SendMultiple) { << ", remain=" << remain; // Send data and verify that sendfile returns the correct value. - int res = sendfile(outf.get(), inf.get(), nullptr, remain); + int res = sendfile(client.get(), inf.get(), nullptr, remain); // We cannot afford to save on every sendfile() call. if (cnt % 120 == 0) { MaybeSave(); @@ -142,17 +169,74 @@ TEST(SendFileTest, SendMultiple) { } // Close socket to stop thread. - outf.reset(); + client.reset(); th.Join(); // Verify that the output file has the correct data. - outf = ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY)); + const FileDescriptor outf = + ASSERT_NO_ERRNO_AND_VALUE(Open(out_file.path(), O_RDONLY)); std::vector actual(data.size(), '\0'); ASSERT_THAT(RetryEINTR(read)(outf.get(), actual.data(), actual.size()), SyscallSucceedsWithValue(actual.size())); ASSERT_EQ(memcmp(data.data(), actual.data(), data.size()), 0); } +TEST_P(SendFileTest, Shutdown) { + // Create a socket. + std::tuple fds = ASSERT_NO_ERRNO_AND_VALUE(Sockets()); + const FileDescriptor client(std::get<0>(fds)); + FileDescriptor server(std::get<1>(fds)); // non-const, released below. + + // If this is a TCP socket, then turn off linger. + if (GetParam() == AF_INET) { + struct linger sl; + sl.l_onoff = 1; + sl.l_linger = 0; + ASSERT_THAT( + setsockopt(server.get(), SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)), + SyscallSucceeds()); + } + + // Create a 1m file with random data. + std::vector data(1024 * 1024); + RandomizeBuffer(data.data(), data.size()); + const TempPath in_file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFileWith( + GetAbsoluteTestTmpdir(), absl::string_view(data.data(), data.size()), + TempPath::kDefaultFileMode)); + const FileDescriptor inf = + ASSERT_NO_ERRNO_AND_VALUE(Open(in_file.path(), O_RDONLY)); + + // Read some data, then shutdown the socket. We don't actually care about + // checking the contents (other tests do that), so we just re-use the same + // buffer as above. + ScopedThread t([&]() { + int done = 0; + while (done < data.size()) { + int n = read(server.get(), data.data(), data.size()); + ASSERT_THAT(n, SyscallSucceeds()); + done += n; + } + // Close the server side socket. + ASSERT_THAT(close(server.release()), SyscallSucceeds()); + }); + + // Continuously stream from the file to the socket. 
Note we do not assert + // that a specific amount of data has been written at any time, just that some + // data is written. Eventually, we should get a connection reset error. + while (1) { + off_t offset = 0; // Always read from the start. + int n = sendfile(client.get(), inf.get(), &offset, data.size()); + EXPECT_THAT(n, AnyOf(SyscallFailsWithErrno(ECONNRESET), + SyscallFailsWithErrno(EPIPE), SyscallSucceeds())); + if (n <= 0) { + break; + } + } +} + +INSTANTIATE_TEST_SUITE_P(AddressFamily, SendFileTest, + ::testing::Values(AF_UNIX, AF_INET)); + } // namespace } // namespace testing } // namespace gvisor -- cgit v1.2.3 From 70578806e8d3e01fae2249b3e602cd5b05d378a0 Mon Sep 17 00:00:00 2001 From: Bhasker Hariharan Date: Wed, 12 Jun 2019 13:34:47 -0700 Subject: Add support for TCP_CONGESTION socket option. This CL also cleans up the error returned for setting congestion control which was incorrectly returning EINVAL instead of ENOENT. PiperOrigin-RevId: 252889093 --- pkg/sentry/socket/epsocket/epsocket.go | 30 +++++ pkg/tcpip/tcpip.go | 8 ++ pkg/tcpip/transport/tcp/BUILD | 1 + pkg/tcpip/transport/tcp/cubic.go | 3 +- pkg/tcpip/transport/tcp/cubic_state.go | 29 +++++ pkg/tcpip/transport/tcp/endpoint.go | 45 +++++++- pkg/tcpip/transport/tcp/protocol.go | 22 ++-- pkg/tcpip/transport/tcp/snd.go | 10 +- pkg/tcpip/transport/tcp/tcp_test.go | 112 +++++++++++++++--- pkg/tcpip/transport/tcp/testing/context/context.go | 52 +++++---- test/syscalls/linux/socket_ip_tcp_generic.cc | 104 +++++++++++++++++ test/syscalls/linux/tcp_socket.cc | 127 +++++++++++++++++++++ 12 files changed, 481 insertions(+), 62 deletions(-) create mode 100644 pkg/tcpip/transport/tcp/cubic_state.go (limited to 'test/syscalls/linux') diff --git a/pkg/sentry/socket/epsocket/epsocket.go b/pkg/sentry/socket/epsocket/epsocket.go index f67451179..a50798cb3 100644 --- a/pkg/sentry/socket/epsocket/epsocket.go +++ b/pkg/sentry/socket/epsocket/epsocket.go @@ -920,6 +920,30 @@ func getSockOptTCP(t *kernel.Task, ep commonEndpoint, name, outLen int) (interfa t.Kernel().EmitUnimplementedEvent(t) + case linux.TCP_CONGESTION: + if outLen <= 0 { + return nil, syserr.ErrInvalidArgument + } + + var v tcpip.CongestionControlOption + if err := ep.GetSockOpt(&v); err != nil { + return nil, syserr.TranslateNetstackError(err) + } + + // We match linux behaviour here where it returns the lower of + // TCP_CA_NAME_MAX bytes or the value of the option length. + // + // This is Linux's net/tcp.h TCP_CA_NAME_MAX. + const tcpCANameMax = 16 + + toCopy := tcpCANameMax + if outLen < tcpCANameMax { + toCopy = outLen + } + b := make([]byte, toCopy) + copy(b, v) + return b, nil + default: emitUnimplementedEventTCP(t, name) } @@ -1222,6 +1246,12 @@ func setSockOptTCP(t *kernel.Task, ep commonEndpoint, name int, optVal []byte) * } return syserr.TranslateNetstackError(ep.SetSockOpt(tcpip.KeepaliveIntervalOption(time.Second * time.Duration(v)))) + case linux.TCP_CONGESTION: + v := tcpip.CongestionControlOption(optVal) + if err := ep.SetSockOpt(v); err != nil { + return syserr.TranslateNetstackError(err) + } + return nil case linux.TCP_REPAIR_OPTIONS: t.Kernel().EmitUnimplementedEvent(t) diff --git a/pkg/tcpip/tcpip.go b/pkg/tcpip/tcpip.go index 85ef014d0..04c776205 100644 --- a/pkg/tcpip/tcpip.go +++ b/pkg/tcpip/tcpip.go @@ -472,6 +472,14 @@ type KeepaliveIntervalOption time.Duration // closed. type KeepaliveCountOption int +// CongestionControlOption is used by SetSockOpt/GetSockOpt to set/get +// the current congestion control algorithm. 
+type CongestionControlOption string + +// AvailableCongestionControlOption is used to query the supported congestion +// control algorithms. +type AvailableCongestionControlOption string + // MulticastTTLOption is used by SetSockOpt/GetSockOpt to control the default // TTL value for multicast messages. The default is 1. type MulticastTTLOption uint8 diff --git a/pkg/tcpip/transport/tcp/BUILD b/pkg/tcpip/transport/tcp/BUILD index 9db38196b..a9dbfb930 100644 --- a/pkg/tcpip/transport/tcp/BUILD +++ b/pkg/tcpip/transport/tcp/BUILD @@ -21,6 +21,7 @@ go_library( "accept.go", "connect.go", "cubic.go", + "cubic_state.go", "endpoint.go", "endpoint_state.go", "forwarder.go", diff --git a/pkg/tcpip/transport/tcp/cubic.go b/pkg/tcpip/transport/tcp/cubic.go index e618cd2b9..7b1f5e763 100644 --- a/pkg/tcpip/transport/tcp/cubic.go +++ b/pkg/tcpip/transport/tcp/cubic.go @@ -23,6 +23,7 @@ import ( // control algorithm state. // // See: https://tools.ietf.org/html/rfc8312. +// +stateify savable type cubicState struct { // wLastMax is the previous wMax value. wLastMax float64 @@ -33,7 +34,7 @@ type cubicState struct { // t denotes the time when the current congestion avoidance // was entered. - t time.Time + t time.Time `state:".(unixTime)"` // numCongestionEvents tracks the number of congestion events since last // RTO. diff --git a/pkg/tcpip/transport/tcp/cubic_state.go b/pkg/tcpip/transport/tcp/cubic_state.go new file mode 100644 index 000000000..d0f58cfaf --- /dev/null +++ b/pkg/tcpip/transport/tcp/cubic_state.go @@ -0,0 +1,29 @@ +// Copyright 2019 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcp + +import ( + "time" +) + +// saveT is invoked by stateify. +func (c *cubicState) saveT() unixTime { + return unixTime{c.t.Unix(), c.t.UnixNano()} +} + +// loadT is invoked by stateify. +func (c *cubicState) loadT(unix unixTime) { + c.t = time.Unix(unix.second, unix.nano) +} diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go index 23422ca5e..1efe9d3fb 100644 --- a/pkg/tcpip/transport/tcp/endpoint.go +++ b/pkg/tcpip/transport/tcp/endpoint.go @@ -17,6 +17,7 @@ package tcp import ( "fmt" "math" + "strings" "sync" "sync/atomic" "time" @@ -286,7 +287,7 @@ type endpoint struct { // cc stores the name of the Congestion Control algorithm to use for // this endpoint. - cc CongestionControlOption + cc tcpip.CongestionControlOption // The following are used when a "packet too big" control packet is // received. They are protected by sndBufMu. 
They are used to @@ -394,7 +395,7 @@ func newEndpoint(stack *stack.Stack, netProto tcpip.NetworkProtocolNumber, waite e.rcvBufSize = rs.Default } - var cs CongestionControlOption + var cs tcpip.CongestionControlOption if err := stack.TransportProtocolOption(ProtocolNumber, &cs); err == nil { e.cc = cs } @@ -898,6 +899,40 @@ func (e *endpoint) SetSockOpt(opt interface{}) *tcpip.Error { e.mu.Unlock() return nil + case tcpip.CongestionControlOption: + // Query the available cc algorithms in the stack and + // validate that the specified algorithm is actually + // supported in the stack. + var avail tcpip.AvailableCongestionControlOption + if err := e.stack.TransportProtocolOption(ProtocolNumber, &avail); err != nil { + return err + } + availCC := strings.Split(string(avail), " ") + for _, cc := range availCC { + if v == tcpip.CongestionControlOption(cc) { + // Acquire the work mutex as we may need to + // reinitialize the congestion control state. + e.mu.Lock() + state := e.state + e.cc = v + e.mu.Unlock() + switch state { + case StateEstablished: + e.workMu.Lock() + e.mu.Lock() + if e.state == state { + e.snd.cc = e.snd.initCongestionControl(e.cc) + } + e.mu.Unlock() + e.workMu.Unlock() + } + return nil + } + } + + // Linux returns ENOENT when an invalid congestion + // control algorithm is specified. + return tcpip.ErrNoSuchFile default: return nil } @@ -1067,6 +1102,12 @@ func (e *endpoint) GetSockOpt(opt interface{}) *tcpip.Error { } return nil + case *tcpip.CongestionControlOption: + e.mu.Lock() + *o = e.cc + e.mu.Unlock() + return nil + default: return tcpip.ErrUnknownProtocolOption } diff --git a/pkg/tcpip/transport/tcp/protocol.go b/pkg/tcpip/transport/tcp/protocol.go index b31bcccfa..59f4009a1 100644 --- a/pkg/tcpip/transport/tcp/protocol.go +++ b/pkg/tcpip/transport/tcp/protocol.go @@ -79,13 +79,6 @@ const ( ccCubic = "cubic" ) -// CongestionControlOption sets the current congestion control algorithm. -type CongestionControlOption string - -// AvailableCongestionControlOption returns the supported congestion control -// algorithms. -type AvailableCongestionControlOption string - type protocol struct { mu sync.Mutex sackEnabled bool @@ -93,7 +86,6 @@ type protocol struct { recvBufferSize ReceiveBufferSizeOption congestionControl string availableCongestionControl []string - allowedCongestionControl []string } // Number returns the tcp protocol number. @@ -188,7 +180,7 @@ func (p *protocol) SetOption(option interface{}) *tcpip.Error { p.mu.Unlock() return nil - case CongestionControlOption: + case tcpip.CongestionControlOption: for _, c := range p.availableCongestionControl { if string(v) == c { p.mu.Lock() @@ -197,7 +189,9 @@ func (p *protocol) SetOption(option interface{}) *tcpip.Error { return nil } } - return tcpip.ErrInvalidOptionValue + // linux returns ENOENT when an invalid congestion control + // is specified. 
+ return tcpip.ErrNoSuchFile default: return tcpip.ErrUnknownProtocolOption } @@ -223,14 +217,14 @@ func (p *protocol) Option(option interface{}) *tcpip.Error { *v = p.recvBufferSize p.mu.Unlock() return nil - case *CongestionControlOption: + case *tcpip.CongestionControlOption: p.mu.Lock() - *v = CongestionControlOption(p.congestionControl) + *v = tcpip.CongestionControlOption(p.congestionControl) p.mu.Unlock() return nil - case *AvailableCongestionControlOption: + case *tcpip.AvailableCongestionControlOption: p.mu.Lock() - *v = AvailableCongestionControlOption(strings.Join(p.availableCongestionControl, " ")) + *v = tcpip.AvailableCongestionControlOption(strings.Join(p.availableCongestionControl, " ")) p.mu.Unlock() return nil default: diff --git a/pkg/tcpip/transport/tcp/snd.go b/pkg/tcpip/transport/tcp/snd.go index b236d7af2..daa3e8341 100644 --- a/pkg/tcpip/transport/tcp/snd.go +++ b/pkg/tcpip/transport/tcp/snd.go @@ -194,8 +194,6 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint s := &sender{ ep: ep, - sndCwnd: InitialCwnd, - sndSsthresh: math.MaxInt64, sndWnd: sndWnd, sndUna: iss + 1, sndNxt: iss + 1, @@ -238,7 +236,13 @@ func newSender(ep *endpoint, iss, irs seqnum.Value, sndWnd seqnum.Size, mss uint return s } -func (s *sender) initCongestionControl(congestionControlName CongestionControlOption) congestionControl { +// initCongestionControl initializes the specified congestion control module and +// returns a handle to it. It also initializes the sndCwnd and sndSsThresh to +// their initial values. +func (s *sender) initCongestionControl(congestionControlName tcpip.CongestionControlOption) congestionControl { + s.sndCwnd = InitialCwnd + s.sndSsthresh = math.MaxInt64 + switch congestionControlName { case ccCubic: return newCubicCC(s) diff --git a/pkg/tcpip/transport/tcp/tcp_test.go b/pkg/tcpip/transport/tcp/tcp_test.go index 779ca8b76..7d8987219 100644 --- a/pkg/tcpip/transport/tcp/tcp_test.go +++ b/pkg/tcpip/transport/tcp/tcp_test.go @@ -3205,13 +3205,14 @@ func TestTCPEndpointProbe(t *testing.T) { } } -func TestSetCongestionControl(t *testing.T) { +func TestStackSetCongestionControl(t *testing.T) { testCases := []struct { - cc tcp.CongestionControlOption - mustPass bool + cc tcpip.CongestionControlOption + err *tcpip.Error }{ - {"reno", true}, - {"cubic", true}, + {"reno", nil}, + {"cubic", nil}, + {"blahblah", tcpip.ErrNoSuchFile}, } for _, tc := range testCases { @@ -3221,62 +3222,135 @@ func TestSetCongestionControl(t *testing.T) { s := c.Stack() - if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tc.cc); err != nil && tc.mustPass { - t.Fatalf("s.SetTransportProtocolOption(%v, %v) = %v, want not-nil", tcp.ProtocolNumber, tc.cc, err) + var oldCC tcpip.CongestionControlOption + if err := s.TransportProtocolOption(tcp.ProtocolNumber, &oldCC); err != nil { + t.Fatalf("s.TransportProtocolOption(%v, %v) = %v", tcp.ProtocolNumber, &oldCC, err) } - var cc tcp.CongestionControlOption + if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tc.cc); err != tc.err { + t.Fatalf("s.SetTransportProtocolOption(%v, %v) = %v, want %v", tcp.ProtocolNumber, tc.cc, err, tc.err) + } + + var cc tcpip.CongestionControlOption if err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil { t.Fatalf("s.TransportProtocolOption(%v, %v) = %v", tcp.ProtocolNumber, &cc, err) } - if got, want := cc, tc.cc; got != want { + + got, want := cc, oldCC + // If SetTransportProtocolOption is expected to succeed + // then the returned value for congestion 
control should + // match the one specified in the + // SetTransportProtocolOption call above, else it should + // be what it was before the call to + // SetTransportProtocolOption. + if tc.err == nil { + want = tc.cc + } + if got != want { t.Fatalf("got congestion control: %v, want: %v", got, want) } }) } } -func TestAvailableCongestionControl(t *testing.T) { +func TestStackAvailableCongestionControl(t *testing.T) { c := context.New(t, 1500) defer c.Cleanup() s := c.Stack() // Query permitted congestion control algorithms. - var aCC tcp.AvailableCongestionControlOption + var aCC tcpip.AvailableCongestionControlOption if err := s.TransportProtocolOption(tcp.ProtocolNumber, &aCC); err != nil { t.Fatalf("s.TransportProtocolOption(%v, %v) = %v", tcp.ProtocolNumber, &aCC, err) } - if got, want := aCC, tcp.AvailableCongestionControlOption("reno cubic"); got != want { - t.Fatalf("got tcp.AvailableCongestionControlOption: %v, want: %v", got, want) + if got, want := aCC, tcpip.AvailableCongestionControlOption("reno cubic"); got != want { + t.Fatalf("got tcpip.AvailableCongestionControlOption: %v, want: %v", got, want) } } -func TestSetAvailableCongestionControl(t *testing.T) { +func TestStackSetAvailableCongestionControl(t *testing.T) { c := context.New(t, 1500) defer c.Cleanup() s := c.Stack() // Setting AvailableCongestionControlOption should fail. - aCC := tcp.AvailableCongestionControlOption("xyz") + aCC := tcpip.AvailableCongestionControlOption("xyz") if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &aCC); err == nil { t.Fatalf("s.TransportProtocolOption(%v, %v) = nil, want non-nil", tcp.ProtocolNumber, &aCC) } // Verify that we still get the expected list of congestion control options. - var cc tcp.AvailableCongestionControlOption + var cc tcpip.AvailableCongestionControlOption if err := s.TransportProtocolOption(tcp.ProtocolNumber, &cc); err != nil { t.Fatalf("s.TransportProtocolOption(%v, %v) = %v", tcp.ProtocolNumber, &cc, err) } - if got, want := cc, tcp.AvailableCongestionControlOption("reno cubic"); got != want { - t.Fatalf("got tcp.AvailableCongestionControlOption: %v, want: %v", got, want) + if got, want := cc, tcpip.AvailableCongestionControlOption("reno cubic"); got != want { + t.Fatalf("got tcpip.AvailableCongestionControlOption: %v, want: %v", got, want) + } +} + +func TestEndpointSetCongestionControl(t *testing.T) { + testCases := []struct { + cc tcpip.CongestionControlOption + err *tcpip.Error + }{ + {"reno", nil}, + {"cubic", nil}, + {"blahblah", tcpip.ErrNoSuchFile}, + } + + for _, connected := range []bool{false, true} { + for _, tc := range testCases { + t.Run(fmt.Sprintf("SetSockOpt(.., %v) w/ connected = %v", tc.cc, connected), func(t *testing.T) { + c := context.New(t, 1500) + defer c.Cleanup() + + // Create TCP endpoint. 
+ var err *tcpip.Error + c.EP, err = c.Stack().NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ) + if err != nil { + t.Fatalf("NewEndpoint failed: %v", err) + } + + var oldCC tcpip.CongestionControlOption + if err := c.EP.GetSockOpt(&oldCC); err != nil { + t.Fatalf("c.EP.SockOpt(%v) = %v", &oldCC, err) + } + + if connected { + c.Connect(789 /* iss */, 32768 /* rcvWnd */, nil) + } + + if err := c.EP.SetSockOpt(tc.cc); err != tc.err { + t.Fatalf("c.EP.SetSockOpt(%v) = %v, want %v", tc.cc, err, tc.err) + } + + var cc tcpip.CongestionControlOption + if err := c.EP.GetSockOpt(&cc); err != nil { + t.Fatalf("c.EP.SockOpt(%v) = %v", &cc, err) + } + + got, want := cc, oldCC + // If SetSockOpt is expected to succeed then the + // returned value for congestion control should match + // the one specified in the SetSockOpt above, else it + // should be what it was before the call to SetSockOpt. + if tc.err == nil { + want = tc.cc + } + if got != want { + t.Fatalf("got congestion control: %v, want: %v", got, want) + } + }) + } } } func enableCUBIC(t *testing.T, c *context.Context) { t.Helper() - opt := tcp.CongestionControlOption("cubic") + opt := tcpip.CongestionControlOption("cubic") if err := c.Stack().SetTransportProtocolOption(tcp.ProtocolNumber, opt); err != nil { t.Fatalf("c.s.SetTransportProtocolOption(tcp.ProtocolNumber, %v = %v", opt, err) } diff --git a/pkg/tcpip/transport/tcp/testing/context/context.go b/pkg/tcpip/transport/tcp/testing/context/context.go index 69a43b6f4..a4d89e24d 100644 --- a/pkg/tcpip/transport/tcp/testing/context/context.go +++ b/pkg/tcpip/transport/tcp/testing/context/context.go @@ -520,35 +520,21 @@ func (c *Context) CreateConnected(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf c.CreateConnectedWithRawOptions(iss, rcvWnd, epRcvBuf, nil) } -// CreateConnectedWithRawOptions creates a connected TCP endpoint and sends -// the specified option bytes as the Option field in the initial SYN packet. +// Connect performs the 3-way handshake for c.EP with the provided Initial +// Sequence Number (iss) and receive window(rcvWnd) and any options if +// specified. // // It also sets the receive buffer for the endpoint to the specified // value in epRcvBuf. -func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf *tcpip.ReceiveBufferSizeOption, options []byte) { - // Create TCP endpoint. - var err *tcpip.Error - c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ) - if err != nil { - c.t.Fatalf("NewEndpoint failed: %v", err) - } - if got, want := tcp.EndpointState(c.EP.State()), tcp.StateInitial; got != want { - c.t.Errorf("Unexpected endpoint state: want %v, got %v", want, got) - } - - if epRcvBuf != nil { - if err := c.EP.SetSockOpt(*epRcvBuf); err != nil { - c.t.Fatalf("SetSockOpt failed failed: %v", err) - } - } - +// +// PreCondition: c.EP must already be created. +func (c *Context) Connect(iss seqnum.Value, rcvWnd seqnum.Size, options []byte) { // Start connection attempt. waitEntry, notifyCh := waiter.NewChannelEntry(nil) c.WQ.EventRegister(&waitEntry, waiter.EventOut) defer c.WQ.EventUnregister(&waitEntry) - err = c.EP.Connect(tcpip.FullAddress{Addr: TestAddr, Port: TestPort}) - if err != tcpip.ErrConnectStarted { + if err := c.EP.Connect(tcpip.FullAddress{Addr: TestAddr, Port: TestPort}); err != tcpip.ErrConnectStarted { c.t.Fatalf("Unexpected return value from Connect: %v", err) } @@ -590,8 +576,7 @@ func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum. 
// Wait for connection to be established. select { case <-notifyCh: - err = c.EP.GetSockOpt(tcpip.ErrorOption{}) - if err != nil { + if err := c.EP.GetSockOpt(tcpip.ErrorOption{}); err != nil { c.t.Fatalf("Unexpected error when connecting: %v", err) } case <-time.After(1 * time.Second): @@ -604,6 +589,27 @@ func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum. c.Port = tcpHdr.SourcePort() } +// CreateConnectedWithRawOptions creates a connected TCP endpoint and sends +// the specified option bytes as the Option field in the initial SYN packet. +// +// It also sets the receive buffer for the endpoint to the specified +// value in epRcvBuf. +func (c *Context) CreateConnectedWithRawOptions(iss seqnum.Value, rcvWnd seqnum.Size, epRcvBuf *tcpip.ReceiveBufferSizeOption, options []byte) { + // Create TCP endpoint. + var err *tcpip.Error + c.EP, err = c.s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &c.WQ) + if err != nil { + c.t.Fatalf("NewEndpoint failed: %v", err) + } + + if epRcvBuf != nil { + if err := c.EP.SetSockOpt(*epRcvBuf); err != nil { + c.t.Fatalf("SetSockOpt failed failed: %v", err) + } + } + c.Connect(iss, rcvWnd, options) +} + // RawEndpoint is just a small wrapper around a TCP endpoint's state to make // sending data and ACK packets easy while being able to manipulate the sequence // numbers and timestamp values as needed. diff --git a/test/syscalls/linux/socket_ip_tcp_generic.cc b/test/syscalls/linux/socket_ip_tcp_generic.cc index 5b198f49d..0b76280a7 100644 --- a/test/syscalls/linux/socket_ip_tcp_generic.cc +++ b/test/syscalls/linux/socket_ip_tcp_generic.cc @@ -592,5 +592,109 @@ TEST_P(TCPSocketPairTest, MsgTruncMsgPeek) { EXPECT_EQ(0, memcmp(received_data2, sent_data, sizeof(sent_data))); } +TEST_P(TCPSocketPairTest, SetCongestionControlSucceedsForSupported) { + // This is Linux's net/tcp.h TCP_CA_NAME_MAX. + const int kTcpCaNameMax = 16; + + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + // Netstack only supports reno & cubic so we only test these two values here. + { + const char kSetCC[kTcpCaNameMax] = "reno"; + ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &kSetCC, strlen(kSetCC)), + SyscallSucceedsWithValue(0)); + + char got_cc[kTcpCaNameMax]; + memset(got_cc, '1', sizeof(got_cc)); + socklen_t optlen = sizeof(got_cc); + ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC))); + } + { + const char kSetCC[kTcpCaNameMax] = "cubic"; + ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &kSetCC, strlen(kSetCC)), + SyscallSucceedsWithValue(0)); + + char got_cc[kTcpCaNameMax]; + memset(got_cc, '1', sizeof(got_cc)); + socklen_t optlen = sizeof(got_cc); + ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC))); + } +} + +TEST_P(TCPSocketPairTest, SetGetTCPCongestionShortReadBuffer) { + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + { + // Verify that getsockopt/setsockopt work with buffers smaller than + // kTcpCaNameMax. 
+ const char kSetCC[] = "cubic"; + ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &kSetCC, strlen(kSetCC)), + SyscallSucceedsWithValue(0)); + + char got_cc[sizeof(kSetCC)]; + socklen_t optlen = sizeof(got_cc); + ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(got_cc))); + } +} + +TEST_P(TCPSocketPairTest, SetGetTCPCongestionLargeReadBuffer) { + // This is Linux's net/tcp.h TCP_CA_NAME_MAX. + const int kTcpCaNameMax = 16; + + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + { + // Verify that getsockopt works with buffers larger than + // kTcpCaNameMax. + const char kSetCC[] = "cubic"; + ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &kSetCC, strlen(kSetCC)), + SyscallSucceedsWithValue(0)); + + char got_cc[kTcpCaNameMax + 5]; + socklen_t optlen = sizeof(got_cc); + ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + // Linux copies the minimum of kTcpCaNameMax or the length of the passed in + // buffer and sets optlen to the number of bytes actually copied + // irrespective of the actual length of the congestion control name. + EXPECT_EQ(kTcpCaNameMax, optlen); + EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC))); + } +} + +TEST_P(TCPSocketPairTest, SetCongestionControlFailsForUnsupported) { + // This is Linux's net/tcp.h TCP_CA_NAME_MAX. + const int kTcpCaNameMax = 16; + + auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); + char old_cc[kTcpCaNameMax]; + socklen_t optlen; + ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &old_cc, &optlen), + SyscallSucceedsWithValue(0)); + + const char kSetCC[] = "invalid_ca_cc"; + ASSERT_THAT(setsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &kSetCC, strlen(kSetCC)), + SyscallFailsWithErrno(ENOENT)); + + char got_cc[kTcpCaNameMax]; + ASSERT_THAT(getsockopt(sockets->first_fd(), IPPROTO_TCP, TCP_CONGESTION, + &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + EXPECT_EQ(0, memcmp(got_cc, old_cc, sizeof(old_cc))); +} + } // namespace testing } // namespace gvisor diff --git a/test/syscalls/linux/tcp_socket.cc b/test/syscalls/linux/tcp_socket.cc index e3f9f9f9d..e95b644ac 100644 --- a/test/syscalls/linux/tcp_socket.cc +++ b/test/syscalls/linux/tcp_socket.cc @@ -751,6 +751,133 @@ TEST_P(SimpleTcpSocketTest, NonBlockingConnectRefused) { EXPECT_THAT(close(s.release()), SyscallSucceeds()); } +// Test that setting a supported congestion control algorithm succeeds for an +// unconnected TCP socket +TEST_P(SimpleTcpSocketTest, SetCongestionControlSucceedsForSupported) { + // This is Linux's net/tcp.h TCP_CA_NAME_MAX. + const int kTcpCaNameMax = 16; + + FileDescriptor s = + ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP)); + { + const char kSetCC[kTcpCaNameMax] = "reno"; + ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC, + strlen(kSetCC)), + SyscallSucceedsWithValue(0)); + + char got_cc[kTcpCaNameMax]; + memset(got_cc, '1', sizeof(got_cc)); + socklen_t optlen = sizeof(got_cc); + ASSERT_THAT( + getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + // We ignore optlen here as the linux kernel sets optlen to the lower of the + // size of the buffer passed in or kTcpCaNameMax and not the length of the + // congestion control algorithm's actual name. 
+ EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kTcpCaNameMax))); + } + { + const char kSetCC[kTcpCaNameMax] = "cubic"; + ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC, + strlen(kSetCC)), + SyscallSucceedsWithValue(0)); + + char got_cc[kTcpCaNameMax]; + memset(got_cc, '1', sizeof(got_cc)); + socklen_t optlen = sizeof(got_cc); + ASSERT_THAT( + getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + // We ignore optlen here as the linux kernel sets optlen to the lower of the + // size of the buffer passed in or kTcpCaNameMax and not the length of the + // congestion control algorithm's actual name. + EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kTcpCaNameMax))); + } +} + +// This test verifies that a getsockopt(...TCP_CONGESTION) behaviour is +// consistent between linux and gvisor when the passed in buffer is smaller than +// kTcpCaNameMax. +TEST_P(SimpleTcpSocketTest, SetGetTCPCongestionShortReadBuffer) { + FileDescriptor s = + ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP)); + { + // Verify that getsockopt/setsockopt work with buffers smaller than + // kTcpCaNameMax. + const char kSetCC[] = "cubic"; + ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC, + strlen(kSetCC)), + SyscallSucceedsWithValue(0)); + + char got_cc[sizeof(kSetCC)]; + socklen_t optlen = sizeof(got_cc); + ASSERT_THAT( + getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + EXPECT_EQ(sizeof(got_cc), optlen); + EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(got_cc))); + } +} + +// This test verifies that a getsockopt(...TCP_CONGESTION) behaviour is +// consistent between linux and gvisor when the passed in buffer is larger than +// kTcpCaNameMax. +TEST_P(SimpleTcpSocketTest, SetGetTCPCongestionLargeReadBuffer) { + // This is Linux's net/tcp.h TCP_CA_NAME_MAX. + const int kTcpCaNameMax = 16; + + FileDescriptor s = + ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP)); + { + // Verify that getsockopt works with buffers larger than + // kTcpCaNameMax. + const char kSetCC[] = "cubic"; + ASSERT_THAT(setsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &kSetCC, + strlen(kSetCC)), + SyscallSucceedsWithValue(0)); + + char got_cc[kTcpCaNameMax + 5]; + socklen_t optlen = sizeof(got_cc); + ASSERT_THAT( + getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + // Linux copies the minimum of kTcpCaNameMax or the length of the passed in + // buffer and sets optlen to the number of bytes actually copied + // irrespective of the actual length of the congestion control name. + EXPECT_EQ(kTcpCaNameMax, optlen); + EXPECT_EQ(0, memcmp(got_cc, kSetCC, sizeof(kSetCC))); + } +} + +// Test that setting an unsupported congestion control algorithm fails for an +// unconnected TCP socket. +TEST_P(SimpleTcpSocketTest, SetCongestionControlFailsForUnsupported) { + // This is Linux's net/tcp.h TCP_CA_NAME_MAX. 
+ const int kTcpCaNameMax = 16; + + FileDescriptor s = + ASSERT_NO_ERRNO_AND_VALUE(Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP)); + char old_cc[kTcpCaNameMax]; + socklen_t optlen = sizeof(old_cc); + ASSERT_THAT( + getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &old_cc, &optlen), + SyscallSucceedsWithValue(0)); + + const char kSetCC[] = "invalid_ca_kSetCC"; + ASSERT_THAT( + setsockopt(s.get(), SOL_TCP, TCP_CONGESTION, &kSetCC, strlen(kSetCC)), + SyscallFailsWithErrno(ENOENT)); + + char got_cc[kTcpCaNameMax]; + ASSERT_THAT( + getsockopt(s.get(), IPPROTO_TCP, TCP_CONGESTION, &got_cc, &optlen), + SyscallSucceedsWithValue(0)); + // We ignore optlen here as the linux kernel sets optlen to the lower of the + // size of the buffer passed in or kTcpCaNameMax and not the length of the + // congestion control algorithm's actual name. + EXPECT_EQ(0, memcmp(got_cc, old_cc, sizeof(kTcpCaNameMax))); +} + INSTANTIATE_TEST_SUITE_P(AllInetTests, SimpleTcpSocketTest, ::testing::Values(AF_INET, AF_INET6)); -- cgit v1.2.3
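
As a brief illustration of the userspace-visible behaviour this last patch adds (this sketch is not part of the patch series itself), the snippet below shows TCP_CONGESTION being set and read back on an ordinary TCP socket, which is the pattern the new tcp_socket.cc and socket_ip_tcp_generic.cc tests exercise. It assumes a Linux-style libc and socket headers; the local constant kTcpCaNameMax simply mirrors TCP_CA_NAME_MAX (16) from Linux's net/tcp.h and is not an exported API, and the error handling is reduced to perror() for brevity.

    // Minimal sketch of getting/setting TCP_CONGESTION on a TCP socket.
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main() {
      // Mirrors TCP_CA_NAME_MAX from Linux's net/tcp.h: getsockopt copies at
      // most this many bytes and sets optlen to min(buffer size, 16).
      constexpr socklen_t kTcpCaNameMax = 16;

      int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
      if (fd < 0) {
        perror("socket");
        return 1;
      }

      // Select a supported algorithm; netstack currently knows "reno" and
      // "cubic". An unknown name fails with ENOENT.
      const char kSetCC[] = "cubic";
      if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, kSetCC, strlen(kSetCC)) != 0) {
        perror("setsockopt(TCP_CONGESTION)");
      }

      // Read the algorithm back.
      char got_cc[kTcpCaNameMax] = {};
      socklen_t optlen = sizeof(got_cc);
      if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, got_cc, &optlen) == 0) {
        printf("congestion control: %.*s (optlen=%u)\n",
               static_cast<int>(optlen), got_cc, optlen);
      }

      close(fd);
      return 0;
    }

Against gVisor's netstack only "reno" and "cubic" are accepted; any other name now fails with ENOENT rather than EINVAL, matching Linux, and a subsequent getsockopt still reports the previously configured algorithm.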