From af323eb7c1830053627de6161f8ce73ac5f06d4e Mon Sep 17 00:00:00 2001
From: Ting-Yu Wang
Date: Thu, 14 Nov 2019 17:02:59 -0800
Subject: Fix return codes for {get,set}sockopt for some nullptr cases.

Updates #1092

PiperOrigin-RevId: 280547239
---
 test/syscalls/linux/socket_ip_unbound.cc | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

(limited to 'test/syscalls/linux/socket_ip_unbound.cc')

diff --git a/test/syscalls/linux/socket_ip_unbound.cc b/test/syscalls/linux/socket_ip_unbound.cc
index b02872308..b6754111f 100644
--- a/test/syscalls/linux/socket_ip_unbound.cc
+++ b/test/syscalls/linux/socket_ip_unbound.cc
@@ -354,6 +354,38 @@ TEST_P(IPUnboundSocketTest, InvalidNegativeTOS) {
   EXPECT_EQ(get, expect);
 }
 
+TEST_P(IPUnboundSocketTest, NullTOS) {
+  auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
+  TOSOption t = GetTOSOption(GetParam().domain);
+  int set_sz = sizeof(int);
+  if (GetParam().domain == AF_INET) {
+    EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, nullptr, set_sz),
+                SyscallFailsWithErrno(EFAULT));
+  } else {  // AF_INET6
+    // The AF_INET6 behavior is not yet compatible. gVisor reads optval from
+    // user memory in the syscall handler, and it would take substantial
+    // refactoring to implement Linux's behavior just for IPv6.
+    if (IsRunningOnGvisor()) {
+      EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, nullptr, set_sz),
+                  SyscallFailsWithErrno(EFAULT));
+    } else {
+      // Linux's IPv6 stack treats a nullptr optval as an input of 0, so the
+      // call succeeds. (net/ipv6/ipv6_sockglue.c, do_ipv6_setsockopt())
+      //
+      // Linux's implementation arguably needs fixing, as passing a nullptr
+      // optval with a non-zero optlen may not be valid.
+      EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, nullptr, set_sz),
+                  SyscallSucceedsWithValue(0));
+    }
+  }
+  socklen_t get_sz = sizeof(int);
+  EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, nullptr, &get_sz),
+              SyscallFailsWithErrno(EFAULT));
+  int get = -1;
+  EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, nullptr),
+              SyscallFailsWithErrno(EFAULT));
+}
+
 INSTANTIATE_TEST_SUITE_P(
     IPUnboundSockets, IPUnboundSocketTest,
     ::testing::ValuesIn(VecCat(VecCat(
-- cgit v1.2.3


From b3ae8a62cfdf13821d35467d4150ed983ac556f1 Mon Sep 17 00:00:00 2001
From: Ting-Yu Wang
Date: Wed, 8 Jan 2020 16:29:12 -0800
Subject: Fix slice bounds out of range panic in parsing socket control message.

Panic found by syzkaller.
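The bug class is worth spelling out: a cmsghdr can declare, via cmsg_len, more
payload than the control buffer actually carries, and a parser that trusts the
header then reads past the end of the buffer. The sketch below is a minimal
user-space analogue of the bounds check this change adds to
pkg/sentry/socket/control/control.go; it is illustrative only (ParseTOSCmsg is
a hypothetical helper, not sentry code) and assumes a single cmsg for brevity:

    // Validate the declared control-message length against the real buffer
    // size before touching the payload; reject inconsistent headers.
    #include <cerrno>
    #include <cstddef>
    #include <cstring>
    #include <sys/socket.h>

    int ParseTOSCmsg(const char* buf, size_t len, unsigned char* tos) {
      cmsghdr hdr;
      if (len < sizeof(hdr)) return -EINVAL;  // buffer too short for a header
      memcpy(&hdr, buf, sizeof(hdr));
      // cmsg_len covers the header plus payload; all three conditions must
      // hold or the payload read below would run off the end of buf.
      if (hdr.cmsg_len < sizeof(hdr) || hdr.cmsg_len > len ||
          hdr.cmsg_len - sizeof(hdr) < sizeof(*tos)) {
        return -EINVAL;  // the length check is the case this change adds
      }
      memcpy(tos, buf + sizeof(hdr), sizeof(*tos));
      return 0;
    }

The InsufficientBufferTOS test added below drives the same condition from user
space through sendmsg and expects EINVAL.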
PiperOrigin-RevId: 288799046 --- pkg/sentry/socket/control/control.go | 6 ++++++ test/syscalls/linux/socket_ip_unbound.cc | 33 ++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) (limited to 'test/syscalls/linux/socket_ip_unbound.cc') diff --git a/pkg/sentry/socket/control/control.go b/pkg/sentry/socket/control/control.go index af1a4e95f..4301b697c 100644 --- a/pkg/sentry/socket/control/control.go +++ b/pkg/sentry/socket/control/control.go @@ -471,6 +471,9 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.Con case linux.SOL_IP: switch h.Type { case linux.IP_TOS: + if length < linux.SizeOfControlMessageTOS { + return socket.ControlMessages{}, syserror.EINVAL + } cmsgs.IP.HasTOS = true binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTOS], usermem.ByteOrder, &cmsgs.IP.TOS) i += AlignUp(length, width) @@ -481,6 +484,9 @@ func Parse(t *kernel.Task, socketOrEndpoint interface{}, buf []byte) (socket.Con case linux.SOL_IPV6: switch h.Type { case linux.IPV6_TCLASS: + if length < linux.SizeOfControlMessageTClass { + return socket.ControlMessages{}, syserror.EINVAL + } cmsgs.IP.HasTClass = true binary.Unmarshal(buf[i:i+linux.SizeOfControlMessageTClass], usermem.ByteOrder, &cmsgs.IP.TClass) i += AlignUp(length, width) diff --git a/test/syscalls/linux/socket_ip_unbound.cc b/test/syscalls/linux/socket_ip_unbound.cc index b6754111f..ca597e267 100644 --- a/test/syscalls/linux/socket_ip_unbound.cc +++ b/test/syscalls/linux/socket_ip_unbound.cc @@ -129,6 +129,7 @@ TEST_P(IPUnboundSocketTest, InvalidNegativeTtl) { struct TOSOption { int level; int option; + int cmsg_level; }; constexpr int INET_ECN_MASK = 3; @@ -139,10 +140,12 @@ static TOSOption GetTOSOption(int domain) { case AF_INET: opt.level = IPPROTO_IP; opt.option = IP_TOS; + opt.cmsg_level = SOL_IP; break; case AF_INET6: opt.level = IPPROTO_IPV6; opt.option = IPV6_TCLASS; + opt.cmsg_level = SOL_IPV6; break; } return opt; @@ -386,6 +389,36 @@ TEST_P(IPUnboundSocketTest, NullTOS) { SyscallFailsWithErrno(EFAULT)); } +TEST_P(IPUnboundSocketTest, InsufficientBufferTOS) { + SKIP_IF(GetParam().protocol == IPPROTO_TCP); + + auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket()); + TOSOption t = GetTOSOption(GetParam().domain); + + in_addr addr4; + in6_addr addr6; + ASSERT_THAT(inet_pton(AF_INET, "127.0.0.1", &addr4), ::testing::Eq(1)); + ASSERT_THAT(inet_pton(AF_INET6, "fe80::", &addr6), ::testing::Eq(1)); + + cmsghdr cmsg = {}; + cmsg.cmsg_len = sizeof(cmsg); + cmsg.cmsg_level = t.cmsg_level; + cmsg.cmsg_type = t.option; + + msghdr msg = {}; + msg.msg_control = &cmsg; + msg.msg_controllen = sizeof(cmsg); + if (GetParam().domain == AF_INET) { + msg.msg_name = &addr4; + msg.msg_namelen = sizeof(addr4); + } else { + msg.msg_name = &addr6; + msg.msg_namelen = sizeof(addr6); + } + + EXPECT_THAT(sendmsg(socket->get(), &msg, 0), SyscallFailsWithErrno(EINVAL)); +} + INSTANTIATE_TEST_SUITE_P( IPUnboundSockets, IPUnboundSocketTest, ::testing::ValuesIn(VecCat(VecCat( -- cgit v1.2.3 From 2296b4734462b6eeef383ea58e2b1b0b1a214d76 Mon Sep 17 00:00:00 2001 From: Adin Scannell Date: Tue, 21 Jan 2020 16:16:51 -0800 Subject: Change to standard types. 
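This is a mechanical migration from gVisor's legacy integer aliases (int32,
uint64, ...) to the standard fixed-width types that <cstdint> provides
(int32_t, uint64_t, ...); widths and behavior are unchanged. A representative
before/after pair, condensed from the kind of lines this diff touches
(illustrative, not an exact quote of the diff):

    #include <cstdint>
    #include "absl/flags/flag.h"

    // Before: ABSL_FLAG(int32, scratch_uid, 65534, "scratch UID");
    ABSL_FLAG(int32_t, scratch_uid, 65534, "scratch UID");

    // Before: uint64 starttime; (gVisor-local alias)
    uint64_t starttime = 0;  // standard spelling, same 64-bit width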
PiperOrigin-RevId: 290846481 --- test/syscalls/linux/aio.cc | 2 +- test/syscalls/linux/chown.cc | 6 +- test/syscalls/linux/chroot.cc | 2 +- test/syscalls/linux/clock_gettime.cc | 12 ++-- test/syscalls/linux/eventfd.cc | 22 ++++---- test/syscalls/linux/exceptions.cc | 66 +++++++++++----------- test/syscalls/linux/exec.cc | 10 ++-- test/syscalls/linux/exec_binary.cc | 18 +++--- test/syscalls/linux/fcntl.cc | 22 ++++---- test/syscalls/linux/fork.cc | 2 +- test/syscalls/linux/futex.cc | 2 +- test/syscalls/linux/inotify.cc | 24 ++++---- test/syscalls/linux/ip_socket_test_util.cc | 4 +- test/syscalls/linux/ip_socket_test_util.h | 4 +- test/syscalls/linux/itimer.cc | 4 +- test/syscalls/linux/kill.cc | 4 +- test/syscalls/linux/link.cc | 5 +- test/syscalls/linux/memfd.cc | 2 +- test/syscalls/linux/memory_accounting.cc | 14 ++--- test/syscalls/linux/mempolicy.cc | 28 ++++----- test/syscalls/linux/mmap.cc | 16 +++--- test/syscalls/linux/open.cc | 2 +- test/syscalls/linux/partial_bad_buffer.cc | 2 +- test/syscalls/linux/prctl_setuid.cc | 2 +- test/syscalls/linux/proc.cc | 42 +++++++------- test/syscalls/linux/proc_net_tcp.cc | 62 ++++++++++---------- test/syscalls/linux/proc_net_udp.cc | 32 +++++------ test/syscalls/linux/proc_net_unix.cc | 12 ++-- test/syscalls/linux/proc_pid_uid_gid_map.cc | 26 ++++----- test/syscalls/linux/ptrace.cc | 4 +- test/syscalls/linux/pty.cc | 14 ++--- test/syscalls/linux/pwrite64.cc | 4 +- test/syscalls/linux/raw_socket_hdrincl.cc | 4 +- test/syscalls/linux/rseq.cc | 2 +- test/syscalls/linux/rseq/critical.h | 2 +- test/syscalls/linux/rseq/rseq.cc | 50 ++++++++-------- test/syscalls/linux/rseq/types.h | 16 +++--- test/syscalls/linux/seccomp.cc | 14 ++--- test/syscalls/linux/semaphore.cc | 10 ++-- test/syscalls/linux/shm.cc | 10 ++-- test/syscalls/linux/sigaltstack.cc | 2 +- test/syscalls/linux/sigiret.cc | 14 ++--- .../linux/socket_bind_to_device_distribution.cc | 14 ++--- test/syscalls/linux/socket_generic.cc | 2 +- test/syscalls/linux/socket_inet_loopback.cc | 56 +++++++++--------- test/syscalls/linux/socket_ip_unbound.cc | 8 +-- test/syscalls/linux/socket_netdevice.cc | 8 +-- test/syscalls/linux/socket_netlink_route.cc | 30 +++++----- test/syscalls/linux/socket_netlink_util.cc | 4 +- test/syscalls/linux/socket_netlink_util.h | 2 +- test/syscalls/linux/socket_test_util.cc | 2 +- test/syscalls/linux/splice.cc | 2 +- test/syscalls/linux/stat.cc | 40 ++++++------- test/syscalls/linux/sticky.cc | 4 +- test/syscalls/linux/sysret.cc | 8 +-- test/syscalls/linux/tcp_socket.cc | 2 +- test/syscalls/linux/time.cc | 4 +- test/syscalls/linux/timerfd.cc | 48 ++++++++-------- test/syscalls/linux/udp_socket_test_cases.cc | 14 ++--- test/syscalls/linux/uidgid.cc | 8 +-- test/syscalls/linux/utimes.cc | 25 ++++---- test/syscalls/linux/vfork.cc | 14 ++--- test/syscalls/linux/vsyscall.cc | 2 +- test/syscalls/linux/wait.cc | 18 +++--- test/util/mount_util.h | 6 +- test/util/multiprocess_util.cc | 2 +- test/util/multiprocess_util.h | 5 +- test/util/proc_util.cc | 2 +- test/util/temp_path.cc | 2 +- test/util/test_util.cc | 20 +++---- test/util/test_util.h | 12 ++-- test/util/test_util_test.cc | 4 +- 72 files changed, 483 insertions(+), 480 deletions(-) (limited to 'test/syscalls/linux/socket_ip_unbound.cc') diff --git a/test/syscalls/linux/aio.cc b/test/syscalls/linux/aio.cc index 28592bc8f..a33daff17 100644 --- a/test/syscalls/linux/aio.cc +++ b/test/syscalls/linux/aio.cc @@ -183,7 +183,7 @@ TEST_F(AIOTest, BadWrite) { // Verify that it fails with the right error code. 
EXPECT_EQ(events[0].data, 0x123); - EXPECT_EQ(events[0].obj, reinterpret_cast(&cb)); + EXPECT_EQ(events[0].obj, reinterpret_cast(&cb)); EXPECT_LT(events[0].res, 0); } diff --git a/test/syscalls/linux/chown.cc b/test/syscalls/linux/chown.cc index 1c00e2731..7a28b674d 100644 --- a/test/syscalls/linux/chown.cc +++ b/test/syscalls/linux/chown.cc @@ -31,9 +31,9 @@ #include "test/util/test_util.h" #include "test/util/thread_util.h" -ABSL_FLAG(int32, scratch_uid1, 65534, "first scratch UID"); -ABSL_FLAG(int32, scratch_uid2, 65533, "second scratch UID"); -ABSL_FLAG(int32, scratch_gid, 65534, "first scratch GID"); +ABSL_FLAG(int32_t, scratch_uid1, 65534, "first scratch UID"); +ABSL_FLAG(int32_t, scratch_uid2, 65533, "second scratch UID"); +ABSL_FLAG(int32_t, scratch_gid, 65534, "first scratch GID"); namespace gvisor { namespace testing { diff --git a/test/syscalls/linux/chroot.cc b/test/syscalls/linux/chroot.cc index 27e057086..04bc2d7b9 100644 --- a/test/syscalls/linux/chroot.cc +++ b/test/syscalls/linux/chroot.cc @@ -253,7 +253,7 @@ TEST(ChrootTest, ProcMemSelfMapsNoEscapeProcOpen) { // Mmap the newly created file. void* foo_map = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE, foo.get(), 0); - ASSERT_THAT(reinterpret_cast(foo_map), SyscallSucceeds()); + ASSERT_THAT(reinterpret_cast(foo_map), SyscallSucceeds()); // Always unmap. auto cleanup_map = Cleanup( diff --git a/test/syscalls/linux/clock_gettime.cc b/test/syscalls/linux/clock_gettime.cc index 1d5b5af94..7f6015049 100644 --- a/test/syscalls/linux/clock_gettime.cc +++ b/test/syscalls/linux/clock_gettime.cc @@ -34,7 +34,7 @@ namespace testing { namespace { -int64 clock_gettime_nsecs(clockid_t id) { +int64_t clock_gettime_nsecs(clockid_t id) { struct timespec ts; TEST_PCHECK(clock_gettime(id, &ts) == 0); return (ts.tv_sec * 1000000000 + ts.tv_nsec); @@ -42,9 +42,9 @@ int64 clock_gettime_nsecs(clockid_t id) { // Spin on the CPU for at least ns nanoseconds, based on // CLOCK_THREAD_CPUTIME_ID. -void spin_ns(int64 ns) { - int64 start = clock_gettime_nsecs(CLOCK_THREAD_CPUTIME_ID); - int64 end = start + ns; +void spin_ns(int64_t ns) { + int64_t start = clock_gettime_nsecs(CLOCK_THREAD_CPUTIME_ID); + int64_t end = start + ns; do { constexpr int kLoopCount = 1000000; // large and arbitrary @@ -64,7 +64,7 @@ TEST(ClockGettime, CputimeId) { // the workers. Note that we test CLOCK_PROCESS_CPUTIME_ID by having the // workers execute in parallel and verifying that CLOCK_PROCESS_CPUTIME_ID // accumulates the runtime of all threads. - int64 start = clock_gettime_nsecs(CLOCK_PROCESS_CPUTIME_ID); + int64_t start = clock_gettime_nsecs(CLOCK_PROCESS_CPUTIME_ID); // Create a kNumThreads threads. std::list threads; @@ -76,7 +76,7 @@ TEST(ClockGettime, CputimeId) { t.Join(); } - int64 end = clock_gettime_nsecs(CLOCK_PROCESS_CPUTIME_ID); + int64_t end = clock_gettime_nsecs(CLOCK_PROCESS_CPUTIME_ID); // The aggregate time spent in the worker threads must be at least // 'kNumThreads' times the time each thread spun. 
diff --git a/test/syscalls/linux/eventfd.cc b/test/syscalls/linux/eventfd.cc index fed67a56e..367682c3d 100644 --- a/test/syscalls/linux/eventfd.cc +++ b/test/syscalls/linux/eventfd.cc @@ -37,7 +37,7 @@ TEST(EventfdTest, Nonblock) { FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE)); - uint64 l; + uint64_t l; ASSERT_THAT(read(efd.get(), &l, sizeof(l)), SyscallFailsWithErrno(EAGAIN)); l = 1; @@ -52,7 +52,7 @@ TEST(EventfdTest, Nonblock) { void* read_three_times(void* arg) { int efd = *reinterpret_cast(arg); - uint64 l; + uint64_t l; EXPECT_THAT(read(efd, &l, sizeof(l)), SyscallSucceedsWithValue(sizeof(l))); EXPECT_THAT(read(efd, &l, sizeof(l)), SyscallSucceedsWithValue(sizeof(l))); EXPECT_THAT(read(efd, &l, sizeof(l)), SyscallSucceedsWithValue(sizeof(l))); @@ -68,7 +68,7 @@ TEST(EventfdTest, BlockingWrite) { reinterpret_cast(&efd)), SyscallSucceeds()); - uint64 l = 1; + uint64_t l = 1; ASSERT_THAT(write(efd, &l, sizeof(l)), SyscallSucceeds()); EXPECT_EQ(l, 1); @@ -85,7 +85,7 @@ TEST(EventfdTest, SmallWrite) { FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE)); - uint64 l = 16; + uint64_t l = 16; ASSERT_THAT(write(efd.get(), &l, 4), SyscallFailsWithErrno(EINVAL)); } @@ -93,7 +93,7 @@ TEST(EventfdTest, SmallRead) { FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE)); - uint64 l = 1; + uint64_t l = 1; ASSERT_THAT(write(efd.get(), &l, sizeof(l)), SyscallSucceeds()); l = 0; @@ -104,7 +104,7 @@ TEST(EventfdTest, BigWrite) { FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE)); - uint64 big[16]; + uint64_t big[16]; big[0] = 16; ASSERT_THAT(write(efd.get(), big, sizeof(big)), SyscallSucceeds()); } @@ -113,10 +113,10 @@ TEST(EventfdTest, BigRead) { FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE)); - uint64 l = 1; + uint64_t l = 1; ASSERT_THAT(write(efd.get(), &l, sizeof(l)), SyscallSucceeds()); - uint64 big[16]; + uint64_t big[16]; ASSERT_THAT(read(efd.get(), big, sizeof(big)), SyscallSucceeds()); EXPECT_EQ(big[0], 1); } @@ -125,7 +125,7 @@ TEST(EventfdTest, BigWriteBigRead) { FileDescriptor efd = ASSERT_NO_ERRNO_AND_VALUE(NewEventFD(0, EFD_NONBLOCK | EFD_SEMAPHORE)); - uint64 l[16]; + uint64_t l[16]; l[0] = 16; ASSERT_THAT(write(efd.get(), l, sizeof(l)), SyscallSucceeds()); ASSERT_THAT(read(efd.get(), l, sizeof(l)), SyscallSucceeds()); @@ -150,7 +150,7 @@ TEST(EventfdTest, NotifyNonZero_NoRandomSave) { int wait_out = epoll_wait(epollfd.get(), &out_ev, 1, kEpollTimeoutMs); EXPECT_EQ(wait_out, 1); EXPECT_EQ(efd.get(), out_ev.data.fd); - uint64 val = 0; + uint64_t val = 0; ASSERT_THAT(read(efd.get(), &val, sizeof(val)), SyscallSucceeds()); EXPECT_EQ(val, 1); @@ -159,7 +159,7 @@ TEST(EventfdTest, NotifyNonZero_NoRandomSave) { // epoll_wait times out. ScopedThread t([&efd] { sleep(5); - uint64 val = 1; + uint64_t val = 1; EXPECT_THAT(write(efd.get(), &val, sizeof(val)), SyscallSucceedsWithValue(sizeof(val))); }); diff --git a/test/syscalls/linux/exceptions.cc b/test/syscalls/linux/exceptions.cc index 0b67eb0ad..3d564e720 100644 --- a/test/syscalls/linux/exceptions.cc +++ b/test/syscalls/linux/exceptions.cc @@ -24,20 +24,20 @@ namespace testing { // Default value for the x87 FPU control word. See Intel SDM Vol 1, Ch 8.1.5 // "x87 FPU Control Word". -constexpr uint16 kX87ControlWordDefault = 0x37f; +constexpr uint16_t kX87ControlWordDefault = 0x37f; // Mask for the divide-by-zero exception. 
-constexpr uint16 kX87ControlWordDiv0Mask = 1 << 2; +constexpr uint16_t kX87ControlWordDiv0Mask = 1 << 2; // Default value for the SSE control register (MXCSR). See Intel SDM Vol 1, Ch // 11.6.4 "Initialization of SSE/SSE3 Extensions". -constexpr uint32 kMXCSRDefault = 0x1f80; +constexpr uint32_t kMXCSRDefault = 0x1f80; // Mask for the divide-by-zero exception. -constexpr uint32 kMXCSRDiv0Mask = 1 << 9; +constexpr uint32_t kMXCSRDiv0Mask = 1 << 9; // Flag for a pending divide-by-zero exception. -constexpr uint32 kMXCSRDiv0Flag = 1 << 2; +constexpr uint32_t kMXCSRDiv0Flag = 1 << 2; void inline Halt() { asm("hlt\r\n"); } @@ -112,10 +112,10 @@ TEST(ExceptionTest, DivideByZero) { EXPECT_EXIT( { - uint32 remainder; - uint32 quotient; - uint32 divisor = 0; - uint64 value = 1; + uint32_t remainder; + uint32_t quotient; + uint32_t divisor = 0; + uint64_t value = 1; asm("divl 0(%2)\r\n" : "=d"(remainder), "=a"(quotient) : "r"(&divisor), "d"(value >> 32), "a"(value)); @@ -126,9 +126,9 @@ TEST(ExceptionTest, DivideByZero) { // By default, x87 exceptions are masked and simply return a default value. TEST(ExceptionTest, X87DivideByZeroMasked) { - int32 quotient; - int32 value = 1; - int32 divisor = 0; + int32_t quotient; + int32_t value = 1; + int32_t divisor = 0; asm("fildl %[value]\r\n" "fidivl %[divisor]\r\n" "fistpl %[quotient]\r\n" @@ -148,12 +148,12 @@ TEST(ExceptionTest, X87DivideByZeroUnmasked) { EXPECT_EXIT( { // Clear the divide by zero exception mask. - constexpr uint16 kControlWord = + constexpr uint16_t kControlWord = kX87ControlWordDefault & ~kX87ControlWordDiv0Mask; - int32 quotient; - int32 value = 1; - int32 divisor = 0; + int32_t quotient; + int32_t value = 1; + int32_t divisor = 0; asm volatile( "fldcw %[cw]\r\n" "fildl %[value]\r\n" @@ -176,12 +176,12 @@ TEST(ExceptionTest, X87StatusClobber) { EXPECT_EXIT( { // Clear the divide by zero exception mask. - constexpr uint16 kControlWord = + constexpr uint16_t kControlWord = kX87ControlWordDefault & ~kX87ControlWordDiv0Mask; - int32 quotient; - int32 value = 1; - int32 divisor = 0; + int32_t quotient; + int32_t value = 1; + int32_t divisor = 0; asm volatile( "fildl %[value]\r\n" "fidivl %[divisor]\r\n" @@ -208,10 +208,10 @@ TEST(ExceptionTest, X87StatusClobber) { // By default, SSE exceptions are masked and simply return a default value. TEST(ExceptionTest, SSEDivideByZeroMasked) { - uint32 status; - int32 quotient; - int32 value = 1; - int32 divisor = 0; + uint32_t status; + int32_t quotient; + int32_t value = 1; + int32_t divisor = 0; asm("cvtsi2ssl %[value], %%xmm0\r\n" "cvtsi2ssl %[divisor], %%xmm1\r\n" "divss %%xmm1, %%xmm0\r\n" @@ -233,11 +233,11 @@ TEST(ExceptionTest, SSEDivideByZeroUnmasked) { EXPECT_EXIT( { // Clear the divide by zero exception mask. - constexpr uint32 kMXCSR = kMXCSRDefault & ~kMXCSRDiv0Mask; + constexpr uint32_t kMXCSR = kMXCSRDefault & ~kMXCSRDiv0Mask; - int32 quotient; - int32 value = 1; - int32 divisor = 0; + int32_t quotient; + int32_t value = 1; + int32_t divisor = 0; asm volatile( "ldmxcsr %[mxcsr]\r\n" "cvtsi2ssl %[value], %%xmm0\r\n" @@ -254,10 +254,10 @@ TEST(ExceptionTest, SSEDivideByZeroUnmasked) { // Pending exceptions in the SSE status register are not clobbered by syscalls. 
TEST(ExceptionTest, SSEStatusClobber) { - uint32 mxcsr; - int32 quotient; - int32 value = 1; - int32 divisor = 0; + uint32_t mxcsr; + int32_t quotient; + int32_t value = 1; + int32_t divisor = 0; asm("cvtsi2ssl %[value], %%xmm0\r\n" "cvtsi2ssl %[divisor], %%xmm1\r\n" "divss %%xmm1, %%xmm0\r\n" @@ -336,7 +336,7 @@ TEST(ExceptionTest, AlignmentCheck) { SetAlignmentCheck(); for (int i = 0; i < 8; i++) { // At least 7/8 offsets will be unaligned here. - uint64* ptr = reinterpret_cast(&array[i]); + uint64_t* ptr = reinterpret_cast(&array[i]); asm("mov %0, 0(%0)\r\n" : : "r"(ptr) : "ax"); } }, diff --git a/test/syscalls/linux/exec.cc b/test/syscalls/linux/exec.cc index 9c5a11206..b5e0a512b 100644 --- a/test/syscalls/linux/exec.cc +++ b/test/syscalls/linux/exec.cc @@ -62,7 +62,7 @@ constexpr char kExecFromThread[] = "--exec_exec_from_thread"; // Runs file specified by dirfd and pathname with argv and checks that the exit // status is expect_status and that stderr contains expect_stderr. -void CheckExecHelper(const absl::optional dirfd, +void CheckExecHelper(const absl::optional dirfd, const std::string& pathname, const ExecveArray& argv, const ExecveArray& envv, const int flags, int expect_status, const std::string& expect_stderr) { @@ -143,15 +143,15 @@ void CheckExecHelper(const absl::optional dirfd, void CheckExec(const std::string& filename, const ExecveArray& argv, const ExecveArray& envv, int expect_status, const std::string& expect_stderr) { - CheckExecHelper(/*dirfd=*/absl::optional(), filename, argv, envv, + CheckExecHelper(/*dirfd=*/absl::optional(), filename, argv, envv, /*flags=*/0, expect_status, expect_stderr); } -void CheckExecveat(const int32 dirfd, const std::string& pathname, +void CheckExecveat(const int32_t dirfd, const std::string& pathname, const ExecveArray& argv, const ExecveArray& envv, const int flags, int expect_status, const std::string& expect_stderr) { - CheckExecHelper(absl::optional(dirfd), pathname, argv, envv, flags, + CheckExecHelper(absl::optional(dirfd), pathname, argv, envv, flags, expect_status, expect_stderr); } @@ -603,7 +603,7 @@ TEST(ExecveatTest, AbsolutePathWithFDCWD) { TEST(ExecveatTest, AbsolutePath) { std::string path = RunfilePath(kBasicWorkload); // File descriptor should be ignored when an absolute path is given. - const int32 badFD = -1; + const int32_t badFD = -1; CheckExecveat(badFD, path, {path}, {}, ArgEnvExitStatus(0, 0), 0, absl::StrCat(path, "\n")); } diff --git a/test/syscalls/linux/exec_binary.cc b/test/syscalls/linux/exec_binary.cc index 144bf45cf..736452b0c 100644 --- a/test/syscalls/linux/exec_binary.cc +++ b/test/syscalls/linux/exec_binary.cc @@ -700,7 +700,7 @@ TEST(ElfTest, PIE) { // The first segment really needs to start at 0 for a normal PIE binary, and // thus includes the headers. - const uint64 offset = elf.phdrs[1].p_offset; + const uint64_t offset = elf.phdrs[1].p_offset; elf.phdrs[1].p_offset = 0x0; elf.phdrs[1].p_vaddr = 0x0; elf.phdrs[1].p_filesz += offset; @@ -720,7 +720,7 @@ TEST(ElfTest, PIE) { struct user_regs_struct regs; ASSERT_THAT(ptrace(PTRACE_GETREGS, child, 0, ®s), SyscallSucceeds()); - const uint64 load_addr = regs.rip & ~(kPageSize - 1); + const uint64_t load_addr = regs.rip & ~(kPageSize - 1); EXPECT_THAT(child, ContainsMappings(std::vector({ // text page. 
@@ -789,7 +789,7 @@ TEST(ElfTest, PIENonZeroStart) { struct user_regs_struct regs; ASSERT_THAT(ptrace(PTRACE_GETREGS, child, 0, ®s), SyscallSucceeds()); - const uint64 load_addr = regs.rip & ~(kPageSize - 1); + const uint64_t load_addr = regs.rip & ~(kPageSize - 1); // The ELF is loaded at an arbitrary address, not the first PT_LOAD vaddr. // @@ -859,7 +859,7 @@ TEST(ElfTest, ELFInterpreter) { // The first segment really needs to start at 0 for a normal PIE binary, and // thus includes the headers. - uint64 const offset = interpreter.phdrs[1].p_offset; + uint64_t const offset = interpreter.phdrs[1].p_offset; // N.B. Since Linux 4.10 (0036d1f7eb95b "binfmt_elf: fix calculations for bss // padding"), Linux unconditionally zeroes the remainder of the highest mapped // page in an interpreter, failing if the protections don't allow write. Thus @@ -912,7 +912,7 @@ TEST(ElfTest, ELFInterpreter) { struct user_regs_struct regs; ASSERT_THAT(ptrace(PTRACE_GETREGS, child, 0, ®s), SyscallSucceeds()); - const uint64 interp_load_addr = regs.rip & ~(kPageSize - 1); + const uint64_t interp_load_addr = regs.rip & ~(kPageSize - 1); EXPECT_THAT( child, ContainsMappings(std::vector({ @@ -1047,7 +1047,7 @@ TEST(ElfTest, ELFInterpreterRelative) { // The first segment really needs to start at 0 for a normal PIE binary, and // thus includes the headers. - uint64 const offset = interpreter.phdrs[1].p_offset; + uint64_t const offset = interpreter.phdrs[1].p_offset; // See comment in ElfTest.ELFInterpreter. interpreter.phdrs[1].p_flags = PF_R | PF_W | PF_X; interpreter.phdrs[1].p_offset = 0x0; @@ -1086,7 +1086,7 @@ TEST(ElfTest, ELFInterpreterRelative) { struct user_regs_struct regs; ASSERT_THAT(ptrace(PTRACE_GETREGS, child, 0, ®s), SyscallSucceeds()); - const uint64 interp_load_addr = regs.rip & ~(kPageSize - 1); + const uint64_t interp_load_addr = regs.rip & ~(kPageSize - 1); EXPECT_THAT( child, ContainsMappings(std::vector({ @@ -1109,7 +1109,7 @@ TEST(ElfTest, ELFInterpreterWrongArch) { // The first segment really needs to start at 0 for a normal PIE binary, and // thus includes the headers. - uint64 const offset = interpreter.phdrs[1].p_offset; + uint64_t const offset = interpreter.phdrs[1].p_offset; // See comment in ElfTest.ELFInterpreter. interpreter.phdrs[1].p_flags = PF_R | PF_W | PF_X; interpreter.phdrs[1].p_offset = 0x0; @@ -1190,7 +1190,7 @@ TEST(ElfTest, ElfInterpreterNoExecute) { // The first segment really needs to start at 0 for a normal PIE binary, and // thus includes the headers. - uint64 const offset = interpreter.phdrs[1].p_offset; + uint64_t const offset = interpreter.phdrs[1].p_offset; // See comment in ElfTest.ELFInterpreter. 
interpreter.phdrs[1].p_flags = PF_R | PF_W | PF_X; interpreter.phdrs[1].p_offset = 0x0; diff --git a/test/syscalls/linux/fcntl.cc b/test/syscalls/linux/fcntl.cc index 6eb597eae..4f3aa81d6 100644 --- a/test/syscalls/linux/fcntl.cc +++ b/test/syscalls/linux/fcntl.cc @@ -46,9 +46,9 @@ ABSL_FLAG(bool, blocking, false, "Whether to set a blocking lock (otherwise non-blocking)."); ABSL_FLAG(bool, retry_eintr, false, "Whether to retry in the subprocess on EINTR."); -ABSL_FLAG(uint64, child_setlock_start, 0, "The value of struct flock start"); -ABSL_FLAG(uint64, child_setlock_len, 0, "The value of struct flock len"); -ABSL_FLAG(int32, socket_fd, -1, +ABSL_FLAG(uint64_t, child_setlock_start, 0, "The value of struct flock start"); +ABSL_FLAG(uint64_t, child_setlock_len, 0, "The value of struct flock len"); +ABSL_FLAG(int32_t, socket_fd, -1, "A socket to use for communicating more state back " "to the parent."); @@ -71,8 +71,8 @@ class FcntlLockTest : public ::testing::Test { EXPECT_THAT(close(fds_[1]), SyscallSucceeds()); } - int64 GetSubprocessFcntlTimeInUsec() { - int64 ret = 0; + int64_t GetSubprocessFcntlTimeInUsec() { + int64_t ret = 0; EXPECT_THAT(ReadFd(fds_[0], reinterpret_cast(&ret), sizeof(ret)), SyscallSucceedsWithValue(sizeof(ret))); return ret; @@ -676,7 +676,7 @@ TEST_F(FcntlLockTest, SetWriteLockThenBlockingWriteLock) { // We will wait kHoldLockForSec before we release our lock allowing the // subprocess to obtain it. constexpr absl::Duration kHoldLockFor = absl::Seconds(5); - const int64 kMinBlockTimeUsec = absl::ToInt64Microseconds(absl::Seconds(1)); + const int64_t kMinBlockTimeUsec = absl::ToInt64Microseconds(absl::Seconds(1)); absl::SleepFor(kHoldLockFor); @@ -685,7 +685,7 @@ TEST_F(FcntlLockTest, SetWriteLockThenBlockingWriteLock) { ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds()); // Read the blocked time from the subprocess socket. - int64 subprocess_blocked_time_usec = GetSubprocessFcntlTimeInUsec(); + int64_t subprocess_blocked_time_usec = GetSubprocessFcntlTimeInUsec(); // We must have been waiting at least kMinBlockTime. EXPECT_GT(subprocess_blocked_time_usec, kMinBlockTimeUsec); @@ -729,7 +729,7 @@ TEST_F(FcntlLockTest, SetReadLockThenBlockingWriteLock) { // subprocess to obtain it. constexpr absl::Duration kHoldLockFor = absl::Seconds(5); - const int64 kMinBlockTimeUsec = absl::ToInt64Microseconds(absl::Seconds(1)); + const int64_t kMinBlockTimeUsec = absl::ToInt64Microseconds(absl::Seconds(1)); absl::SleepFor(kHoldLockFor); @@ -738,7 +738,7 @@ TEST_F(FcntlLockTest, SetReadLockThenBlockingWriteLock) { ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds()); // Read the blocked time from the subprocess socket. - int64 subprocess_blocked_time_usec = GetSubprocessFcntlTimeInUsec(); + int64_t subprocess_blocked_time_usec = GetSubprocessFcntlTimeInUsec(); // We must have been waiting at least kMinBlockTime. EXPECT_GT(subprocess_blocked_time_usec, kMinBlockTimeUsec); @@ -782,7 +782,7 @@ TEST_F(FcntlLockTest, SetWriteLockThenBlockingReadLock) { // subprocess to obtain it. constexpr absl::Duration kHoldLockFor = absl::Seconds(5); - const int64 kMinBlockTimeUsec = absl::ToInt64Microseconds(absl::Seconds(1)); + const int64_t kMinBlockTimeUsec = absl::ToInt64Microseconds(absl::Seconds(1)); absl::SleepFor(kHoldLockFor); @@ -791,7 +791,7 @@ TEST_F(FcntlLockTest, SetWriteLockThenBlockingReadLock) { ASSERT_THAT(fcntl(fd.get(), F_SETLKW, &fl), SyscallSucceeds()); // Read the blocked time from the subprocess socket. 
- int64 subprocess_blocked_time_usec = GetSubprocessFcntlTimeInUsec(); + int64_t subprocess_blocked_time_usec = GetSubprocessFcntlTimeInUsec(); // We must have been waiting at least kMinBlockTime. EXPECT_GT(subprocess_blocked_time_usec, kMinBlockTimeUsec); diff --git a/test/syscalls/linux/fork.cc b/test/syscalls/linux/fork.cc index 486189697..371890110 100644 --- a/test/syscalls/linux/fork.cc +++ b/test/syscalls/linux/fork.cc @@ -270,7 +270,7 @@ TEST_F(ForkTest, Alarm) { // Child cannot affect parent private memory. TEST_F(ForkTest, PrivateMemory) { - std::atomic local(0); + std::atomic local(0); pid_t child1 = Fork(); if (child1 == 0) { diff --git a/test/syscalls/linux/futex.cc b/test/syscalls/linux/futex.cc index b4a7cc8d6..40c80a6e1 100644 --- a/test/syscalls/linux/futex.cc +++ b/test/syscalls/linux/futex.cc @@ -112,7 +112,7 @@ int futex_wake_bitset(bool priv, std::atomic* uaddr, int count, } int futex_wake_op(bool priv, std::atomic* uaddr1, std::atomic* uaddr2, - int nwake1, int nwake2, uint32 sub_op) { + int nwake1, int nwake2, uint32_t sub_op) { int op = FUTEX_WAKE_OP; if (priv) { op |= FUTEX_PRIVATE_FLAG; diff --git a/test/syscalls/linux/inotify.cc b/test/syscalls/linux/inotify.cc index 182d676d5..fdef646eb 100644 --- a/test/syscalls/linux/inotify.cc +++ b/test/syscalls/linux/inotify.cc @@ -48,26 +48,26 @@ constexpr int kBufSize = 1024; // C++-friendly version of struct inotify_event. struct Event { - int32 wd; - uint32 mask; - uint32 cookie; - uint32 len; + int32_t wd; + uint32_t mask; + uint32_t cookie; + uint32_t len; std::string name; - Event(uint32 mask, int32 wd, absl::string_view name, uint32 cookie) + Event(uint32_t mask, int32_t wd, absl::string_view name, uint32_t cookie) : wd(wd), mask(mask), cookie(cookie), len(name.size()), name(std::string(name)) {} - Event(uint32 mask, int32 wd, absl::string_view name) + Event(uint32_t mask, int32_t wd, absl::string_view name) : Event(mask, wd, name, 0) {} - Event(uint32 mask, int32 wd) : Event(mask, wd, "", 0) {} + Event(uint32_t mask, int32_t wd) : Event(mask, wd, "", 0) {} Event() : Event(0, 0, "", 0) {} }; // Prints the symbolic name for a struct inotify_event's 'mask' field. -std::string FlagString(uint32 flags) { +std::string FlagString(uint32_t flags) { std::vector names; #define EMIT(target) \ @@ -320,7 +320,7 @@ PosixErrorOr InotifyInit1(int flags) { } PosixErrorOr InotifyAddWatch(int fd, const std::string& path, - uint32 mask) { + uint32_t mask) { int wd; EXPECT_THAT(wd = inotify_add_watch(fd, path.c_str(), mask), SyscallSucceeds()); @@ -647,7 +647,7 @@ TEST(Inotify, MoveGeneratesEvents) { Event(IN_MOVED_TO, root_wd, Basename(newpath), events[1].cookie)})); EXPECT_NE(events[0].cookie, 0); EXPECT_EQ(events[0].cookie, events[1].cookie); - uint32 last_cookie = events[0].cookie; + uint32_t last_cookie = events[0].cookie; // Test move from root -> root/dir1. newpath = NewTempAbsPathInDir(dir1.path()); @@ -841,7 +841,7 @@ TEST(Inotify, ConcurrentThreadsGeneratingEvents) { } auto test_thread = [&files]() { - uint32 seed = time(nullptr); + uint32_t seed = time(nullptr); for (int i = 0; i < 20; i++) { const TempPath& file = files[rand_r(&seed) % files.size()]; const FileDescriptor file_fd = @@ -960,7 +960,7 @@ TEST(Inotify, BlockingReadOnInotifyFd) { t.Join(); // Make sure the event we got back is sane. 
- uint32 event_mask; + uint32_t event_mask; memcpy(&event_mask, buf.data() + offsetof(struct inotify_event, mask), sizeof(event_mask)); EXPECT_EQ(event_mask, IN_ACCESS); diff --git a/test/syscalls/linux/ip_socket_test_util.cc b/test/syscalls/linux/ip_socket_test_util.cc index f694a6360..6b472eb2f 100644 --- a/test/syscalls/linux/ip_socket_test_util.cc +++ b/test/syscalls/linux/ip_socket_test_util.cc @@ -24,12 +24,12 @@ namespace gvisor { namespace testing { -uint32 IPFromInetSockaddr(const struct sockaddr* addr) { +uint32_t IPFromInetSockaddr(const struct sockaddr* addr) { auto* in_addr = reinterpret_cast(addr); return in_addr->sin_addr.s_addr; } -uint16 PortFromInetSockaddr(const struct sockaddr* addr) { +uint16_t PortFromInetSockaddr(const struct sockaddr* addr) { auto* in_addr = reinterpret_cast(addr); return ntohs(in_addr->sin_port); } diff --git a/test/syscalls/linux/ip_socket_test_util.h b/test/syscalls/linux/ip_socket_test_util.h index 0eeca30dd..0f58e0f77 100644 --- a/test/syscalls/linux/ip_socket_test_util.h +++ b/test/syscalls/linux/ip_socket_test_util.h @@ -27,10 +27,10 @@ namespace gvisor { namespace testing { // Extracts the IP address from an inet sockaddr in network byte order. -uint32 IPFromInetSockaddr(const struct sockaddr* addr); +uint32_t IPFromInetSockaddr(const struct sockaddr* addr); // Extracts the port from an inet sockaddr in host byte order. -uint16 PortFromInetSockaddr(const struct sockaddr* addr); +uint16_t PortFromInetSockaddr(const struct sockaddr* addr); // InterfaceIndex returns the index of the named interface. PosixErrorOr InterfaceIndex(std::string name); diff --git a/test/syscalls/linux/itimer.cc b/test/syscalls/linux/itimer.cc index 52ffbe89d..b77e4cbd1 100644 --- a/test/syscalls/linux/itimer.cc +++ b/test/syscalls/linux/itimer.cc @@ -177,8 +177,8 @@ SignalTestResult ItimerSignalTest(int id, clock_t main_clock, SignalTestResult result; // Wait for the workers to be done and collect their sample counts. - result.worker_samples.push_back(reinterpret_cast(th1.Join())); - result.worker_samples.push_back(reinterpret_cast(th2.Join())); + result.worker_samples.push_back(reinterpret_cast(th1.Join())); + result.worker_samples.push_back(reinterpret_cast(th2.Join())); cleanup_itimer.Release()(); result.expected_total = (Now(main_clock) - start) / kPeriod; result.main_thread_samples = signal_test_num_samples.load(); diff --git a/test/syscalls/linux/kill.cc b/test/syscalls/linux/kill.cc index a2247fdeb..db29bd59c 100644 --- a/test/syscalls/linux/kill.cc +++ b/test/syscalls/linux/kill.cc @@ -32,8 +32,8 @@ #include "test/util/test_util.h" #include "test/util/thread_util.h" -ABSL_FLAG(int32, scratch_uid, 65534, "scratch UID"); -ABSL_FLAG(int32, scratch_gid, 65534, "scratch GID"); +ABSL_FLAG(int32_t, scratch_uid, 65534, "scratch UID"); +ABSL_FLAG(int32_t, scratch_gid, 65534, "scratch GID"); using ::testing::Ge; diff --git a/test/syscalls/linux/link.cc b/test/syscalls/linux/link.cc index 108a0c23e..e74fa2ed5 100644 --- a/test/syscalls/linux/link.cc +++ b/test/syscalls/linux/link.cc @@ -32,7 +32,7 @@ #include "test/util/test_util.h" #include "test/util/thread_util.h" -ABSL_FLAG(int32, scratch_uid, 65534, "scratch UID"); +ABSL_FLAG(int32_t, scratch_uid, 65534, "scratch UID"); namespace gvisor { namespace testing { @@ -55,7 +55,8 @@ TEST(LinkTest, CanCreateLinkFile) { const std::string newname = NewTempAbsPath(); // Get the initial link count. 
- uint64 initial_link_count = ASSERT_NO_ERRNO_AND_VALUE(Links(oldfile.path())); + uint64_t initial_link_count = + ASSERT_NO_ERRNO_AND_VALUE(Links(oldfile.path())); EXPECT_THAT(link(oldfile.path().c_str(), newname.c_str()), SyscallSucceeds()); diff --git a/test/syscalls/linux/memfd.cc b/test/syscalls/linux/memfd.cc index e10f250d1..e57b49a4a 100644 --- a/test/syscalls/linux/memfd.cc +++ b/test/syscalls/linux/memfd.cc @@ -61,7 +61,7 @@ int memfd_create(const std::string& name, unsigned int flags) { } PosixErrorOr MemfdCreate(const std::string& name, - uint32 flags) { + uint32_t flags) { int fd = memfd_create(name, flags); if (fd < 0) { return PosixError( diff --git a/test/syscalls/linux/memory_accounting.cc b/test/syscalls/linux/memory_accounting.cc index 987dbd151..94aea4077 100644 --- a/test/syscalls/linux/memory_accounting.cc +++ b/test/syscalls/linux/memory_accounting.cc @@ -33,7 +33,7 @@ using ::absl::StrFormat; // AnonUsageFromMeminfo scrapes the current anonymous memory usage from // /proc/meminfo and returns it in bytes. -PosixErrorOr AnonUsageFromMeminfo() { +PosixErrorOr AnonUsageFromMeminfo() { ASSIGN_OR_RETURN_ERRNO(auto meminfo, GetContents("/proc/meminfo")); std::vector lines(absl::StrSplit(meminfo, '\n')); @@ -47,7 +47,7 @@ PosixErrorOr AnonUsageFromMeminfo() { absl::StrSplit(line, ' ', absl::SkipEmpty())); if (parts.size() == 3) { // The size is the second field, let's try to parse it as a number. - ASSIGN_OR_RETURN_ERRNO(auto anon_kb, Atoi(parts[1])); + ASSIGN_OR_RETURN_ERRNO(auto anon_kb, Atoi(parts[1])); return anon_kb * 1024; } @@ -65,10 +65,10 @@ TEST(MemoryAccounting, AnonAccountingPreservedOnSaveRestore) { // the test. SKIP_IF(!IsRunningOnGvisor()); - uint64 anon_initial = ASSERT_NO_ERRNO_AND_VALUE(AnonUsageFromMeminfo()); + uint64_t anon_initial = ASSERT_NO_ERRNO_AND_VALUE(AnonUsageFromMeminfo()); // Cause some anonymous memory usage. - uint64 map_bytes = Megabytes(512); + uint64_t map_bytes = Megabytes(512); char* mem = static_cast(mmap(nullptr, map_bytes, PROT_READ | PROT_WRITE, MAP_POPULATE | MAP_ANON | MAP_PRIVATE, -1, 0)); @@ -77,11 +77,11 @@ TEST(MemoryAccounting, AnonAccountingPreservedOnSaveRestore) { // Write something to each page to prevent them from being decommited on // S/R. Zero pages are dropped on save. - for (uint64 i = 0; i < map_bytes; i += kPageSize) { + for (uint64_t i = 0; i < map_bytes; i += kPageSize) { mem[i] = 'a'; } - uint64 anon_after_alloc = ASSERT_NO_ERRNO_AND_VALUE(AnonUsageFromMeminfo()); + uint64_t anon_after_alloc = ASSERT_NO_ERRNO_AND_VALUE(AnonUsageFromMeminfo()); EXPECT_THAT(anon_after_alloc, EquivalentWithin(anon_initial + map_bytes, 0.03)); @@ -90,7 +90,7 @@ TEST(MemoryAccounting, AnonAccountingPreservedOnSaveRestore) { MaybeSave(); // Usage should remain the same across S/R. 
- uint64 anon_after_sr = ASSERT_NO_ERRNO_AND_VALUE(AnonUsageFromMeminfo()); + uint64_t anon_after_sr = ASSERT_NO_ERRNO_AND_VALUE(AnonUsageFromMeminfo()); EXPECT_THAT(anon_after_sr, EquivalentWithin(anon_after_alloc, 0.03)); } diff --git a/test/syscalls/linux/mempolicy.cc b/test/syscalls/linux/mempolicy.cc index 46bbbc923..9d5f47651 100644 --- a/test/syscalls/linux/mempolicy.cc +++ b/test/syscalls/linux/mempolicy.cc @@ -43,12 +43,12 @@ namespace { #define MPOL_MF_MOVE (1 << 1) #define MPOL_MF_MOVE_ALL (1 << 2) -int get_mempolicy(int *policy, uint64 *nmask, uint64 maxnode, void *addr, +int get_mempolicy(int *policy, uint64_t *nmask, uint64_t maxnode, void *addr, int flags) { return syscall(SYS_get_mempolicy, policy, nmask, maxnode, addr, flags); } -int set_mempolicy(int mode, uint64 *nmask, uint64 maxnode) { +int set_mempolicy(int mode, uint64_t *nmask, uint64_t maxnode) { return syscall(SYS_set_mempolicy, mode, nmask, maxnode); } @@ -68,8 +68,8 @@ Cleanup ScopedMempolicy() { // Temporarily change the memory policy for the calling thread within the // caller's scope. -PosixErrorOr ScopedSetMempolicy(int mode, uint64 *nmask, - uint64 maxnode) { +PosixErrorOr ScopedSetMempolicy(int mode, uint64_t *nmask, + uint64_t maxnode) { if (set_mempolicy(mode, nmask, maxnode)) { return PosixError(errno, "set_mempolicy"); } @@ -78,7 +78,7 @@ PosixErrorOr ScopedSetMempolicy(int mode, uint64 *nmask, TEST(MempolicyTest, CheckDefaultPolicy) { int mode = 0; - uint64 nodemask = 0; + uint64_t nodemask = 0; ASSERT_THAT(get_mempolicy(&mode, &nodemask, sizeof(nodemask) * BITS_PER_BYTE, nullptr, 0), SyscallSucceeds()); @@ -88,12 +88,12 @@ TEST(MempolicyTest, CheckDefaultPolicy) { } TEST(MempolicyTest, PolicyPreservedAfterSetMempolicy) { - uint64 nodemask = 0x1; + uint64_t nodemask = 0x1; auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSetMempolicy( MPOL_BIND, &nodemask, sizeof(nodemask) * BITS_PER_BYTE)); int mode = 0; - uint64 nodemask_after = 0x0; + uint64_t nodemask_after = 0x0; ASSERT_THAT(get_mempolicy(&mode, &nodemask_after, sizeof(nodemask_after) * BITS_PER_BYTE, nullptr, 0), SyscallSucceeds()); @@ -118,7 +118,7 @@ TEST(MempolicyTest, PolicyPreservedAfterSetMempolicy) { TEST(MempolicyTest, SetMempolicyRejectsInvalidInputs) { auto cleanup = ScopedMempolicy(); - uint64 nodemask; + uint64_t nodemask; if (IsRunningOnGvisor()) { // Invalid nodemask, we only support a single node on gvisor. 
@@ -165,7 +165,7 @@ TEST(MempolicyTest, EmptyNodemaskOnSet) { SyscallFailsWithErrno(EINVAL)); EXPECT_THAT(set_mempolicy(MPOL_PREFERRED, nullptr, 1), SyscallSucceeds()); - uint64 nodemask = 0x1; + uint64_t nodemask = 0x1; EXPECT_THAT(set_mempolicy(MPOL_DEFAULT, &nodemask, 0), SyscallFailsWithErrno(EINVAL)); EXPECT_THAT(set_mempolicy(MPOL_BIND, &nodemask, 0), @@ -175,7 +175,7 @@ TEST(MempolicyTest, EmptyNodemaskOnSet) { } TEST(MempolicyTest, QueryAvailableNodes) { - uint64 nodemask = 0; + uint64_t nodemask = 0; ASSERT_THAT( get_mempolicy(nullptr, &nodemask, sizeof(nodemask) * BITS_PER_BYTE, nullptr, MPOL_F_MEMS_ALLOWED), @@ -197,8 +197,8 @@ TEST(MempolicyTest, QueryAvailableNodes) { } TEST(MempolicyTest, GetMempolicyQueryNodeForAddress) { - uint64 dummy_stack_address; - auto dummy_heap_address = absl::make_unique(); + uint64_t dummy_stack_address; + auto dummy_heap_address = absl::make_unique(); int mode; for (auto ptr : {&dummy_stack_address, dummy_heap_address.get()}) { @@ -228,7 +228,7 @@ TEST(MempolicyTest, GetMempolicyQueryNodeForAddress) { TEST(MempolicyTest, GetMempolicyCanOmitPointers) { int mode; - uint64 nodemask; + uint64_t nodemask; // Omit nodemask pointer. ASSERT_THAT(get_mempolicy(&mode, nullptr, 0, nullptr, 0), SyscallSucceeds()); @@ -249,7 +249,7 @@ TEST(MempolicyTest, GetMempolicyNextInterleaveNode) { SyscallFailsWithErrno(EINVAL)); // Set default policy for thread to MPOL_INTERLEAVE. - uint64 nodemask = 0x1; + uint64_t nodemask = 0x1; auto cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSetMempolicy( MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * BITS_PER_BYTE)); diff --git a/test/syscalls/linux/mmap.cc b/test/syscalls/linux/mmap.cc index 9b2270c8d..1c4d9f1c7 100644 --- a/test/syscalls/linux/mmap.cc +++ b/test/syscalls/linux/mmap.cc @@ -50,13 +50,13 @@ namespace testing { namespace { -PosixErrorOr VirtualMemorySize() { +PosixErrorOr VirtualMemorySize() { ASSIGN_OR_RETURN_ERRNO(auto contents, GetContents("/proc/self/statm")); std::vector parts = absl::StrSplit(contents, ' '); if (parts.empty()) { return PosixError(EINVAL, "Unable to parse /proc/self/statm"); } - ASSIGN_OR_RETURN_ERRNO(auto pages, Atoi(parts[0])); + ASSIGN_OR_RETURN_ERRNO(auto pages, Atoi(parts[0])); return pages * getpagesize(); } @@ -245,7 +245,7 @@ TEST_F(MMapTest, MapDevZeroSharedFdNoPersistence) { // Create a second mapping via the same fd. void* psec_map = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, dev_zero.get(), 0); - ASSERT_THAT(reinterpret_cast(psec_map), SyscallSucceeds()); + ASSERT_THAT(reinterpret_cast(psec_map), SyscallSucceeds()); // Always unmap. auto cleanup_psec_map = Cleanup( @@ -690,10 +690,10 @@ TEST_F(MMapTest, ExceedLimitDataPrlimitPID) { } TEST_F(MMapTest, NoExceedLimitAS) { - constexpr uint64 kAllocBytes = 200 << 20; + constexpr uint64_t kAllocBytes = 200 << 20; // Add some headroom to the AS limit in case of e.g. unexpected stack // expansion. - constexpr uint64 kExtraASBytes = kAllocBytes + (20 << 20); + constexpr uint64_t kExtraASBytes = kAllocBytes + (20 << 20); static_assert(kAllocBytes < kExtraASBytes, "test depends on allocation not exceeding AS limit"); @@ -708,10 +708,10 @@ TEST_F(MMapTest, NoExceedLimitAS) { } TEST_F(MMapTest, ExceedLimitAS) { - constexpr uint64 kAllocBytes = 200 << 20; + constexpr uint64_t kAllocBytes = 200 << 20; // Add some headroom to the AS limit in case of e.g. unexpected stack // expansion. 
- constexpr uint64 kExtraASBytes = 20 << 20; + constexpr uint64_t kExtraASBytes = 20 << 20; static_assert(kAllocBytes > kExtraASBytes, "test depends on allocation exceeding AS limit"); @@ -1469,7 +1469,7 @@ TEST_F(MMapFileTest, InternalSigBusZeroing) { SyscallFailsWithErrno(EFAULT)); } -// Checks that mmaps with a length of uint64(-PAGE_SIZE + 1) or greater do not +// Checks that mmaps with a length of uint64_t(-PAGE_SIZE + 1) or greater do not // induce a sentry panic (due to "rounding up" to 0). TEST_F(MMapTest, HugeLength) { EXPECT_THAT(Map(0, static_cast(-kPageSize + 1), PROT_NONE, diff --git a/test/syscalls/linux/open.cc b/test/syscalls/linux/open.cc index a5e790729..267ae19f6 100644 --- a/test/syscalls/linux/open.cc +++ b/test/syscalls/linux/open.cc @@ -193,7 +193,7 @@ TEST_F(OpenTest, Fault) { TEST_F(OpenTest, AppendOnly) { // First write some data to the fresh file. - const int64 kBufSize = 1024; + const int64_t kBufSize = 1024; std::vector buf(kBufSize, 'a'); FileDescriptor fd0 = ASSERT_NO_ERRNO_AND_VALUE(Open(test_file_name_, O_RDWR)); diff --git a/test/syscalls/linux/partial_bad_buffer.cc b/test/syscalls/linux/partial_bad_buffer.cc index 55eb9361f..df7129acc 100644 --- a/test/syscalls/linux/partial_bad_buffer.cc +++ b/test/syscalls/linux/partial_bad_buffer.cc @@ -363,7 +363,7 @@ TEST_F(PartialBadBufferTest, SendMsgTCP) { // byte past the valid page and check that it triggers an EFAULT // correctly. Otherwise in gVisor the sendmsg call will just return with no // error with kPageSize bytes written successfully. - const uint32 buf_size = kPageSize + 1; + const uint32_t buf_size = kPageSize + 1; ASSERT_THAT(setsockopt(send_socket.get(), SOL_SOCKET, SO_SNDBUF, &buf_size, sizeof(buf_size)), SyscallSucceedsWithValue(0)); diff --git a/test/syscalls/linux/prctl_setuid.cc b/test/syscalls/linux/prctl_setuid.cc index ad39a8463..30f0d75b3 100644 --- a/test/syscalls/linux/prctl_setuid.cc +++ b/test/syscalls/linux/prctl_setuid.cc @@ -26,7 +26,7 @@ #include "test/util/test_util.h" #include "test/util/thread_util.h" -ABSL_FLAG(int32, scratch_uid, 65534, "scratch UID"); +ABSL_FLAG(int32_t, scratch_uid, 65534, "scratch UID"); // This flag is used to verify that after an exec PR_GET_KEEPCAPS // returns 0, the return code will be offset by kPrGetKeepCapsExitBase. ABSL_FLAG(bool, prctl_pr_get_keepcaps, false, diff --git a/test/syscalls/linux/proc.cc b/test/syscalls/linux/proc.cc index 0d5899ec9..bf9bb45d3 100644 --- a/test/syscalls/linux/proc.cc +++ b/test/syscalls/linux/proc.cc @@ -463,12 +463,12 @@ std::string AnonymousMapsEntryForMapping(const Mapping& m, int prot) { return AnonymousMapsEntry(m.addr(), m.len(), prot); } -PosixErrorOr> ReadProcSelfAuxv() { +PosixErrorOr> ReadProcSelfAuxv() { std::string auxv_file; RETURN_IF_ERRNO(GetContents("/proc/self/auxv", &auxv_file)); const Elf64_auxv_t* auxv_data = reinterpret_cast(auxv_file.data()); - std::map auxv_entries; + std::map auxv_entries; for (int i = 0; auxv_data[i].a_type != AT_NULL; i++) { auto a_type = auxv_data[i].a_type; EXPECT_EQ(0, auxv_entries.count(a_type)) << "a_type: " << a_type; @@ -877,7 +877,7 @@ TEST(ProcStat, Fields) { // All fields besides itime are valid base 10 numbers. for (size_t i = 1; i < fields.size(); i++) { - uint64 val; + uint64_t val; EXPECT_TRUE(absl::SimpleAtoi(fields[i], &val)) << proc_stat; } } @@ -904,7 +904,7 @@ TEST(ProcLoadavg, Fields) { EXPECT_EQ(fields.size(), 6) << proc_loadvg; double val; - uint64 val2; + uint64_t val2; // First three fields are floating point numbers. 
EXPECT_TRUE(absl::SimpleAtod(fields[0], &val)) << proc_loadvg; EXPECT_TRUE(absl::SimpleAtod(fields[1], &val)) << proc_loadvg; @@ -936,19 +936,19 @@ TEST_P(ProcPidStatTest, HasBasicFields) { // boot time will be very close, and the proc starttime field (which is the // delta of the two times) will be 0. For that unfortunate reason, we can // only check that starttime >= 0, and not that it is strictly > 0. - uint64 starttime; + uint64_t starttime; ASSERT_TRUE(absl::SimpleAtoi(fields[21], &starttime)); EXPECT_GE(starttime, 0); - uint64 vss; + uint64_t vss; ASSERT_TRUE(absl::SimpleAtoi(fields[22], &vss)); EXPECT_GT(vss, 0); - uint64 rss; + uint64_t rss; ASSERT_TRUE(absl::SimpleAtoi(fields[23], &rss)); EXPECT_GT(rss, 0); - uint64 rsslim; + uint64_t rsslim; ASSERT_TRUE(absl::SimpleAtoi(fields[24], &rsslim)); EXPECT_GT(rsslim, 0); } @@ -965,11 +965,11 @@ TEST_P(ProcPidStatmTest, HasBasicFields) { std::vector fields = absl::StrSplit(proc_pid_statm, ' '); ASSERT_GE(fields.size(), 7); - uint64 vss; + uint64_t vss; ASSERT_TRUE(absl::SimpleAtoi(fields[0], &vss)); EXPECT_GT(vss, 0); - uint64 rss; + uint64_t rss; ASSERT_TRUE(absl::SimpleAtoi(fields[1], &rss)); EXPECT_GT(rss, 0); } @@ -977,7 +977,7 @@ TEST_P(ProcPidStatmTest, HasBasicFields) { INSTANTIATE_TEST_SUITE_P(SelfAndNumericPid, ProcPidStatmTest, ::testing::Values("self", absl::StrCat(getpid()))); -PosixErrorOr CurrentRSS() { +PosixErrorOr CurrentRSS() { ASSIGN_OR_RETURN_ERRNO(auto proc_self_stat, GetContents("/proc/self/stat")); if (proc_self_stat.empty()) { return PosixError(EINVAL, "empty /proc/self/stat"); @@ -990,7 +990,7 @@ PosixErrorOr CurrentRSS() { absl::StrCat("/proc/self/stat has too few fields: ", proc_self_stat)); } - uint64 rss; + uint64_t rss; if (!absl::SimpleAtoi(fields[23], &rss)) { return PosixError( EINVAL, absl::StrCat("/proc/self/stat RSS field is not a number: ", @@ -1002,14 +1002,14 @@ PosixErrorOr CurrentRSS() { } // The size of mapping created by MapPopulateRSS. -constexpr uint64 kMappingSize = 100 << 20; +constexpr uint64_t kMappingSize = 100 << 20; // Tolerance on RSS comparisons to account for background thread mappings, // reclaimed pages, newly faulted pages, etc. -constexpr uint64 kRSSTolerance = 5 << 20; +constexpr uint64_t kRSSTolerance = 5 << 20; // Capture RSS before and after an anonymous mapping with passed prot. -void MapPopulateRSS(int prot, uint64* before, uint64* after) { +void MapPopulateRSS(int prot, uint64_t* before, uint64_t* after) { *before = ASSERT_NO_ERRNO_AND_VALUE(CurrentRSS()); // N.B. The kernel asynchronously accumulates per-task RSS counters into the @@ -1040,7 +1040,7 @@ void MapPopulateRSS(int prot, uint64* before, uint64* after) { // PROT_WRITE + MAP_POPULATE anonymous mappings are always committed. TEST(ProcSelfStat, PopulateWriteRSS) { - uint64 before, after; + uint64_t before, after; MapPopulateRSS(PROT_READ | PROT_WRITE, &before, &after); // Mapping is committed. @@ -1049,7 +1049,7 @@ TEST(ProcSelfStat, PopulateWriteRSS) { // PROT_NONE + MAP_POPULATE anonymous mappings are never committed. TEST(ProcSelfStat, PopulateNoneRSS) { - uint64 before, after; + uint64_t before, after; MapPopulateRSS(PROT_NONE, &before, &after); // Mapping not committed. @@ -1766,7 +1766,7 @@ TEST(ProcTask, VerifyTaskDirNlinks) { // Once we reach the test body, we can count on the thread count being stable // unless we spawn a new one. 
- uint64 initial_links = ASSERT_NO_ERRNO_AND_VALUE(Links("/proc/self/task")); + uint64_t initial_links = ASSERT_NO_ERRNO_AND_VALUE(Links("/proc/self/task")); ASSERT_GE(initial_links, 3); // For each new subtask, we should gain a new link. @@ -1864,9 +1864,9 @@ TEST(ProcFilesystems, Bug65172365) { } TEST(ProcFilesystems, PresenceOfShmMaxMniAll) { - uint64 shmmax = 0; - uint64 shmall = 0; - uint64 shmmni = 0; + uint64_t shmmax = 0; + uint64_t shmall = 0; + uint64_t shmmni = 0; std::string proc_file; proc_file = ASSERT_NO_ERRNO_AND_VALUE(GetContents("/proc/sys/kernel/shmmax")); ASSERT_FALSE(proc_file.empty()); diff --git a/test/syscalls/linux/proc_net_tcp.cc b/test/syscalls/linux/proc_net_tcp.cc index 77183420b..5b6e3e3cd 100644 --- a/test/syscalls/linux/proc_net_tcp.cc +++ b/test/syscalls/linux/proc_net_tcp.cc @@ -40,15 +40,15 @@ constexpr char kProcNetTCPHeader[] = // TCPEntry represents a single entry from /proc/net/tcp. struct TCPEntry { - uint32 local_addr; - uint16 local_port; + uint32_t local_addr; + uint16_t local_port; - uint32 remote_addr; - uint16 remote_port; + uint32_t remote_addr; + uint16_t remote_port; - uint64 state; - uint64 uid; - uint64 inode; + uint64_t state; + uint64_t uid; + uint64_t inode; }; // Finds the first entry in 'entries' for which 'predicate' returns true. @@ -69,8 +69,8 @@ bool FindBy(const std::vector& entries, TCPEntry* match, bool FindByLocalAddr(const std::vector& entries, TCPEntry* match, const struct sockaddr* addr) { - uint32 host = IPFromInetSockaddr(addr); - uint16 port = PortFromInetSockaddr(addr); + uint32_t host = IPFromInetSockaddr(addr); + uint16_t port = PortFromInetSockaddr(addr); return FindBy(entries, match, [host, port](const TCPEntry& e) { return (e.local_addr == host && e.local_port == port); }); @@ -78,8 +78,8 @@ bool FindByLocalAddr(const std::vector& entries, TCPEntry* match, bool FindByRemoteAddr(const std::vector& entries, TCPEntry* match, const struct sockaddr* addr) { - uint32 host = IPFromInetSockaddr(addr); - uint16 port = PortFromInetSockaddr(addr); + uint32_t host = IPFromInetSockaddr(addr); + uint16_t port = PortFromInetSockaddr(addr); return FindBy(entries, match, [host, port](const TCPEntry& e) { return (e.remote_addr == host && e.remote_port == port); }); @@ -131,8 +131,8 @@ PosixErrorOr> ProcNetTCPEntries() { ASSIGN_OR_RETURN_ERRNO(entry.remote_port, AtoiBase(fields[4], 16)); ASSIGN_OR_RETURN_ERRNO(entry.state, AtoiBase(fields[5], 16)); - ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi(fields[11])); - ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi(fields[13])); + ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi(fields[11])); + ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi(fields[13])); entries.push_back(entry); } @@ -234,8 +234,8 @@ TEST(ProcNetTCP, State) { FileDescriptor accepted = ASSERT_NO_ERRNO_AND_VALUE(Accept(server->get(), nullptr, nullptr)); - const uint32 accepted_local_host = IPFromInetSockaddr(&addr); - const uint16 accepted_local_port = PortFromInetSockaddr(&addr); + const uint32_t accepted_local_host = IPFromInetSockaddr(&addr); + const uint16_t accepted_local_port = PortFromInetSockaddr(&addr); entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCPEntries()); TCPEntry accepted_entry; @@ -258,14 +258,14 @@ constexpr char kProcNetTCP6Header[] = // TCP6Entry represents a single entry from /proc/net/tcp6. 
struct TCP6Entry { struct in6_addr local_addr; - uint16 local_port; + uint16_t local_port; struct in6_addr remote_addr; - uint16 remote_port; + uint16_t remote_port; - uint64 state; - uint64 uid; - uint64 inode; + uint64_t state; + uint64_t uid; + uint64_t inode; }; bool IPv6AddrEqual(const struct in6_addr* a1, const struct in6_addr* a2) { @@ -296,7 +296,7 @@ const struct in6_addr* IP6FromInetSockaddr(const struct sockaddr* addr) { bool FindByLocalAddr6(const std::vector& entries, TCP6Entry* match, const struct sockaddr* addr) { const struct in6_addr* local = IP6FromInetSockaddr(addr); - uint16 port = PortFromInetSockaddr(addr); + uint16_t port = PortFromInetSockaddr(addr); return FindBy6(entries, match, [local, port](const TCP6Entry& e) { return (IPv6AddrEqual(&e.local_addr, local) && e.local_port == port); }); @@ -305,22 +305,22 @@ bool FindByLocalAddr6(const std::vector& entries, TCP6Entry* match, bool FindByRemoteAddr6(const std::vector& entries, TCP6Entry* match, const struct sockaddr* addr) { const struct in6_addr* remote = IP6FromInetSockaddr(addr); - uint16 port = PortFromInetSockaddr(addr); + uint16_t port = PortFromInetSockaddr(addr); return FindBy6(entries, match, [remote, port](const TCP6Entry& e) { return (IPv6AddrEqual(&e.remote_addr, remote) && e.remote_port == port); }); } void ReadIPv6Address(std::string s, struct in6_addr* addr) { - uint32 a0, a1, a2, a3; + uint32_t a0, a1, a2, a3; const char* fmt = "%08X%08X%08X%08X"; EXPECT_EQ(sscanf(s.c_str(), fmt, &a0, &a1, &a2, &a3), 4); - uint8* b = addr->s6_addr; - *((uint32*)&b[0]) = a0; - *((uint32*)&b[4]) = a1; - *((uint32*)&b[8]) = a2; - *((uint32*)&b[12]) = a3; + uint8_t* b = addr->s6_addr; + *((uint32_t*)&b[0]) = a0; + *((uint32_t*)&b[4]) = a1; + *((uint32_t*)&b[8]) = a2; + *((uint32_t*)&b[12]) = a3; } // Returns a parsed representation of /proc/net/tcp6 entries. @@ -367,8 +367,8 @@ PosixErrorOr> ProcNetTCP6Entries() { ReadIPv6Address(fields[3], &entry.remote_addr); ASSIGN_OR_RETURN_ERRNO(entry.remote_port, AtoiBase(fields[4], 16)); ASSIGN_OR_RETURN_ERRNO(entry.state, AtoiBase(fields[5], 16)); - ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi(fields[11])); - ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi(fields[13])); + ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi(fields[11])); + ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi(fields[13])); entries.push_back(entry); } @@ -476,7 +476,7 @@ TEST(ProcNetTCP6, State) { ASSERT_NO_ERRNO_AND_VALUE(Accept(server->get(), nullptr, nullptr)); const struct in6_addr* local = IP6FromInetSockaddr(addr); - const uint16 accepted_local_port = PortFromInetSockaddr(addr); + const uint16_t accepted_local_port = PortFromInetSockaddr(addr); entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetTCP6Entries()); TCP6Entry accepted_entry; diff --git a/test/syscalls/linux/proc_net_udp.cc b/test/syscalls/linux/proc_net_udp.cc index 98c1e0cf1..786b4b4af 100644 --- a/test/syscalls/linux/proc_net_udp.cc +++ b/test/syscalls/linux/proc_net_udp.cc @@ -40,15 +40,15 @@ constexpr char kProcNetUDPHeader[] = // UDPEntry represents a single entry from /proc/net/udp. 
struct UDPEntry { - uint32 local_addr; - uint16 local_port; + uint32_t local_addr; + uint16_t local_port; - uint32 remote_addr; - uint16 remote_port; + uint32_t remote_addr; + uint16_t remote_port; - uint64 state; - uint64 uid; - uint64 inode; + uint64_t state; + uint64_t uid; + uint64_t inode; }; std::string DescribeFirstInetSocket(const SocketPair& sockets) { @@ -81,8 +81,8 @@ bool FindBy(const std::vector& entries, UDPEntry* match, bool FindByLocalAddr(const std::vector& entries, UDPEntry* match, const struct sockaddr* addr) { - uint32 host = IPFromInetSockaddr(addr); - uint16 port = PortFromInetSockaddr(addr); + uint32_t host = IPFromInetSockaddr(addr); + uint16_t port = PortFromInetSockaddr(addr); return FindBy(entries, match, [host, port](const UDPEntry& e) { return (e.local_addr == host && e.local_port == port); }); @@ -90,14 +90,14 @@ bool FindByLocalAddr(const std::vector& entries, UDPEntry* match, bool FindByRemoteAddr(const std::vector& entries, UDPEntry* match, const struct sockaddr* addr) { - uint32 host = IPFromInetSockaddr(addr); - uint16 port = PortFromInetSockaddr(addr); + uint32_t host = IPFromInetSockaddr(addr); + uint16_t port = PortFromInetSockaddr(addr); return FindBy(entries, match, [host, port](const UDPEntry& e) { return (e.remote_addr == host && e.remote_port == port); }); } -PosixErrorOr InodeFromSocketFD(int fd) { +PosixErrorOr InodeFromSocketFD(int fd) { ASSIGN_OR_RETURN_ERRNO(struct stat s, Fstat(fd)); if (!S_ISSOCK(s.st_mode)) { return PosixError(EINVAL, StrFormat("FD %d is not a socket", fd)); @@ -107,7 +107,7 @@ PosixErrorOr InodeFromSocketFD(int fd) { PosixErrorOr FindByFD(const std::vector& entries, UDPEntry* match, int fd) { - ASSIGN_OR_RETURN_ERRNO(uint64 inode, InodeFromSocketFD(fd)); + ASSIGN_OR_RETURN_ERRNO(uint64_t inode, InodeFromSocketFD(fd)); return FindBy(entries, match, [inode](const UDPEntry& e) { return (e.inode == inode); }); } @@ -158,8 +158,8 @@ PosixErrorOr> ProcNetUDPEntries() { ASSIGN_OR_RETURN_ERRNO(entry.remote_port, AtoiBase(fields[4], 16)); ASSIGN_OR_RETURN_ERRNO(entry.state, AtoiBase(fields[5], 16)); - ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi(fields[11])); - ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi(fields[13])); + ASSIGN_OR_RETURN_ERRNO(entry.uid, Atoi(fields[11])); + ASSIGN_OR_RETURN_ERRNO(entry.inode, Atoi(fields[13])); // Linux shares internal data structures between TCP and UDP sockets. The // proc entries for UDP sockets share some fields with TCP sockets, but @@ -267,7 +267,7 @@ TEST(ProcNetUDP, BoundEntry) { struct sockaddr addr; socklen_t len = sizeof(addr); ASSERT_THAT(getsockname(socket->get(), &addr, &len), SyscallSucceeds()); - uint16 port = PortFromInetSockaddr(&addr); + uint16_t port = PortFromInetSockaddr(&addr); std::vector entries = ASSERT_NO_ERRNO_AND_VALUE(ProcNetUDPEntries()); diff --git a/test/syscalls/linux/proc_net_unix.cc b/test/syscalls/linux/proc_net_unix.cc index 2fe63f215..66db0acaa 100644 --- a/test/syscalls/linux/proc_net_unix.cc +++ b/test/syscalls/linux/proc_net_unix.cc @@ -46,12 +46,12 @@ enum { // UnixEntry represents a single entry from /proc/net/unix. 
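InodeFromSocketFD above leans on st_ino being wide enough for any socket inode; on Linux, ino_t is 64 bits, so returning it as uint64_t is lossless and lines up with the inode column parsed out of /proc/net/udp. A simplified sketch of the lookup, with errors collapsed to a sentinel (an assumption of this sketch; the test returns a PosixError instead):

#include <sys/stat.h>
#include <cstdint>

uint64_t SocketInode(int fd) {
  struct stat s;
  if (fstat(fd, &s) != 0 || !S_ISSOCK(s.st_mode)) {
    return ~uint64_t{0};  // sentinel: fstat failed or fd is not a socket
  }
  return static_cast<uint64_t>(s.st_ino);
}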
struct UnixEntry { uintptr_t addr; - uint64 refs; - uint64 protocol; - uint64 flags; - uint64 type; - uint64 state; - uint64 inode; + uint64_t refs; + uint64_t protocol; + uint64_t flags; + uint64_t type; + uint64_t state; + uint64_t inode; std::string path; }; diff --git a/test/syscalls/linux/proc_pid_uid_gid_map.cc b/test/syscalls/linux/proc_pid_uid_gid_map.cc index 8e268ebd1..748f7be58 100644 --- a/test/syscalls/linux/proc_pid_uid_gid_map.cc +++ b/test/syscalls/linux/proc_pid_uid_gid_map.cc @@ -117,13 +117,13 @@ void DenyPidSetgroups(pid_t pid) { } // Returns a valid UID/GID that isn't id. -uint32 another_id(uint32 id) { return (id + 1) % 65535; } +uint32_t another_id(uint32_t id) { return (id + 1) % 65535; } struct TestParam { std::string desc; int cap; std::function get_map_filename; - std::function get_current_id; + std::function get_current_id; }; std::string DescribeTestParam(const ::testing::TestParamInfo& info) { @@ -135,17 +135,17 @@ std::vector UidGidMapTestParams() { [](absl::string_view pid) { return absl::StrCat("/proc/", pid, "/uid_map"); }, - []() -> uint32 { return getuid(); }}, + []() -> uint32_t { return getuid(); }}, TestParam{"GID", CAP_SETGID, [](absl::string_view pid) { return absl::StrCat("/proc/", pid, "/gid_map"); }, - []() -> uint32 { return getgid(); }}}; + []() -> uint32_t { return getgid(); }}}; } class ProcUidGidMapTest : public ::testing::TestWithParam { protected: - uint32 CurrentID() { return GetParam().get_current_id(); } + uint32_t CurrentID() { return GetParam().get_current_id(); } }; class ProcSelfUidGidMapTest : public ProcUidGidMapTest { @@ -198,7 +198,7 @@ TEST_P(ProcSelfUidGidMapTest, IsInitiallyEmpty) { TEST_P(ProcSelfUidGidMapTest, IdentityMapOwnID) { SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace())); - uint32 id = CurrentID(); + uint32_t id = CurrentID(); std::string line = absl::StrCat(id, " ", id, " 1"); EXPECT_THAT( InNewUserNamespaceWithMapFD([&](int fd) { @@ -213,7 +213,7 @@ TEST_P(ProcSelfUidGidMapTest, TrailingNewlineAndNULIgnored) { // and an invalid (incomplete) map entry are appended to the valid entry. The // newline should be accepted, and everything after the NUL should be ignored. SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace())); - uint32 id = CurrentID(); + uint32_t id = CurrentID(); std::string line = absl::StrCat(id, " ", id, " 1\n\0 4 3"); EXPECT_THAT( InNewUserNamespaceWithMapFD([&](int fd) { @@ -227,8 +227,8 @@ TEST_P(ProcSelfUidGidMapTest, TrailingNewlineAndNULIgnored) { TEST_P(ProcSelfUidGidMapTest, NonIdentityMapOwnID) { SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(CanCreateUserNamespace())); - uint32 id = CurrentID(); - uint32 id2 = another_id(id); + uint32_t id = CurrentID(); + uint32_t id2 = another_id(id); std::string line = absl::StrCat(id2, " ", id, " 1"); EXPECT_THAT( InNewUserNamespaceWithMapFD([&](int fd) { @@ -243,8 +243,8 @@ TEST_P(ProcSelfUidGidMapTest, MapOtherID) { // Whether or not we have CAP_SET*ID is irrelevant: the process running in the // new (child) user namespace won't have any capabilities in the current // (parent) user namespace, which is needed. 
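For context on the uid_map/gid_map tests above: the kernel parses each line as "<ID inside ns> <ID outside ns> <count>", and Linux uid_t/gid_t are 32-bit unsigned integers, hence uint32_t throughout. A hedged sketch of writing a one-ID identity mapping (WriteIdentityMap is a hypothetical helper; the tests go through InNewUserNamespaceWithMapFD):

#include <fcntl.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>

bool WriteIdentityMap(const char* map_path, uint32_t id) {
  char line[64];
  // uint32_t is unsigned int on Linux targets, so %u matches its width.
  int n = snprintf(line, sizeof(line), "%u %u 1", id, id);
  int fd = open(map_path, O_WRONLY);
  if (fd < 0) return false;
  bool ok = (write(fd, line, n) == n);
  close(fd);
  return ok;
}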
- uint32 id = CurrentID(); - uint32 id2 = another_id(id); + uint32_t id = CurrentID(); + uint32_t id2 = another_id(id); std::string line = absl::StrCat(id, " ", id2, " 1"); EXPECT_THAT(InNewUserNamespaceWithMapFD([&](int fd) { DenySelfSetgroups(); @@ -270,8 +270,8 @@ TEST_P(ProcPidUidGidMapTest, MapOtherIDPrivileged) { std::tie(child_pid, cleanup_child) = ASSERT_NO_ERRNO_AND_VALUE(CreateProcessInNewUserNamespace()); - uint32 id = CurrentID(); - uint32 id2 = another_id(id); + uint32_t id = CurrentID(); + uint32_t id2 = another_id(id); std::string line = absl::StrCat(id, " ", id2, " 1"); DenyPidSetgroups(child_pid); auto fd = ASSERT_NO_ERRNO_AND_VALUE(OpenMapFile(child_pid)); diff --git a/test/syscalls/linux/ptrace.cc b/test/syscalls/linux/ptrace.cc index 37dabb1ad..8f3800380 100644 --- a/test/syscalls/linux/ptrace.cc +++ b/test/syscalls/linux/ptrace.cc @@ -574,7 +574,7 @@ TEST_P(PtraceExecveTest, Execve_GetRegs_PeekUser_SIGKILL_TraceClone_TraceExit) { #ifdef __x86_64__ { // CS should be 0x33, indicating an 64-bit binary. - constexpr uint64 kAMD64UserCS = 0x33; + constexpr uint64_t kAMD64UserCS = 0x33; EXPECT_THAT(ptrace(PTRACE_PEEKUSER, leader_tid, offsetof(struct user_regs_struct, cs), 0), SyscallSucceedsWithValue(kAMD64UserCS)); @@ -862,7 +862,7 @@ TEST(PtraceTest, Int3) { TEST(PtraceTest, Sysemu_PokeUser) { constexpr int kSysemuHelperFirstExitCode = 126; - constexpr uint64 kSysemuInjectedExitGroupReturn = 42; + constexpr uint64_t kSysemuInjectedExitGroupReturn = 42; pid_t const child_pid = fork(); if (child_pid == 0) { diff --git a/test/syscalls/linux/pty.cc b/test/syscalls/linux/pty.cc index 5020372c1..dafe64d20 100644 --- a/test/syscalls/linux/pty.cc +++ b/test/syscalls/linux/pty.cc @@ -109,13 +109,13 @@ constexpr bool IsControlCharacter(char c) { return c <= 31; } struct Field { const char* name; - uint64 mask; - uint64 value; + uint64_t mask; + uint64_t value; }; // ParseFields returns a string representation of value, using the names in // fields. 
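On the ptrace hunk above: PTRACE_PEEKUSER returns the register value as ptrace's long result, and the test compares it against a uint64_t constant (0x33 is the x86-64 user code-segment selector). A sketch of that read in isolation, assuming the tracee is already stopped (PeekCS is a hypothetical name):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <cerrno>
#include <cstddef>
#include <cstdint>

uint64_t PeekCS(pid_t tracee) {
  errno = 0;  // a real caller must check errno, since -1 is a legal value
  long cs = ptrace(PTRACE_PEEKUSER, tracee,
                   offsetof(struct user_regs_struct, cs), 0);
  return static_cast<uint64_t>(cs);
}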
-std::string ParseFields(const Field* fields, size_t len, uint64 value) { +std::string ParseFields(const Field* fields, size_t len, uint64_t value) { bool first = true; std::string s; for (size_t i = 0; i < len; i++) { @@ -1213,8 +1213,8 @@ TEST_F(PtyTest, GetWindowSize) { } TEST_F(PtyTest, SetSlaveWindowSize) { - constexpr uint16 kRows = 343; - constexpr uint16 kCols = 2401; + constexpr uint16_t kRows = 343; + constexpr uint16_t kCols = 2401; struct winsize ws = {.ws_row = kRows, .ws_col = kCols}; ASSERT_THAT(ioctl(slave_.get(), TIOCSWINSZ, &ws), SyscallSucceeds()); @@ -1226,8 +1226,8 @@ TEST_F(PtyTest, SetSlaveWindowSize) { } TEST_F(PtyTest, SetMasterWindowSize) { - constexpr uint16 kRows = 343; - constexpr uint16 kCols = 2401; + constexpr uint16_t kRows = 343; + constexpr uint16_t kCols = 2401; struct winsize ws = {.ws_row = kRows, .ws_col = kCols}; ASSERT_THAT(ioctl(master_.get(), TIOCSWINSZ, &ws), SyscallSucceeds()); diff --git a/test/syscalls/linux/pwrite64.cc b/test/syscalls/linux/pwrite64.cc index 18f847929..b48fe540d 100644 --- a/test/syscalls/linux/pwrite64.cc +++ b/test/syscalls/linux/pwrite64.cc @@ -52,7 +52,7 @@ class Pwrite64 : public ::testing::Test { TEST_F(Pwrite64, AppendOnly) { int fd; ASSERT_THAT(fd = open(name_.c_str(), O_APPEND | O_RDWR), SyscallSucceeds()); - constexpr int64 kBufSize = 1024; + constexpr int64_t kBufSize = 1024; std::vector buf(kBufSize); std::fill(buf.begin(), buf.end(), 'a'); EXPECT_THAT(PwriteFd(fd, buf.data(), buf.size(), 0), @@ -64,7 +64,7 @@ TEST_F(Pwrite64, AppendOnly) { TEST_F(Pwrite64, InvalidArgs) { int fd; ASSERT_THAT(fd = open(name_.c_str(), O_APPEND | O_RDWR), SyscallSucceeds()); - constexpr int64 kBufSize = 1024; + constexpr int64_t kBufSize = 1024; std::vector buf(kBufSize); std::fill(buf.begin(), buf.end(), 'a'); EXPECT_THAT(PwriteFd(fd, buf.data(), buf.size(), -1), diff --git a/test/syscalls/linux/raw_socket_hdrincl.cc b/test/syscalls/linux/raw_socket_hdrincl.cc index 0c04b974e..0a27506aa 100644 --- a/test/syscalls/linux/raw_socket_hdrincl.cc +++ b/test/syscalls/linux/raw_socket_hdrincl.cc @@ -53,7 +53,7 @@ class RawHDRINCL : public ::testing::Test { // Fills in buf with an IP header, UDP header, and payload. Returns false if // buf_size isn't large enough to hold everything. bool FillPacket(char* buf, size_t buf_size, int port, const char* payload, - uint16 payload_size); + uint16_t payload_size); // The socket used for both reading and writing. int socket_; @@ -104,7 +104,7 @@ struct iphdr RawHDRINCL::LoopbackHeader() { } bool RawHDRINCL::FillPacket(char* buf, size_t buf_size, int port, - const char* payload, uint16 payload_size) { + const char* payload, uint16_t payload_size) { if (buf_size < sizeof(struct iphdr) + sizeof(struct udphdr) + payload_size) { return false; } diff --git a/test/syscalls/linux/rseq.cc b/test/syscalls/linux/rseq.cc index 9b2a76b91..106c045e3 100644 --- a/test/syscalls/linux/rseq.cc +++ b/test/syscalls/linux/rseq.cc @@ -43,7 +43,7 @@ namespace { // only be cleared by execve (or knowing the old rseq address), and glibc (based // on the current unmerged patches) register rseq before calling main()). 
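On the pty window-size tests above: struct winsize carries unsigned short (16-bit) rows and columns, which is why kRows/kCols become uint16_t. A minimal sketch of the ioctl (SetWindowSize is a hypothetical wrapper):

#include <sys/ioctl.h>
#include <cstdint>

bool SetWindowSize(int fd, uint16_t rows, uint16_t cols) {
  struct winsize ws = {};
  ws.ws_row = rows;  // both fields are unsigned short in the kernel ABI
  ws.ws_col = cols;
  return ioctl(fd, TIOCSWINSZ, &ws) == 0;
}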
-int RSeq(struct rseq* rseq, uint32 rseq_len, int flags, uint32 sig) { +int RSeq(struct rseq* rseq, uint32_t rseq_len, int flags, uint32_t sig) { return syscall(kRseqSyscall, rseq, rseq_len, flags, sig); } diff --git a/test/syscalls/linux/rseq/critical.h b/test/syscalls/linux/rseq/critical.h index 238143fd0..ac987a25e 100644 --- a/test/syscalls/linux/rseq/critical.h +++ b/test/syscalls/linux/rseq/critical.h @@ -18,7 +18,7 @@ #include "test/syscalls/linux/rseq/types.h" #include "test/syscalls/linux/rseq/uapi.h" -constexpr uint32 kRseqSignature = 0x90909090; +constexpr uint32_t kRseqSignature = 0x90909090; extern "C" { diff --git a/test/syscalls/linux/rseq/rseq.cc b/test/syscalls/linux/rseq/rseq.cc index 4fe7c5ecf..f036db26d 100644 --- a/test/syscalls/linux/rseq/rseq.cc +++ b/test/syscalls/linux/rseq/rseq.cc @@ -49,7 +49,7 @@ int strcmp(const char* s1, const char* s2) { return static_cast(*p1) - static_cast(*p2); } -int sys_rseq(struct rseq* rseq, uint32 rseq_len, int flags, uint32 sig) { +int sys_rseq(struct rseq* rseq, uint32_t rseq_len, int flags, uint32_t sig) { return raw_syscall(kRseqSyscall, rseq, rseq_len, flags, sig); } @@ -176,10 +176,10 @@ int TestAbort() { struct rseq_cs cs = {}; cs.version = 0; cs.flags = 0; - cs.start_ip = reinterpret_cast(&rseq_loop_start); - cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - - reinterpret_cast(&rseq_loop_start); - cs.abort_ip = reinterpret_cast(&rseq_loop_abort); + cs.start_ip = reinterpret_cast(&rseq_loop_start); + cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - + reinterpret_cast(&rseq_loop_start); + cs.abort_ip = reinterpret_cast(&rseq_loop_abort); // Loops until abort. If this returns then abort occurred. rseq_loop(&r, &cs); @@ -198,10 +198,10 @@ int TestAbortBefore() { struct rseq_cs cs = {}; cs.version = 0; cs.flags = 0; - cs.start_ip = reinterpret_cast(&rseq_loop_start); - cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - - reinterpret_cast(&rseq_loop_start); - cs.abort_ip = reinterpret_cast(&rseq_loop_early_abort); + cs.start_ip = reinterpret_cast(&rseq_loop_start); + cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - + reinterpret_cast(&rseq_loop_start); + cs.abort_ip = reinterpret_cast(&rseq_loop_early_abort); // Loops until abort. If this returns then abort occurred. rseq_loop(&r, &cs); @@ -220,10 +220,10 @@ int TestAbortSignature() { struct rseq_cs cs = {}; cs.version = 0; cs.flags = 0; - cs.start_ip = reinterpret_cast(&rseq_loop_start); - cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - - reinterpret_cast(&rseq_loop_start); - cs.abort_ip = reinterpret_cast(&rseq_loop_abort); + cs.start_ip = reinterpret_cast(&rseq_loop_start); + cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - + reinterpret_cast(&rseq_loop_start); + cs.abort_ip = reinterpret_cast(&rseq_loop_abort); // Loops until abort. This should SIGSEGV on abort. rseq_loop(&r, &cs); @@ -242,10 +242,10 @@ int TestAbortPreCommit() { struct rseq_cs cs = {}; cs.version = 0; cs.flags = 0; - cs.start_ip = reinterpret_cast(&rseq_loop_start); - cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - - reinterpret_cast(&rseq_loop_start); - cs.abort_ip = reinterpret_cast(&rseq_loop_pre_commit); + cs.start_ip = reinterpret_cast(&rseq_loop_start); + cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - + reinterpret_cast(&rseq_loop_start); + cs.abort_ip = reinterpret_cast(&rseq_loop_pre_commit); // Loops until abort. This should SIGSEGV on abort. 
rseq_loop(&r, &cs); @@ -264,10 +264,10 @@ int TestAbortClearsCS() { struct rseq_cs cs = {}; cs.version = 0; cs.flags = 0; - cs.start_ip = reinterpret_cast(&rseq_loop_start); - cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - - reinterpret_cast(&rseq_loop_start); - cs.abort_ip = reinterpret_cast(&rseq_loop_abort); + cs.start_ip = reinterpret_cast(&rseq_loop_start); + cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - + reinterpret_cast(&rseq_loop_start); + cs.abort_ip = reinterpret_cast(&rseq_loop_abort); // Loops until abort. If this returns then abort occurred. rseq_loop(&r, &cs); @@ -290,10 +290,10 @@ int TestInvalidAbortClearsCS() { struct rseq_cs cs = {}; cs.version = 0; cs.flags = 0; - cs.start_ip = reinterpret_cast(&rseq_loop_start); - cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - - reinterpret_cast(&rseq_loop_start); - cs.abort_ip = reinterpret_cast(&rseq_loop_abort); + cs.start_ip = reinterpret_cast(&rseq_loop_start); + cs.post_commit_offset = reinterpret_cast(&rseq_loop_post_commit) - + reinterpret_cast(&rseq_loop_start); + cs.abort_ip = reinterpret_cast(&rseq_loop_abort); __atomic_store_n(&r.rseq_cs, &cs, __ATOMIC_RELAXED); diff --git a/test/syscalls/linux/rseq/types.h b/test/syscalls/linux/rseq/types.h index 7f1e0c5c2..b6afe9817 100644 --- a/test/syscalls/linux/rseq/types.h +++ b/test/syscalls/linux/rseq/types.h @@ -18,14 +18,14 @@ using size_t = __SIZE_TYPE__; using uintptr_t = __UINTPTR_TYPE__; -using uint8 = __UINT8_TYPE__; -using uint16 = __UINT16_TYPE__; -using uint32 = __UINT32_TYPE__; -using uint64 = __UINT64_TYPE__; +using uint8_t = __UINT8_TYPE__; +using uint16_t = __UINT16_TYPE__; +using uint32_t = __UINT32_TYPE__; +using uint64_t = __UINT64_TYPE__; -using int8 = __INT8_TYPE__; -using int16 = __INT16_TYPE__; -using int32 = __INT32_TYPE__; -using int64 = __INT64_TYPE__; +using int8_t = __INT8_TYPE__; +using int16_t = __INT16_TYPE__; +using int32_t = __INT32_TYPE__; +using int64_t = __INT64_TYPE__; #endif // GVISOR_TEST_SYSCALLS_LINUX_RSEQ_TYPES_H_ diff --git a/test/syscalls/linux/seccomp.cc b/test/syscalls/linux/seccomp.cc index 7a2c1191a..7e41fe7d8 100644 --- a/test/syscalls/linux/seccomp.cc +++ b/test/syscalls/linux/seccomp.cc @@ -49,12 +49,12 @@ namespace testing { namespace { // A syscall not implemented by Linux that we don't expect to be called. -constexpr uint32 kFilteredSyscall = SYS_vserver; +constexpr uint32_t kFilteredSyscall = SYS_vserver; // Applies a seccomp-bpf filter that returns `filtered_result` for // `sysno` and allows all other syscalls. Async-signal-safe. -void ApplySeccompFilter(uint32 sysno, uint32 filtered_result, - uint32 flags = 0) { +void ApplySeccompFilter(uint32_t sysno, uint32_t filtered_result, + uint32_t flags = 0) { // "Prior to [PR_SET_SECCOMP], the task must call prctl(PR_SET_NO_NEW_PRIVS, // 1) or run with CAP_SYS_ADMIN privileges in its namespace." 
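A minimal sketch of the kind of filter ApplySeccompFilter above installs: return filtered_result for one syscall number and allow everything else. The syscall number in struct seccomp_data is a 32-bit value, hence the uint32_t parameters (sketch only; the test's version is additionally async-signal-safe and forwards seccomp(2) flags):

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <cstddef>
#include <cstdint>

bool InstallFilter(uint32_t sysno, uint32_t filtered_result) {
  struct sock_filter filter[] = {
      // Load the 32-bit syscall number.
      BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
      // If it equals sysno, fall through to the filtered result; else skip.
      BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, sysno, 0, 1),
      BPF_STMT(BPF_RET | BPF_K, filtered_result),
      BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
  };
  struct sock_fprog prog = {
      static_cast<unsigned short>(sizeof(filter) / sizeof(filter[0])), filter};
  if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) return false;
  return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog) == 0;
}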
- // Documentation/prctl/seccomp_filter.txt @@ -162,7 +162,7 @@ TEST(SeccompTest, RetKillOnlyKillsOneThread) { TEST(SeccompTest, RetTrapCausesSIGSYS) { pid_t const pid = fork(); if (pid == 0) { - constexpr uint16 kTrapValue = 0xdead; + constexpr uint16_t kTrapValue = 0xdead; RegisterSignalHandler( SIGSYS, +[](int signo, siginfo_t* info, void* ucv) { ucontext_t* uc = static_cast(ucv); @@ -191,7 +191,7 @@ TEST(SeccompTest, RetTrapCausesSIGSYS) { #ifdef __x86_64__ -constexpr uint64 kVsyscallTimeEntry = 0xffffffffff600400; +constexpr uint64_t kVsyscallTimeEntry = 0xffffffffff600400; time_t vsyscall_time(time_t* t) { return reinterpret_cast(kVsyscallTimeEntry)(t); @@ -202,7 +202,7 @@ TEST(SeccompTest, SeccompAppliesToVsyscall) { pid_t const pid = fork(); if (pid == 0) { - constexpr uint16 kTrapValue = 0xdead; + constexpr uint16_t kTrapValue = 0xdead; RegisterSignalHandler( SIGSYS, +[](int signo, siginfo_t* info, void* ucv) { ucontext_t* uc = static_cast(ucv); @@ -335,7 +335,7 @@ TEST(SeccompTest, TsyncAppliesToAllThreads) { // This test will validate that seccomp(2) rejects unsupported flags. TEST(SeccompTest, SeccompRejectsUnknownFlags) { - constexpr uint32 kInvalidFlag = 123; + constexpr uint32_t kInvalidFlag = 123; ASSERT_THAT( syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, kInvalidFlag, nullptr), SyscallFailsWithErrno(EINVAL)); diff --git a/test/syscalls/linux/semaphore.cc b/test/syscalls/linux/semaphore.cc index a9e8a44c1..e9b131ca9 100644 --- a/test/syscalls/linux/semaphore.cc +++ b/test/syscalls/linux/semaphore.cc @@ -274,7 +274,7 @@ TEST(SemaphoreTest, SemOpRandom) { // Protects the seed below. absl::Mutex mutex; - uint32 seed = time(nullptr); + uint32_t seed = time(nullptr); int count = 0; // Tracks semaphore value. bool done = false; // Tells waiters to stop after signal threads are done. @@ -284,7 +284,7 @@ TEST(SemaphoreTest, SemOpRandom) { for (auto& dec : decs) { dec = absl::make_unique([&sem, &mutex, &count, &seed, &done] { for (size_t i = 0; i < 500; ++i) { - int16 val; + int16_t val; { absl::MutexLock l(&mutex); if (done) { @@ -325,7 +325,7 @@ TEST(SemaphoreTest, SemOpRandom) { for (auto& inc : incs) { inc = absl::make_unique([&sem, &mutex, &count, &seed] { for (size_t i = 0; i < 500; ++i) { - int16 val; + int16_t val; { absl::MutexLock l(&mutex); val = (rand_r(&seed) % 10 + 1); // Rand between 1 and 10. @@ -415,14 +415,14 @@ TEST(SemaphoreTest, SemCtlValAll) { ASSERT_THAT(sem.get(), SyscallSucceeds()); // Semaphores must start with 0. - uint16 get[3] = {10, 10, 10}; + uint16_t get[3] = {10, 10, 10}; EXPECT_THAT(semctl(sem.get(), 1, GETALL, get), SyscallSucceedsWithValue(0)); for (auto v : get) { EXPECT_EQ(v, 0); } // SetAll and check that they were set. 
- uint16 vals[3] = {0, 10, 20}; + uint16_t vals[3] = {0, 10, 20}; EXPECT_THAT(semctl(sem.get(), 1, SETALL, vals), SyscallSucceedsWithValue(0)); EXPECT_THAT(semctl(sem.get(), 1, GETALL, get), SyscallSucceedsWithValue(0)); for (size_t i = 0; i < ABSL_ARRAYSIZE(vals); ++i) { diff --git a/test/syscalls/linux/shm.cc b/test/syscalls/linux/shm.cc index 80700615f..7ba752599 100644 --- a/test/syscalls/linux/shm.cc +++ b/test/syscalls/linux/shm.cc @@ -30,7 +30,7 @@ namespace { using ::testing::_; -const uint64 kAllocSize = kPageSize * 128ULL; +const uint64_t kAllocSize = kPageSize * 128ULL; PosixErrorOr Shmat(int shmid, const void* shmaddr, int shmflg) { const intptr_t addr = @@ -320,11 +320,11 @@ TEST(ShmTest, RemovedSegmentsAreDestroyed) { Shmget(IPC_PRIVATE, kAllocSize, IPC_CREAT | 0777)); const char* addr = ASSERT_NO_ERRNO_AND_VALUE(Shmat(shm.id(), nullptr, 0)); - const uint64 alloc_pages = kAllocSize / kPageSize; + const uint64_t alloc_pages = kAllocSize / kPageSize; struct shm_info info; ASSERT_NO_ERRNO(Shmctl(0 /*ignored*/, SHM_INFO, &info)); - const uint64 before = info.shm_tot; + const uint64_t before = info.shm_tot; ASSERT_NO_ERRNO(shm.Rmid()); ASSERT_NO_ERRNO(Shmdt(addr)); @@ -400,7 +400,7 @@ TEST(ShmDeathTest, SegmentNotAccessibleAfterDetach) { TEST(ShmTest, RequestingSegmentSmallerThanSHMMINFails) { struct shminfo info; ASSERT_NO_ERRNO(Shmctl(0, IPC_INFO, &info)); - const uint64 size = info.shmmin - 1; + const uint64_t size = info.shmmin - 1; EXPECT_THAT(Shmget(IPC_PRIVATE, size, IPC_CREAT | 0777), PosixErrorIs(EINVAL, _)); } @@ -408,7 +408,7 @@ TEST(ShmTest, RequestingSegmentSmallerThanSHMMINFails) { TEST(ShmTest, RequestingSegmentLargerThanSHMMAXFails) { struct shminfo info; ASSERT_NO_ERRNO(Shmctl(0, IPC_INFO, &info)); - const uint64 size = info.shmmax + kPageSize; + const uint64_t size = info.shmmax + kPageSize; EXPECT_THAT(Shmget(IPC_PRIVATE, size, IPC_CREAT | 0777), PosixErrorIs(EINVAL, _)); } diff --git a/test/syscalls/linux/sigaltstack.cc b/test/syscalls/linux/sigaltstack.cc index 9a0816e10..62b04ef1d 100644 --- a/test/syscalls/linux/sigaltstack.cc +++ b/test/syscalls/linux/sigaltstack.cc @@ -114,7 +114,7 @@ TEST(SigaltstackTest, ResetByExecve) { volatile bool badhandler_on_sigaltstack = true; // Set by the handler. char* volatile badhandler_low_water_mark = nullptr; // Set by the handler. -volatile uint8 badhandler_recursive_faults = 0; // Consumed by the handler. +volatile uint8_t badhandler_recursive_faults = 0; // Consumed by the handler. void badhandler(int sig, siginfo_t* siginfo, void* arg) { char stack_var = 0; diff --git a/test/syscalls/linux/sigiret.cc b/test/syscalls/linux/sigiret.cc index 207506569..a47c781ea 100644 --- a/test/syscalls/linux/sigiret.cc +++ b/test/syscalls/linux/sigiret.cc @@ -28,8 +28,8 @@ namespace testing { namespace { -constexpr uint64 kOrigRcx = 0xdeadbeeffacefeed; -constexpr uint64 kOrigR11 = 0xfacefeedbaad1dea; +constexpr uint64_t kOrigRcx = 0xdeadbeeffacefeed; +constexpr uint64_t kOrigR11 = 0xfacefeedbaad1dea; volatile int gotvtalrm, ready; @@ -40,8 +40,8 @@ void sigvtalrm(int sig, siginfo_t* siginfo, void* _uc) { // - test is in the busy-wait loop waiting for signal. // - %rcx and %r11 values in mcontext_t match kOrigRcx and kOrigR11. if (ready && - static_cast(uc->uc_mcontext.gregs[REG_RCX]) == kOrigRcx && - static_cast(uc->uc_mcontext.gregs[REG_R11]) == kOrigR11) { + static_cast(uc->uc_mcontext.gregs[REG_RCX]) == kOrigRcx && + static_cast(uc->uc_mcontext.gregs[REG_R11]) == kOrigR11) { // Modify the values %rcx and %r11 in the ucontext. 
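Back on the semaphore hunk above: semctl's GETALL/SETALL operate on arrays of unsigned short, and on Linux uint16_t is that same type, so the migration is a no-op at the ABI level. A sketch with the caller-defined semun union that SUSv3 requires (SetAllValues is a hypothetical wrapper):

#include <sys/sem.h>
#include <cstdint>

union semun {
  int val;
  struct semid_ds* buf;
  unsigned short* array;
};

bool SetAllValues(int semid, uint16_t* vals) {
  union semun arg;
  arg.array = vals;  // uint16_t and unsigned short coincide on Linux
  return semctl(semid, 0, SETALL, arg) == 0;
}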
These are the // values seen by the application after the signal handler returns. uc->uc_mcontext.gregs[REG_RCX] = ~kOrigRcx; @@ -69,8 +69,8 @@ TEST(SigIretTest, CheckRcxR11) { ASSERT_NO_ERRNO_AND_VALUE(ScopedItimer(ITIMER_VIRTUAL, itimer)); // Initialize %rcx and %r11 and spin until the signal handler returns. - uint64 rcx = kOrigRcx; - uint64 r11 = kOrigR11; + uint64_t rcx = kOrigRcx; + uint64_t r11 = kOrigR11; asm volatile( "movq %[rcx], %%rcx;" // %rcx = rcx "movq %[r11], %%r11;" // %r11 = r11 @@ -91,7 +91,7 @@ TEST(SigIretTest, CheckRcxR11) { EXPECT_EQ(r11, ~kOrigR11); } -constexpr uint64 kNonCanonicalRip = 0xCCCC000000000000; +constexpr uint64_t kNonCanonicalRip = 0xCCCC000000000000; // Test that a non-canonical signal handler faults as expected. TEST(SigIretTest, BadHandler) { diff --git a/test/syscalls/linux/socket_bind_to_device_distribution.cc b/test/syscalls/linux/socket_bind_to_device_distribution.cc index c705da1b4..5ed57625c 100644 --- a/test/syscalls/linux/socket_bind_to_device_distribution.cc +++ b/test/syscalls/linux/socket_bind_to_device_distribution.cc @@ -77,13 +77,13 @@ class BindToDeviceDistributionTest } }; -PosixErrorOr AddrPort(int family, sockaddr_storage const& addr) { +PosixErrorOr AddrPort(int family, sockaddr_storage const& addr) { switch (family) { case AF_INET: - return static_cast( + return static_cast( reinterpret_cast(&addr)->sin_port); case AF_INET6: - return static_cast( + return static_cast( reinterpret_cast(&addr)->sin6_port); default: return PosixError(EINVAL, @@ -91,7 +91,7 @@ PosixErrorOr AddrPort(int family, sockaddr_storage const& addr) { } } -PosixError SetAddrPort(int family, sockaddr_storage* addr, uint16 port) { +PosixError SetAddrPort(int family, sockaddr_storage* addr, uint16_t port) { switch (family) { case AF_INET: reinterpret_cast(addr)->sin_port = port; @@ -157,7 +157,7 @@ TEST_P(BindToDeviceDistributionTest, Tcp) { getsockname(listener_fds[0].get(), reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port)); } @@ -190,7 +190,7 @@ TEST_P(BindToDeviceDistributionTest, Tcp) { // cause the test to use absurd amounts of memory. 
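On the AddrPort helpers above: sin_port and sin6_port are in_port_t, i.e. uint16_t in network byte order, so uint16_t is the exact width for a port. A simplified variant that returns host order rather than the raw field (the tests keep network order so the value can be written straight back with SetAddrPort):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <cstdint>

uint16_t PortOf(const struct sockaddr_storage* addr) {
  switch (addr->ss_family) {
    case AF_INET:
      return ntohs(
          reinterpret_cast<const struct sockaddr_in*>(addr)->sin_port);
    case AF_INET6:
      return ntohs(
          reinterpret_cast<const struct sockaddr_in6*>(addr)->sin6_port);
    default:
      return 0;  // unknown family in this sketch
  }
}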
// // See: https://tools.ietf.org/html/rfc2525#page-50 section 2.17 - uint16 data; + uint16_t data; EXPECT_THAT( RetryEINTR(recv)(fd.ValueOrDie().get(), &data, sizeof(data), 0), SyscallSucceedsWithValue(sizeof(data))); @@ -296,7 +296,7 @@ TEST_P(BindToDeviceDistributionTest, Udp) { getsockname(listener_fds[0].get(), reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); ASSERT_NO_ERRNO(SetAddrPort(listener.family(), &listen_addr, port)); ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port)); diff --git a/test/syscalls/linux/socket_generic.cc b/test/syscalls/linux/socket_generic.cc index ee9856f7f..e8f24a59e 100644 --- a/test/syscalls/linux/socket_generic.cc +++ b/test/syscalls/linux/socket_generic.cc @@ -507,7 +507,7 @@ TEST_P(AllSocketPairTest, SoRcvTimeoIsSetLargerArg) { struct timeval_with_extra { struct timeval tv; - int64 extra_data; + int64_t extra_data; } ABSL_ATTRIBUTE_PACKED; timeval_with_extra tv_extra; diff --git a/test/syscalls/linux/socket_inet_loopback.cc b/test/syscalls/linux/socket_inet_loopback.cc index 12df2b35a..2f9821555 100644 --- a/test/syscalls/linux/socket_inet_loopback.cc +++ b/test/syscalls/linux/socket_inet_loopback.cc @@ -47,13 +47,13 @@ namespace { using ::testing::Gt; -PosixErrorOr AddrPort(int family, sockaddr_storage const& addr) { +PosixErrorOr AddrPort(int family, sockaddr_storage const& addr) { switch (family) { case AF_INET: - return static_cast( + return static_cast( reinterpret_cast(&addr)->sin_port); case AF_INET6: - return static_cast( + return static_cast( reinterpret_cast(&addr)->sin6_port); default: return PosixError(EINVAL, @@ -61,7 +61,7 @@ PosixErrorOr AddrPort(int family, sockaddr_storage const& addr) { } } -PosixError SetAddrPort(int family, sockaddr_storage* addr, uint16 port) { +PosixError SetAddrPort(int family, sockaddr_storage* addr, uint16_t port) { switch (family) { case AF_INET: reinterpret_cast(addr)->sin_port = port; @@ -276,7 +276,7 @@ void tcpSimpleConnectTest(TestAddress const& listener, ASSERT_THAT(getsockname(listen_fd.get(), reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); // Connect to the listening socket. @@ -339,7 +339,7 @@ TEST_P(SocketInetLoopbackTest, TCPListenClose) { ASSERT_THAT(getsockname(listen_fd.get(), reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); DisableSave ds; // Too many system calls. @@ -400,7 +400,7 @@ TEST_P(SocketInetLoopbackTest, TCPbacklog) { ASSERT_THAT(getsockname(listen_fd.get(), reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); int i = 0; while (1) { @@ -468,7 +468,7 @@ TEST_P(SocketInetLoopbackTest, TCPFinWait2Test_NoRandomSave) { reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); // Connect to the listening socket. 
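The pattern repeated throughout these loopback tests: bind to port 0, then read back the kernel-chosen ephemeral port with getsockname. A sketch for an IPv4 socket, with error handling reduced to a zero return (BoundPort is a hypothetical helper):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <cstdint>

uint16_t BoundPort(int fd) {
  struct sockaddr_in addr = {};
  socklen_t len = sizeof(addr);
  if (getsockname(fd, reinterpret_cast<struct sockaddr*>(&addr), &len) != 0) {
    return 0;
  }
  return ntohs(addr.sin_port);  // sin_port is uint16_t in network order
}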
@@ -576,7 +576,7 @@ TEST_P(SocketInetLoopbackTest, TCPLinger2TimeoutAfterClose_NoRandomSave) { reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); // Connect to the listening socket. @@ -650,7 +650,7 @@ TEST_P(SocketInetLoopbackTest, TCPResetAfterClose) { reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); // Connect to the listening socket. @@ -717,7 +717,7 @@ TEST_P(SocketInetLoopbackTest, TCPTimeWaitTest_NoRandomSave) { reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); // Connect to the listening socket. @@ -794,7 +794,7 @@ TEST_P(SocketInetLoopbackTest, AcceptedInheritsTCPUserTimeout) { reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - const uint16 port = + const uint16_t port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); // Set the userTimeout on the listening socket. @@ -898,7 +898,7 @@ TEST_P(SocketInetReusePortTest, TcpPortReuseMultiThread_NoRandomSave) { getsockname(listener_fds[0].get(), reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); ASSERT_NO_ERRNO(SetAddrPort(listener.family(), &listen_addr, port)); ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port)); @@ -935,7 +935,7 @@ TEST_P(SocketInetReusePortTest, TcpPortReuseMultiThread_NoRandomSave) { // cause the test to use absurd amounts of memory. 
// // See: https://tools.ietf.org/html/rfc2525#page-50 section 2.17 - uint16 data; + uint16_t data; EXPECT_THAT( RetryEINTR(recv)(fd.ValueOrDie().get(), &data, sizeof(data), 0), SyscallSucceedsWithValue(sizeof(data))); @@ -1022,7 +1022,7 @@ TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThread) { getsockname(listener_fds[0].get(), reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); ASSERT_NO_ERRNO(SetAddrPort(listener.family(), &listen_addr, port)); ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port)); @@ -1138,7 +1138,7 @@ TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThreadShort) { getsockname(listener_fds[0].get(), reinterpret_cast(&listen_addr), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(listener.family(), listen_addr)); ASSERT_NO_ERRNO(SetAddrPort(listener.family(), &listen_addr, port)); ASSERT_NO_ERRNO(SetAddrPort(connector.family(), &conn_addr, port)); @@ -1174,7 +1174,7 @@ TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThreadShort) { pollfds[i].events = POLLIN; } - std::map portToFD; + std::map portToFD; int received = 0; while (received < kConnectAttempts * 2) { @@ -1196,7 +1196,7 @@ TEST_P(SocketInetReusePortTest, UdpPortReuseMultiThreadShort) { fd, &data, sizeof(data), 0, reinterpret_cast(&addr), &addrlen), SyscallSucceedsWithValue(sizeof(data))); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(connector.family(), addr)); auto prev_port = portToFD.find(port); // Check that all packets from one client have been delivered to the @@ -1257,7 +1257,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedLoopbackOnlyReservesV4) { ASSERT_THAT(getsockname(fd_dual.get(), reinterpret_cast(&addr_dual), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual)); // Verify that we can still bind the v6 loopback on the same port. @@ -1309,7 +1309,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedAnyOnlyReservesV4) { ASSERT_THAT(getsockname(fd_dual.get(), reinterpret_cast(&addr_dual), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual)); // Verify that we can still bind the v6 loopback on the same port. @@ -1360,7 +1360,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, DualStackV6AnyReservesEverything) { ASSERT_THAT(getsockname(fd_dual.get(), reinterpret_cast(&addr_dual), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual)); // Verify that binding the v6 loopback with the same port fails. @@ -1419,7 +1419,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6OnlyV6AnyReservesV6) { ASSERT_THAT(getsockname(fd_dual.get(), reinterpret_cast(&addr_dual), &addrlen), SyscallSucceeds()); - uint16 const port = + uint16_t const port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual)); // Verify that binding the v6 loopback with the same port fails. 
@@ -1498,7 +1498,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReserved) { reinterpret_cast(&connected_addr), &connected_addr_len), SyscallSucceeds()); - uint16 const ephemeral_port = + uint16_t const ephemeral_port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr)); // Verify that we actually got an ephemeral port. @@ -1603,7 +1603,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReservedReuseAddr) { reinterpret_cast(&connected_addr), &connected_addr_len), SyscallSucceeds()); - uint16 const ephemeral_port = + uint16_t const ephemeral_port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr)); // Verify that we actually got an ephemeral port. @@ -1665,7 +1665,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedEphemeralPortReserved) { reinterpret_cast(&connected_addr), &connected_addr_len), SyscallSucceeds()); - uint16 const ephemeral_port = + uint16_t const ephemeral_port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr)); // Verify that we actually got an ephemeral port. @@ -1794,7 +1794,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, reinterpret_cast(&connected_addr), &connected_addr_len), SyscallSucceeds()); - uint16 const ephemeral_port = + uint16_t const ephemeral_port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr)); // Verify that we actually got an ephemeral port. @@ -1856,7 +1856,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReserved) { reinterpret_cast(&connected_addr), &connected_addr_len), SyscallSucceeds()); - uint16 const ephemeral_port = + uint16_t const ephemeral_port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr)); // Verify that we actually got an ephemeral port. @@ -1988,7 +1988,7 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReservedReuseAddr) { reinterpret_cast(&connected_addr), &connected_addr_len), SyscallSucceeds()); - uint16 const ephemeral_port = + uint16_t const ephemeral_port = ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr.family(), connected_addr)); // Verify that we actually got an ephemeral port. 
diff --git a/test/syscalls/linux/socket_ip_unbound.cc b/test/syscalls/linux/socket_ip_unbound.cc index 4a8337159..ca597e267 100644 --- a/test/syscalls/linux/socket_ip_unbound.cc +++ b/test/syscalls/linux/socket_ip_unbound.cc @@ -223,7 +223,7 @@ TEST_P(IPUnboundSocketTest, CheckSkipECN) { TOSOption t = GetTOSOption(GetParam().domain); EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz), SyscallSucceedsWithValue(0)); - int expect = static_cast(set); + int expect = static_cast(set); if (GetParam().protocol == IPPROTO_TCP) { expect &= ~INET_ECN_MASK; } @@ -267,7 +267,7 @@ TEST_P(IPUnboundSocketTest, SmallTOSOptionSize) { EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, i), SyscallSucceedsWithValue(0)); expect_tos = set; - expect_sz = sizeof(uint8); + expect_sz = sizeof(uint8_t); } else { EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, i), SyscallFailsWithErrno(EINVAL)); @@ -314,7 +314,7 @@ TEST_P(IPUnboundSocketTest, NegativeTOS) { SyscallSucceedsWithValue(0)); int expect; if (GetParam().domain == AF_INET) { - expect = static_cast(set); + expect = static_cast(set); if (GetParam().protocol == IPPROTO_TCP) { expect &= ~INET_ECN_MASK; } @@ -340,7 +340,7 @@ TEST_P(IPUnboundSocketTest, InvalidNegativeTOS) { if (GetParam().domain == AF_INET) { EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz), SyscallSucceedsWithValue(0)); - expect = static_cast(set); + expect = static_cast(set); if (GetParam().protocol == IPPROTO_TCP) { expect &= ~INET_ECN_MASK; } diff --git a/test/syscalls/linux/socket_netdevice.cc b/test/syscalls/linux/socket_netdevice.cc index 689014a59..405dbbd73 100644 --- a/test/syscalls/linux/socket_netdevice.cc +++ b/test/syscalls/linux/socket_netdevice.cc @@ -70,14 +70,14 @@ TEST(NetdeviceTest, Netmask) { // netmask obtained via ioctl. FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE)); - uint32 port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); + uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); struct request { struct nlmsghdr hdr; struct rtgenmsg rgm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req; req.hdr.nlmsg_len = sizeof(req); @@ -109,7 +109,7 @@ TEST(NetdeviceTest, Netmask) { struct ifaddrmsg *ifaddrmsg = reinterpret_cast(NLMSG_DATA(hdr)); - if (ifaddrmsg->ifa_index == static_cast(ifr.ifr_ifindex) && + if (ifaddrmsg->ifa_index == static_cast(ifr.ifr_ifindex) && ifaddrmsg->ifa_family == AF_INET) { prefixlen = ifaddrmsg->ifa_prefixlen; } @@ -120,7 +120,7 @@ TEST(NetdeviceTest, Netmask) { // Netmask is stored big endian in struct sockaddr_in, so we do the same for // comparison. - uint32 mask = 0xffffffff << (32 - prefixlen); + uint32_t mask = 0xffffffff << (32 - prefixlen); mask = absl::gbswap_32(mask); // Check that the loopback interface has the correct subnet mask. 
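On the netmask check above: the mask is built with 32-bit host-order arithmetic (hence uint32_t) and then byte-swapped, because sockaddr_in stores addresses big-endian. A sketch using htonl instead of absl::gbswap_32, with a guard for prefixlen == 0 since shifting a 32-bit value by 32 is undefined (the loopback case in the test always has a nonzero prefix):

#include <arpa/inet.h>
#include <cstdint>

uint32_t MaskFromPrefix(int prefixlen) {
  uint32_t mask = (prefixlen == 0) ? 0 : 0xffffffffu << (32 - prefixlen);
  return htonl(mask);  // big-endian, matching the kernel's sockaddr_in
}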
diff --git a/test/syscalls/linux/socket_netlink_route.cc b/test/syscalls/linux/socket_netlink_route.cc index 5612f1a13..ef567f512 100644 --- a/test/syscalls/linux/socket_netlink_route.cc +++ b/test/syscalls/linux/socket_netlink_route.cc @@ -116,14 +116,14 @@ void CheckGetLinkResponse(const struct nlmsghdr* hdr, int seq, int port) { TEST(NetlinkRouteTest, GetLinkDump) { FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE)); - uint32 port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); + uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); struct request { struct nlmsghdr hdr; struct ifinfomsg ifm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req = {}; req.hdr.nlmsg_len = sizeof(req); @@ -164,7 +164,7 @@ TEST(NetlinkRouteTest, MsgHdrMsgUnsuppType) { struct ifinfomsg ifm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req = {}; req.hdr.nlmsg_len = sizeof(req); @@ -198,7 +198,7 @@ TEST(NetlinkRouteTest, MsgHdrMsgTrunc) { struct ifinfomsg ifm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req = {}; req.hdr.nlmsg_len = sizeof(req); @@ -238,7 +238,7 @@ TEST(NetlinkRouteTest, MsgTruncMsgHdrMsgTrunc) { struct ifinfomsg ifm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req = {}; req.hdr.nlmsg_len = sizeof(req); @@ -274,7 +274,7 @@ TEST(NetlinkRouteTest, MsgTruncMsgHdrMsgTrunc) { TEST(NetlinkRouteTest, ControlMessageIgnored) { FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE)); - uint32 port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); + uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); struct request { struct nlmsghdr control_hdr; @@ -282,7 +282,7 @@ TEST(NetlinkRouteTest, ControlMessageIgnored) { struct ifinfomsg ifm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req = {}; @@ -310,14 +310,14 @@ TEST(NetlinkRouteTest, ControlMessageIgnored) { TEST(NetlinkRouteTest, GetAddrDump) { FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE)); - uint32 port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); + uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); struct request { struct nlmsghdr hdr; struct rtgenmsg rgm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req; req.hdr.nlmsg_len = sizeof(req); @@ -371,14 +371,14 @@ TEST(NetlinkRouteTest, LookupAll) { TEST(NetlinkRouteTest, GetRouteDump) { FileDescriptor fd = ASSERT_NO_ERRNO_AND_VALUE(NetlinkBoundSocket(NETLINK_ROUTE)); - uint32 port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); + uint32_t port = ASSERT_NO_ERRNO_AND_VALUE(NetlinkPortID(fd.get())); struct request { struct nlmsghdr hdr; struct rtmsg rtm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req = {}; req.hdr.nlmsg_len = sizeof(req); @@ -454,7 +454,7 @@ TEST(NetlinkRouteTest, RecvmsgTrunc) { struct rtgenmsg rgm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req; req.hdr.nlmsg_len = sizeof(req); @@ -531,7 +531,7 @@ TEST(NetlinkRouteTest, RecvmsgTruncPeek) { struct rtgenmsg rgm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req; req.hdr.nlmsg_len = sizeof(req); @@ -611,7 +611,7 @@ TEST(NetlinkRouteTest, NoPasscredNoCreds) { struct rtgenmsg rgm; }; - constexpr uint32 kSeq = 12345; + 
constexpr uint32_t kSeq = 12345; struct request req; req.hdr.nlmsg_len = sizeof(req); @@ -659,7 +659,7 @@ TEST(NetlinkRouteTest, PasscredCreds) { struct rtgenmsg rgm; }; - constexpr uint32 kSeq = 12345; + constexpr uint32_t kSeq = 12345; struct request req; req.hdr.nlmsg_len = sizeof(req); diff --git a/test/syscalls/linux/socket_netlink_util.cc b/test/syscalls/linux/socket_netlink_util.cc index 17f99c238..723f5d728 100644 --- a/test/syscalls/linux/socket_netlink_util.cc +++ b/test/syscalls/linux/socket_netlink_util.cc @@ -40,7 +40,7 @@ PosixErrorOr NetlinkBoundSocket(int protocol) { return std::move(fd); } -PosixErrorOr NetlinkPortID(int fd) { +PosixErrorOr NetlinkPortID(int fd) { struct sockaddr_nl addr; socklen_t addrlen = sizeof(addr); @@ -48,7 +48,7 @@ PosixErrorOr NetlinkPortID(int fd) { getsockname(fd, reinterpret_cast(&addr), &addrlen)); MaybeSave(); - return static_cast(addr.nl_pid); + return static_cast(addr.nl_pid); } PosixError NetlinkRequestResponse( diff --git a/test/syscalls/linux/socket_netlink_util.h b/test/syscalls/linux/socket_netlink_util.h index bd0c1d79b..76e772c48 100644 --- a/test/syscalls/linux/socket_netlink_util.h +++ b/test/syscalls/linux/socket_netlink_util.h @@ -30,7 +30,7 @@ namespace testing { PosixErrorOr NetlinkBoundSocket(int protocol); // Returns the port ID of the passed socket. -PosixErrorOr NetlinkPortID(int fd); +PosixErrorOr NetlinkPortID(int fd); // Send the passed request and call fn will all response netlink messages. PosixError NetlinkRequestResponse( diff --git a/test/syscalls/linux/socket_test_util.cc b/test/syscalls/linux/socket_test_util.cc index 2169ff1c6..eff7d577e 100644 --- a/test/syscalls/linux/socket_test_util.cc +++ b/test/syscalls/linux/socket_test_util.cc @@ -507,7 +507,7 @@ void TransferTest(int fd1, int fd2) { // Initializes the given buffer with random data. void RandomizeBuffer(char* ptr, size_t len) { - uint32 seed = time(nullptr); + uint32_t seed = time(nullptr); for (size_t i = 0; i < len; ++i) { ptr[i] = static_cast(rand_r(&seed)); } diff --git a/test/syscalls/linux/splice.cc b/test/syscalls/linux/splice.cc index 562b6a8d4..85232cb1f 100644 --- a/test/syscalls/linux/splice.cc +++ b/test/syscalls/linux/splice.cc @@ -139,7 +139,7 @@ TEST(SpliceTest, PipeOffsets) { // Event FDs may be used with splice without an offset. TEST(SpliceTest, FromEventFD) { // Open the input eventfd with an initial value so that it is readable. - constexpr uint64 kEventFDValue = 1; + constexpr uint64_t kEventFDValue = 1; int efd; ASSERT_THAT(efd = eventfd(kEventFDValue, 0), SyscallSucceeds()); const FileDescriptor in_fd(efd); diff --git a/test/syscalls/linux/stat.cc b/test/syscalls/linux/stat.cc index 6b259cb89..30de2f8ff 100644 --- a/test/syscalls/linux/stat.cc +++ b/test/syscalls/linux/stat.cc @@ -568,35 +568,35 @@ TEST(SimpleStatTest, AnonDeviceAllocatesUniqueInodesAcrossSaveRestore) { // struct kernel_statx_timestamp is a Linux statx_timestamp struct. struct kernel_statx_timestamp { - int64 tv_sec; - uint32 tv_nsec; - int32 __reserved; + int64_t tv_sec; + uint32_t tv_nsec; + int32_t __reserved; }; // struct kernel_statx is a Linux statx struct. Old versions of glibc do not // expose it. 
See include/uapi/linux/stat.h struct kernel_statx { - uint32 stx_mask; - uint32 stx_blksize; - uint64 stx_attributes; - uint32 stx_nlink; - uint32 stx_uid; - uint32 stx_gid; - uint16 stx_mode; - uint16 __spare0[1]; - uint64 stx_ino; - uint64 stx_size; - uint64 stx_blocks; - uint64 stx_attributes_mask; + uint32_t stx_mask; + uint32_t stx_blksize; + uint64_t stx_attributes; + uint32_t stx_nlink; + uint32_t stx_uid; + uint32_t stx_gid; + uint16_t stx_mode; + uint16_t __spare0[1]; + uint64_t stx_ino; + uint64_t stx_size; + uint64_t stx_blocks; + uint64_t stx_attributes_mask; struct kernel_statx_timestamp stx_atime; struct kernel_statx_timestamp stx_btime; struct kernel_statx_timestamp stx_ctime; struct kernel_statx_timestamp stx_mtime; - uint32 stx_rdev_major; - uint32 stx_rdev_minor; - uint32 stx_dev_major; - uint32 stx_dev_minor; - uint64 __spare2[14]; + uint32_t stx_rdev_major; + uint32_t stx_rdev_minor; + uint32_t stx_dev_major; + uint32_t stx_dev_minor; + uint64_t __spare2[14]; }; int statx(int dirfd, const char *pathname, int flags, unsigned int mask, diff --git a/test/syscalls/linux/sticky.cc b/test/syscalls/linux/sticky.cc index abcabaffb..7e73325bf 100644 --- a/test/syscalls/linux/sticky.cc +++ b/test/syscalls/linux/sticky.cc @@ -29,8 +29,8 @@ #include "test/util/test_util.h" #include "test/util/thread_util.h" -ABSL_FLAG(int32, scratch_uid, 65534, "first scratch UID"); -ABSL_FLAG(int32, scratch_gid, 65534, "first scratch GID"); +ABSL_FLAG(int32_t, scratch_uid, 65534, "first scratch UID"); +ABSL_FLAG(int32_t, scratch_gid, 65534, "first scratch GID"); namespace gvisor { namespace testing { diff --git a/test/syscalls/linux/sysret.cc b/test/syscalls/linux/sysret.cc index d98d6be91..819fa655a 100644 --- a/test/syscalls/linux/sysret.cc +++ b/test/syscalls/linux/sysret.cc @@ -26,8 +26,8 @@ namespace testing { namespace { -constexpr uint64 kNonCanonicalRip = 0xCCCC000000000000; -constexpr uint64 kNonCanonicalRsp = 0xFFFF000000000000; +constexpr uint64_t kNonCanonicalRip = 0xCCCC000000000000; +constexpr uint64_t kNonCanonicalRsp = 0xFFFF000000000000; class SysretTest : public ::testing::Test { protected: @@ -60,12 +60,12 @@ class SysretTest : public ::testing::Test { ASSERT_THAT(ptrace(PTRACE_DETACH, child_, 0, 0), SyscallSucceeds()); } - void SetRip(uint64 newrip) { + void SetRip(uint64_t newrip) { regs_.rip = newrip; ASSERT_THAT(ptrace(PTRACE_SETREGS, child_, 0, ®s_), SyscallSucceeds()); } - void SetRsp(uint64 newrsp) { + void SetRsp(uint64_t newrsp) { regs_.rsp = newrsp; ASSERT_THAT(ptrace(PTRACE_SETREGS, child_, 0, ®s_), SyscallSucceeds()); } diff --git a/test/syscalls/linux/tcp_socket.cc b/test/syscalls/linux/tcp_socket.cc index cb304d6f5..33a5ac66c 100644 --- a/test/syscalls/linux/tcp_socket.cc +++ b/test/syscalls/linux/tcp_socket.cc @@ -640,7 +640,7 @@ TEST_P(TcpSocketTest, Tiocinq) { size_t size = sizeof(buf); ASSERT_THAT(RetryEINTR(write)(s_, buf, size), SyscallSucceedsWithValue(size)); - uint32 seed = time(nullptr); + uint32_t seed = time(nullptr); const size_t max_chunk = size / 10; while (size > 0) { size_t chunk = (rand_r(&seed) % max_chunk) + 1; diff --git a/test/syscalls/linux/time.cc b/test/syscalls/linux/time.cc index 03e028f50..c7eead17e 100644 --- a/test/syscalls/linux/time.cc +++ b/test/syscalls/linux/time.cc @@ -28,7 +28,7 @@ constexpr long kFudgeSeconds = 5; // Mimics the time(2) wrapper from glibc prior to 2.15. 
time_t vsyscall_time(time_t* t) { - constexpr uint64 kVsyscallTimeEntry = 0xffffffffff600400; + constexpr uint64_t kVsyscallTimeEntry = 0xffffffffff600400; return reinterpret_cast(kVsyscallTimeEntry)(t); } @@ -63,7 +63,7 @@ TEST(TimeTest, VsyscallTime_InvalidAddressSIGSEGV) { } int vsyscall_gettimeofday(struct timeval* tv, struct timezone* tz) { - constexpr uint64 kVsyscallGettimeofdayEntry = 0xffffffffff600000; + constexpr uint64_t kVsyscallGettimeofdayEntry = 0xffffffffff600000; return reinterpret_cast( kVsyscallGettimeofdayEntry)(tv, tz); } diff --git a/test/syscalls/linux/timerfd.cc b/test/syscalls/linux/timerfd.cc index d87dbc666..86ed87b7c 100644 --- a/test/syscalls/linux/timerfd.cc +++ b/test/syscalls/linux/timerfd.cc @@ -69,9 +69,9 @@ TEST_P(TimerfdTest, SingleShot) { // The timer should fire exactly once since the interval is zero. absl::SleepFor(kDelay + TimerSlack()); - uint64 val = 0; - ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), - SyscallSucceedsWithValue(sizeof(uint64))); + uint64_t val = 0; + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), + SyscallSucceedsWithValue(sizeof(uint64_t))); EXPECT_EQ(1, val); } @@ -89,9 +89,9 @@ TEST_P(TimerfdTest, Periodic) { // Expect to see at least kPeriods expirations. More may occur due to the // timer slack, or due to delays from scheduling or save/restore. absl::SleepFor(kPeriods * kDelay + TimerSlack()); - uint64 val = 0; - ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), - SyscallSucceedsWithValue(sizeof(uint64))); + uint64_t val = 0; + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), + SyscallSucceedsWithValue(sizeof(uint64_t))); EXPECT_GE(val, kPeriods); } @@ -106,9 +106,9 @@ TEST_P(TimerfdTest, BlockingRead) { SyscallSucceeds()); // read should block until the timer fires. - uint64 val = 0; - ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), - SyscallSucceedsWithValue(sizeof(uint64))); + uint64_t val = 0; + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), + SyscallSucceedsWithValue(sizeof(uint64_t))); auto const end_time = absl::Now(); EXPECT_EQ(1, val); EXPECT_GE((end_time - start_time) + TimerSlack(), kDelay); @@ -122,8 +122,8 @@ TEST_P(TimerfdTest, NonblockingRead_NoRandomSave) { // Since the timer is initially disabled and has never fired, read should // return EAGAIN. - uint64 val = 0; - ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), + uint64_t val = 0; + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), SyscallFailsWithErrno(EAGAIN)); DisableSave ds; // Timing-sensitive. @@ -135,19 +135,19 @@ TEST_P(TimerfdTest, NonblockingRead_NoRandomSave) { SyscallSucceeds()); // Since the timer has not yet fired, read should return EAGAIN. - ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), SyscallFailsWithErrno(EAGAIN)); ds.reset(); // No longer timing-sensitive. // After the timer fires, read should indicate 1 expiration. absl::SleepFor(kDelay + TimerSlack()); - ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), - SyscallSucceedsWithValue(sizeof(uint64))); + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), + SyscallSucceedsWithValue(sizeof(uint64_t))); EXPECT_EQ(1, val); // The successful read should have reset the number of expirations. 
- ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), SyscallFailsWithErrno(EAGAIN)); } @@ -179,8 +179,8 @@ TEST_P(TimerfdTest, BlockingPoll_SetTimeResetsExpirations) { its.it_value.tv_sec = 0; ASSERT_THAT(timerfd_settime(tfd.get(), /* flags = */ 0, &its, nullptr), SyscallSucceeds()); - uint64 val = 0; - ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), + uint64_t val = 0; + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), SyscallFailsWithErrno(EAGAIN)); } @@ -198,16 +198,16 @@ TEST_P(TimerfdTest, SetAbsoluteTime) { SyscallSucceeds()); absl::SleepFor(kDelay + TimerSlack()); - uint64 val = 0; - ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), - SyscallSucceedsWithValue(sizeof(uint64))); + uint64_t val = 0; + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), + SyscallSucceedsWithValue(sizeof(uint64_t))); EXPECT_EQ(1, val); } TEST_P(TimerfdTest, IllegalReadWrite) { auto const tfd = ASSERT_NO_ERRNO_AND_VALUE(TimerfdCreate(GetParam(), TFD_NONBLOCK)); - uint64 val = 0; + uint64_t val = 0; EXPECT_THAT(PreadFd(tfd.get(), &val, sizeof(val), 0), SyscallFailsWithErrno(ESPIPE)); EXPECT_THAT(WriteFd(tfd.get(), &val, sizeof(val)), @@ -244,9 +244,9 @@ TEST(TimerfdClockRealtimeTest, ClockRealtime) { ASSERT_THAT(timerfd_settime(tfd.get(), /* flags = */ 0, &its, nullptr), SyscallSucceeds()); - uint64 val = 0; - ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64)), - SyscallSucceedsWithValue(sizeof(uint64))); + uint64_t val = 0; + ASSERT_THAT(ReadFd(tfd.get(), &val, sizeof(uint64_t)), + SyscallSucceedsWithValue(sizeof(uint64_t))); EXPECT_EQ(1, val); } diff --git a/test/syscalls/linux/udp_socket_test_cases.cc b/test/syscalls/linux/udp_socket_test_cases.cc index af94d7baa..a2f6ef8cc 100644 --- a/test/syscalls/linux/udp_socket_test_cases.cc +++ b/test/syscalls/linux/udp_socket_test_cases.cc @@ -34,7 +34,7 @@ namespace gvisor { namespace testing { // Gets a pointer to the port component of the given address. -uint16* Port(struct sockaddr_storage* addr) { +uint16_t* Port(struct sockaddr_storage* addr) { switch (addr->ss_family) { case AF_INET: { auto sin = reinterpret_cast(addr); @@ -331,7 +331,7 @@ TEST_P(UdpSocketTest, Connect) { EXPECT_EQ(memcmp(&peer, addr_[2], addrlen_), 0); } -void ConnectAny(AddressFamily family, int sockfd, uint16 port) { +void ConnectAny(AddressFamily family, int sockfd, uint16_t port) { struct sockaddr_storage addr = {}; // Precondition check. 
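On the timerfd hunks above: reads from a timerfd always transfer an 8-byte expiration counter, so the buffer must be exactly a uint64_t. A sketch of one read, assuming the fd was created without TFD_NONBLOCK (ReadExpirations is a hypothetical helper):

#include <sys/timerfd.h>
#include <unistd.h>
#include <cstdint>

// Returns the number of expirations since the last read, or 0 on error.
uint64_t ReadExpirations(int tfd) {
  uint64_t expirations = 0;
  if (read(tfd, &expirations, sizeof(expirations)) !=
      static_cast<ssize_t>(sizeof(expirations))) {
    return 0;
  }
  return expirations;
}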
@@ -1398,7 +1398,7 @@ TEST_P(UdpSocketTest, SetAndReceiveTOS) { received_iov.iov_len = kDataLength; received_msg.msg_iov = &received_iov; received_msg.msg_iovlen = 1; - size_t cmsg_data_len = sizeof(int8); + size_t cmsg_data_len = sizeof(int8_t); if (sent_type == IPV6_TCLASS) { cmsg_data_len = sizeof(int); } @@ -1413,7 +1413,7 @@ TEST_P(UdpSocketTest, SetAndReceiveTOS) { EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(cmsg_data_len)); EXPECT_EQ(cmsg->cmsg_level, sent_level); EXPECT_EQ(cmsg->cmsg_type, sent_type); - int8 received_tos = 0; + int8_t received_tos = 0; memcpy(&received_tos, CMSG_DATA(cmsg), sizeof(received_tos)); EXPECT_EQ(received_tos, sent_tos); } @@ -1453,7 +1453,7 @@ TEST_P(UdpSocketTest, SendAndReceiveTOS) { sent_iov.iov_len = kDataLength; sent_msg.msg_iov = &sent_iov; sent_msg.msg_iovlen = 1; - size_t cmsg_data_len = sizeof(int8); + size_t cmsg_data_len = sizeof(int8_t); if (sent_level == SOL_IPV6) { sent_type = IPV6_TCLASS; cmsg_data_len = sizeof(int); @@ -1467,7 +1467,7 @@ TEST_P(UdpSocketTest, SendAndReceiveTOS) { sent_cmsg->cmsg_len = CMSG_LEN(cmsg_data_len); sent_cmsg->cmsg_level = sent_level; sent_cmsg->cmsg_type = sent_type; - *(int8*)CMSG_DATA(sent_cmsg) = sent_tos; + *(int8_t*)CMSG_DATA(sent_cmsg) = sent_tos; ASSERT_THAT(RetryEINTR(sendmsg)(t_, &sent_msg, 0), SyscallSucceedsWithValue(kDataLength)); @@ -1491,7 +1491,7 @@ TEST_P(UdpSocketTest, SendAndReceiveTOS) { EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(cmsg_data_len)); EXPECT_EQ(cmsg->cmsg_level, sent_level); EXPECT_EQ(cmsg->cmsg_type, sent_type); - int8 received_tos = 0; + int8_t received_tos = 0; memcpy(&received_tos, CMSG_DATA(cmsg), sizeof(received_tos)); EXPECT_EQ(received_tos, sent_tos); } diff --git a/test/syscalls/linux/uidgid.cc b/test/syscalls/linux/uidgid.cc index e0e39e5e3..6218fbce1 100644 --- a/test/syscalls/linux/uidgid.cc +++ b/test/syscalls/linux/uidgid.cc @@ -27,10 +27,10 @@ #include "test/util/thread_util.h" #include "test/util/uid_util.h" -ABSL_FLAG(int32, scratch_uid1, 65534, "first scratch UID"); -ABSL_FLAG(int32, scratch_uid2, 65533, "second scratch UID"); -ABSL_FLAG(int32, scratch_gid1, 65534, "first scratch GID"); -ABSL_FLAG(int32, scratch_gid2, 65533, "second scratch GID"); +ABSL_FLAG(int32_t, scratch_uid1, 65534, "first scratch UID"); +ABSL_FLAG(int32_t, scratch_uid2, 65533, "second scratch UID"); +ABSL_FLAG(int32_t, scratch_gid1, 65534, "first scratch GID"); +ABSL_FLAG(int32_t, scratch_gid2, 65533, "second scratch GID"); using ::testing::UnorderedElementsAreArray; diff --git a/test/syscalls/linux/utimes.cc b/test/syscalls/linux/utimes.cc index e7bae9c07..3a927a430 100644 --- a/test/syscalls/linux/utimes.cc +++ b/test/syscalls/linux/utimes.cc @@ -163,12 +163,12 @@ TEST(FutimesatTest, OnRelPath) { TEST(FutimesatTest, InvalidNsec) { auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile()); struct timeval times[4][2] = {{ - {0, 1}, // Valid - {1, static_cast(1e7)} // Invalid + {0, 1}, // Valid + {1, static_cast(1e7)} // Invalid }, { - {1, static_cast(1e7)}, // Invalid - {0, 1} // Valid + {1, static_cast(1e7)}, // Invalid + {0, 1} // Valid }, { {0, 1}, // Valid @@ -288,14 +288,15 @@ TEST(UtimeTest, ZeroAtimeandMtime) { TEST(UtimensatTest, InvalidNsec) { auto f = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile()); - struct timespec times[2][2] = {{ - {0, UTIME_OMIT}, // Valid - {2, static_cast(1e10)} // Invalid - }, - { - {2, static_cast(1e10)}, // Invalid - {0, UTIME_OMIT} // Valid - }}; + struct timespec times[2][2] = { + { + {0, UTIME_OMIT}, // Valid + {2, static_cast(1e10)} // Invalid + }, + { + {2, 
static_cast(1e10)}, // Invalid + {0, UTIME_OMIT} // Valid + }}; for (unsigned int i = 0; i < sizeof(times) / sizeof(times[0]); i++) { std::cout << "test:" << i << "\n"; diff --git a/test/syscalls/linux/vfork.cc b/test/syscalls/linux/vfork.cc index 153b3bd69..0aaba482d 100644 --- a/test/syscalls/linux/vfork.cc +++ b/test/syscalls/linux/vfork.cc @@ -51,7 +51,7 @@ constexpr absl::Duration kChildDelay = absl::Seconds(10); // errno, so kChildExitCode is chosen to be an unlikely errno: constexpr int kChildExitCode = 118; // ENOTNAM: Not a XENIX named type file -int64 MonotonicNow() { +int64_t MonotonicNow() { struct timespec now; TEST_PCHECK(clock_gettime(CLOCK_MONOTONIC, &now) == 0); return now.tv_sec * 1000000000ll + now.tv_nsec; @@ -62,7 +62,7 @@ TEST(VforkTest, ParentStopsUntilChildExits) { // N.B. Run the test in a single-threaded subprocess because // vfork is not safe in a multi-threaded process. - const int64 start = MonotonicNow(); + const int64_t start = MonotonicNow(); pid_t pid = vfork(); if (pid == 0) { @@ -72,7 +72,7 @@ TEST(VforkTest, ParentStopsUntilChildExits) { TEST_PCHECK_MSG(pid > 0, "vfork failed"); MaybeSave(); - const int64 end = MonotonicNow(); + const int64_t end = MonotonicNow(); absl::Duration dur = absl::Nanoseconds(end - start); @@ -92,7 +92,7 @@ TEST(VforkTest, ParentStopsUntilChildExecves_NoRandomSave) { char* const* const child_argv = owned_child_argv.get(); const auto test = [&] { - const int64 start = MonotonicNow(); + const int64_t start = MonotonicNow(); pid_t pid = vfork(); if (pid == 0) { @@ -104,7 +104,7 @@ TEST(VforkTest, ParentStopsUntilChildExecves_NoRandomSave) { // since the test expects an upper bound on the time spent // stopped. int saved_errno = errno; - const int64 end = MonotonicNow(); + const int64_t end = MonotonicNow(); errno = saved_errno; TEST_PCHECK_MSG(pid > 0, "vfork failed"); MaybeSave(); @@ -143,7 +143,7 @@ TEST(VforkTest, ExecedChildExitDoesntUnstopParent_NoRandomSave) { // pid1 exec'd and is now sleeping. SleepSafe(kChildDelay / 2); - const int64 start = MonotonicNow(); + const int64_t start = MonotonicNow(); pid_t pid2 = vfork(); if (pid2 == 0) { @@ -153,7 +153,7 @@ TEST(VforkTest, ExecedChildExitDoesntUnstopParent_NoRandomSave) { TEST_PCHECK_MSG(pid2 > 0, "vfork failed"); MaybeSave(); - const int64 end = MonotonicNow(); + const int64_t end = MonotonicNow(); absl::Duration dur = absl::Nanoseconds(end - start); diff --git a/test/syscalls/linux/vsyscall.cc b/test/syscalls/linux/vsyscall.cc index 99e8c6cea..2c2303358 100644 --- a/test/syscalls/linux/vsyscall.cc +++ b/test/syscalls/linux/vsyscall.cc @@ -25,7 +25,7 @@ namespace testing { namespace { time_t vsyscall_time(time_t* t) { - constexpr uint64 kVsyscallTimeEntry = 0xffffffffff600400; + constexpr uint64_t kVsyscallTimeEntry = 0xffffffffff600400; return reinterpret_cast(kVsyscallTimeEntry)(t); } diff --git a/test/syscalls/linux/wait.cc b/test/syscalls/linux/wait.cc index 709b87a21..944149d5e 100644 --- a/test/syscalls/linux/wait.cc +++ b/test/syscalls/linux/wait.cc @@ -64,7 +64,7 @@ static const size_t kStackSize = 2 * kPageSize; // The child thread created in CloneAndExit runs this function. // This child does not have the TLS setup, so it must not use glibc functions. 
int CloneChild(void* priv) { - int64 sleep = reinterpret_cast(priv); + int64_t sleep = reinterpret_cast(priv); SleepSafe(absl::Seconds(sleep)); // glibc's _exit(2) function wrapper will helpfully call exit_group(2), @@ -75,7 +75,7 @@ int CloneChild(void* priv) { // ForkAndExit forks a child process which exits with exit_code, after // sleeping for the specified duration (seconds). -pid_t ForkAndExit(int exit_code, int64 sleep) { +pid_t ForkAndExit(int exit_code, int64_t sleep) { pid_t child = fork(); if (child == 0) { SleepSafe(absl::Seconds(sleep)); @@ -84,16 +84,16 @@ pid_t ForkAndExit(int exit_code, int64 sleep) { return child; } -int64 clock_gettime_nsecs(clockid_t id) { +int64_t clock_gettime_nsecs(clockid_t id) { struct timespec ts; TEST_PCHECK(clock_gettime(id, &ts) == 0); return (ts.tv_sec * 1000000000 + ts.tv_nsec); } -void spin(int64 sec) { - int64 ns = sec * 1000000000; - int64 start = clock_gettime_nsecs(CLOCK_THREAD_CPUTIME_ID); - int64 end = start + ns; +void spin(int64_t sec) { + int64_t ns = sec * 1000000000; + int64_t start = clock_gettime_nsecs(CLOCK_THREAD_CPUTIME_ID); + int64_t end = start + ns; do { constexpr int kLoopCount = 1000000; // large and arbitrary @@ -105,7 +105,7 @@ void spin(int64 sec) { // ForkSpinAndExit forks a child process which exits with exit_code, after // spinning for the specified duration (seconds). -pid_t ForkSpinAndExit(int exit_code, int64 spintime) { +pid_t ForkSpinAndExit(int exit_code, int64_t spintime) { pid_t child = fork(); if (child == 0) { spin(spintime); @@ -141,7 +141,7 @@ int FreeStack(uintptr_t addr) { // CloneAndExit clones a child thread, which exits with 0 after sleeping for // the specified duration (must be in seconds). extra_flags are ORed against // the standard clone(2) flags. -int CloneAndExit(int64 sleep, uintptr_t stack, int extra_flags) { +int CloneAndExit(int64_t sleep, uintptr_t stack, int extra_flags) { return clone(CloneChild, reinterpret_cast(stack), CLONE_FILES | CLONE_FS | CLONE_SIGHAND | CLONE_VM | extra_flags, reinterpret_cast(sleep)); diff --git a/test/util/mount_util.h b/test/util/mount_util.h index 51119f22f..23eea51a2 100644 --- a/test/util/mount_util.h +++ b/test/util/mount_util.h @@ -33,9 +33,9 @@ namespace testing { // destroyed. inline PosixErrorOr Mount(const std::string &source, const std::string &target, - const std::string &fstype, uint64 mountflags, - const std::string &data, - uint64 umountflags) { + const std::string &fstype, + uint64_t mountflags, const std::string &data, + uint64_t umountflags) { if (mount(source.c_str(), target.c_str(), fstype.c_str(), mountflags, data.c_str()) == -1) { return PosixError(errno, "mount failed"); diff --git a/test/util/multiprocess_util.cc b/test/util/multiprocess_util.cc index ba601f300..8b676751b 100644 --- a/test/util/multiprocess_util.cc +++ b/test/util/multiprocess_util.cc @@ -135,7 +135,7 @@ PosixErrorOr ForkAndExec(const std::string& filename, return ForkAndExecHelper(exec_fn, fn, child, execve_errno); } -PosixErrorOr ForkAndExecveat(const int32 dirfd, +PosixErrorOr ForkAndExecveat(const int32_t dirfd, const std::string& pathname, const ExecveArray& argv, const ExecveArray& envv, const int flags, diff --git a/test/util/multiprocess_util.h b/test/util/multiprocess_util.h index 342e73a52..3e736261b 100644 --- a/test/util/multiprocess_util.h +++ b/test/util/multiprocess_util.h @@ -103,13 +103,14 @@ inline PosixErrorOr ForkAndExec(const std::string& filename, } // Equivalent to ForkAndExec, except using dirfd and flags with execveat. 
-PosixErrorOr ForkAndExecveat(int32 dirfd, const std::string& pathname, +PosixErrorOr ForkAndExecveat(int32_t dirfd, + const std::string& pathname, const ExecveArray& argv, const ExecveArray& envv, int flags, const std::function& fn, pid_t* child, int* execve_errno); -inline PosixErrorOr ForkAndExecveat(int32 dirfd, +inline PosixErrorOr ForkAndExecveat(int32_t dirfd, const std::string& pathname, const ExecveArray& argv, const ExecveArray& envv, int flags, diff --git a/test/util/proc_util.cc b/test/util/proc_util.cc index c81f363ef..34d636ba9 100644 --- a/test/util/proc_util.cc +++ b/test/util/proc_util.cc @@ -72,7 +72,7 @@ PosixErrorOr ParseProcMapsLine(absl::string_view line) { ASSIGN_OR_RETURN_ERRNO(map_entry.major, AtoiBase(device[0], 16)); ASSIGN_OR_RETURN_ERRNO(map_entry.minor, AtoiBase(device[1], 16)); - ASSIGN_OR_RETURN_ERRNO(map_entry.inode, Atoi(parts[4])); + ASSIGN_OR_RETURN_ERRNO(map_entry.inode, Atoi(parts[4])); if (parts.size() == 6) { // A filename is present. However, absl::StrSplit retained the whitespace // between the inode number and the filename. diff --git a/test/util/temp_path.cc b/test/util/temp_path.cc index f5096dd53..35aacb172 100644 --- a/test/util/temp_path.cc +++ b/test/util/temp_path.cc @@ -32,7 +32,7 @@ namespace testing { namespace { -std::atomic global_temp_file_number = ATOMIC_VAR_INIT(1); +std::atomic global_temp_file_number = ATOMIC_VAR_INIT(1); // Return a new temp filename, intended to be unique system-wide. // diff --git a/test/util/test_util.cc b/test/util/test_util.cc index 51f4b4539..848504c88 100644 --- a/test/util/test_util.cc +++ b/test/util/test_util.cc @@ -79,7 +79,7 @@ bool IsRunningWithHostinet() { #endif // defined(__x86_64__) CPUVendor GetCPUVendor() { - uint32 eax, ebx, ecx, edx; + uint32_t eax, ebx, ecx, edx; std::string vendor_str; // Get vendor string (issue CPUID with eax = 0) GETCPUID(eax, ebx, ecx, edx, 0, 0); @@ -179,36 +179,36 @@ PosixErrorOr> GetOpenFDs() { return ret_fds; } -PosixErrorOr Links(const std::string& path) { +PosixErrorOr Links(const std::string& path) { struct stat st; if (stat(path.c_str(), &st)) { return PosixError(errno, absl::StrCat("Failed to stat ", path)); } - return static_cast(st.st_nlink); + return static_cast(st.st_nlink); } void RandomizeBuffer(void* buffer, size_t len) { struct timespec ts = {}; clock_gettime(CLOCK_MONOTONIC, &ts); - uint32 seed = static_cast(ts.tv_nsec); + uint32_t seed = static_cast(ts.tv_nsec); char* const buf = static_cast(buffer); for (size_t i = 0; i < len; i++) { buf[i] = rand_r(&seed) % 255; } } -std::vector> GenerateIovecs(uint64 total_size, +std::vector> GenerateIovecs(uint64_t total_size, void* buf, size_t buflen) { std::vector> result; - for (uint64 offset = 0; offset < total_size;) { + for (uint64_t offset = 0; offset < total_size;) { auto& iovec_array = *result.emplace(result.end()); for (; offset < total_size && iovec_array.size() < IOV_MAX; offset += buflen) { struct iovec iov = {}; iov.iov_base = buf; - iov.iov_len = std::min(total_size - offset, buflen); + iov.iov_len = std::min(total_size - offset, buflen); iovec_array.push_back(iov); } } @@ -216,15 +216,15 @@ std::vector> GenerateIovecs(uint64 total_size, return result; } -uint64 Megabytes(uint64 n) { +uint64_t Megabytes(uint64_t n) { // Overflow check, upper 20 bits in n shouldn't be set. TEST_CHECK(!(0xfffff00000000000 & n)); return n << 20; } -bool Equivalent(uint64 current, uint64 target, double tolerance) { +bool Equivalent(uint64_t current, uint64_t target, double tolerance) { auto abs_diff = target > current ? 
target - current : current - target;
-  return abs_diff <= static_cast<uint64>(tolerance * target);
+  return abs_diff <= static_cast<uint64_t>(tolerance * target);
 }
 
 }  // namespace testing
diff --git a/test/util/test_util.h b/test/util/test_util.h
index 6eb46ac76..b3235c7e3 100644
--- a/test/util/test_util.h
+++ b/test/util/test_util.h
@@ -264,7 +264,7 @@ std::ostream& operator<<(std::ostream& out, OpenFd const& ofd);
 PosixErrorOr<std::vector<OpenFd>> GetOpenFDs();
 
 // Returns the number of hard links to a path.
-PosixErrorOr<uint64> Links(const std::string& path);
+PosixErrorOr<uint64_t> Links(const std::string& path);
 
 namespace internal {
 
@@ -706,7 +706,7 @@ inline PosixErrorOr<T> Atoi(absl::string_view str) {
   return ret;
 }
 
-inline PosixErrorOr<uint64> AtoiBase(absl::string_view str, int base) {
+inline PosixErrorOr<uint64_t> AtoiBase(absl::string_view str, int base) {
   if (base > 255 || base < 2) {
     return PosixError(EINVAL, "Invalid Base");
   }
@@ -737,16 +737,16 @@ inline PosixErrorOr<double> Atof(absl::string_view str) {
 
 // Return the smallest number of iovec arrays that can be used to write
 // "total_bytes" number of bytes, each iovec writing one "buf".
-std::vector<std::vector<struct iovec>> GenerateIovecs(uint64 total_size,
+std::vector<std::vector<struct iovec>> GenerateIovecs(uint64_t total_size,
                                                       void* buf, size_t buflen);
 
 // Returns bytes in 'n' megabytes. Used for readability.
-uint64 Megabytes(uint64 n);
+uint64_t Megabytes(uint64_t n);
 
 // Predicate for checking that a value is within some tolerance of another
 // value. Returns true iff current is in the range [target * (1 - tolerance),
 // target * (1 + tolerance)].
-bool Equivalent(uint64 current, uint64 target, double tolerance);
+bool Equivalent(uint64_t current, uint64_t target, double tolerance);
 
 // Matcher wrapping the Equivalent predicate.
 MATCHER_P2(EquivalentWithin, target, tolerance,
@@ -756,7 +756,7 @@ MATCHER_P2(EquivalentWithin, target, tolerance,
   if (target == 0) {
     *result_listener << ::absl::StreamFormat("difference of infinity%%");
   } else {
-    int64 delta = static_cast<int64>(arg) - static_cast<int64>(target);
+    int64_t delta = static_cast<int64_t>(arg) - static_cast<int64_t>(target);
     double delta_percent =
         static_cast<double>(delta) / static_cast<double>(target) * 100;
     *result_listener << ::absl::StreamFormat("difference of %.2f%%",
diff --git a/test/util/test_util_test.cc b/test/util/test_util_test.cc
index 024304535..f42100374 100644
--- a/test/util/test_util_test.cc
+++ b/test/util/test_util_test.cc
@@ -171,7 +171,7 @@ MATCHER_P(IovecsListEq, expected, "") {
     return false;
   }
 
-  for (uint64 i = 0; i < expected.size(); ++i) {
+  for (uint64_t i = 0; i < expected.size(); ++i) {
     const std::vector<struct iovec>& actual_iovecs = arg[i];
     const std::vector<struct iovec>& expected_iovecs = expected[i];
     if (actual_iovecs.size() != expected_iovecs.size()) {
@@ -181,7 +181,7 @@ MATCHER_P(IovecsListEq, expected, "") {
     return false;
   }
 
-  for (uint64 j = 0; j < expected_iovecs.size(); ++j) {
+  for (uint64_t j = 0; j < expected_iovecs.size(); ++j) {
     const struct iovec& actual_iov = actual_iovecs[j];
     const struct iovec& expected_iov = expected_iovecs[j];
     if (actual_iov.iov_base != expected_iov.iov_base) {
-- 
cgit v1.2.3


From 9338854ea31059d6b6b5bf59a12512455b632f49 Mon Sep 17 00:00:00 2001
From: Nayana Bidari
Date: Wed, 10 Jun 2020 15:05:20 -0700
Subject: Fix the error code for the syscall test with null TOS.

Depending on the kernel, setsockopt with a nullptr optval can either fail
with EFAULT or succeed with a return value of zero.
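The fix itself lands in the next patch (socket_ip_unbound.cc). As a self-contained illustration of the pattern it uses, the sketch below shows how gMock's AnyOf accepts either outcome of a kernel-dependent syscall; the test name and the bare IPv6 UDP socket are illustrative choices, and the SyscallFailsWithErrno/SyscallSucceedsWithValue matchers from gVisor's test/util/test_util.h are assumed to be available.

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cerrno>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "test/util/test_util.h"

namespace gvisor {
namespace testing {
namespace {

TEST(NullOptvalSketch, SetsockoptFailsWithEfaultOrSucceeds) {
  int fd = socket(AF_INET6, SOCK_DGRAM, 0);
  ASSERT_GE(fd, 0);
  // Depending on the kernel, a null optval with a non-zero optlen either
  // faults (EFAULT) or is treated as a zero-length input (returns 0).
  EXPECT_THAT(
      setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, nullptr, sizeof(int)),
      ::testing::AnyOf(SyscallFailsWithErrno(EFAULT),
                       SyscallSucceedsWithValue(0)));
  EXPECT_THAT(close(fd), SyscallSucceeds());
}

}  // namespace
}  // namespace testing
}  // namespace gvisor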
PiperOrigin-RevId: 315777107 --- test/syscalls/linux/socket_ip_unbound.cc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'test/syscalls/linux/socket_ip_unbound.cc') diff --git a/test/syscalls/linux/socket_ip_unbound.cc b/test/syscalls/linux/socket_ip_unbound.cc index ca597e267..af8dda19c 100644 --- a/test/syscalls/linux/socket_ip_unbound.cc +++ b/test/syscalls/linux/socket_ip_unbound.cc @@ -377,8 +377,11 @@ TEST_P(IPUnboundSocketTest, NullTOS) { // // Linux's implementation would need fixing as passing a nullptr as optval // and non-zero optlen may not be valid. - EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, nullptr, set_sz), - SyscallSucceedsWithValue(0)); + // TODO(b/158666797): Combine the gVisor and linux cases for IPv6. + // Some kernel versions return EFAULT, so we handle both. + EXPECT_THAT( + setsockopt(socket->get(), t.level, t.option, nullptr, set_sz), + AnyOf(SyscallFailsWithErrno(EFAULT), SyscallSucceedsWithValue(0))); } } socklen_t get_sz = sizeof(int); -- cgit v1.2.3 From a085e562d0592bccc99e9e0380706a8025f70d53 Mon Sep 17 00:00:00 2001 From: Ian Gudger Date: Wed, 10 Jun 2020 23:48:03 -0700 Subject: Add support for SO_REUSEADDR to UDP sockets/endpoints. On UDP sockets, SO_REUSEADDR allows multiple sockets to bind to the same address, but only delivers packets to the most recently bound socket. This differs from the behavior of SO_REUSEADDR on TCP sockets. SO_REUSEADDR for TCP sockets will likely need an almost completely independent implementation. SO_REUSEADDR has some odd interactions with the similar SO_REUSEPORT. These interactions are tested fairly extensively and all but one particularly odd one (that honestly seems like a bug) behave the same on gVisor and Linux. PiperOrigin-RevId: 315844832 --- pkg/tcpip/ports/ports.go | 116 ++++++++++++++------- pkg/tcpip/stack/BUILD | 1 + pkg/tcpip/stack/stack.go | 12 +-- pkg/tcpip/stack/transport_demuxer.go | 95 ++++++++--------- pkg/tcpip/stack/transport_demuxer_test.go | 3 +- pkg/tcpip/stack/transport_test.go | 7 +- pkg/tcpip/transport/icmp/BUILD | 1 + pkg/tcpip/transport/icmp/endpoint.go | 7 +- pkg/tcpip/transport/tcp/accept.go | 4 +- pkg/tcpip/transport/tcp/endpoint.go | 19 +++- pkg/tcpip/transport/udp/endpoint.go | 42 +++++--- pkg/tcpip/transport/udp/forwarder.go | 3 +- .../linux/socket_bind_to_device_sequence.cc | 29 ++++-- test/syscalls/linux/socket_inet_loopback.cc | 9 -- test/syscalls/linux/socket_ip_udp_generic.cc | 6 -- test/syscalls/linux/socket_ip_unbound.cc | 56 +++++++--- test/syscalls/linux/socket_ipv4_udp_unbound.cc | 23 ++-- 17 files changed, 255 insertions(+), 178 deletions(-) (limited to 'test/syscalls/linux/socket_ip_unbound.cc') diff --git a/pkg/tcpip/ports/ports.go b/pkg/tcpip/ports/ports.go index b937cb84b..edc29ad27 100644 --- a/pkg/tcpip/ports/ports.go +++ b/pkg/tcpip/ports/ports.go @@ -54,17 +54,27 @@ type Flags struct { LoadBalanced bool } -func (f Flags) bits() reuseFlag { - var rf reuseFlag +// Bits converts the Flags to their bitset form. +func (f Flags) Bits() BitFlags { + var rf BitFlags if f.MostRecent { - rf |= mostRecentFlag + rf |= MostRecentFlag } if f.LoadBalanced { - rf |= loadBalancedFlag + rf |= LoadBalancedFlag } return rf } +// Effective returns the effective behavior of a flag config. +func (f Flags) Effective() Flags { + e := f + if e.LoadBalanced && e.MostRecent { + e.MostRecent = false + } + return e +} + // PortManager manages allocating, reserving and releasing ports. 
type PortManager struct { mu sync.RWMutex @@ -78,56 +88,88 @@ type PortManager struct { hint uint32 } -type reuseFlag int +// BitFlags is a bitset representation of Flags. +type BitFlags uint32 const ( - mostRecentFlag reuseFlag = 1 << iota - loadBalancedFlag + // MostRecentFlag represents Flags.MostRecent. + MostRecentFlag BitFlags = 1 << iota + + // LoadBalancedFlag represents Flags.LoadBalanced. + LoadBalancedFlag + + // nextFlag is the value that the next added flag will have. + // + // It is used to calculate FlagMask below. It is also the number of + // valid flag states. nextFlag - flagMask = nextFlag - 1 + // FlagMask is a bit mask for BitFlags. + FlagMask = nextFlag - 1 ) -type portNode struct { - // refs stores the count for each possible flag combination. +// ToFlags converts the bitset into a Flags struct. +func (f BitFlags) ToFlags() Flags { + return Flags{ + MostRecent: f&MostRecentFlag != 0, + LoadBalanced: f&LoadBalancedFlag != 0, + } +} + +// FlagCounter counts how many references each flag combination has. +type FlagCounter struct { + // refs stores the count for each possible flag combination, (0 though + // FlagMask). refs [nextFlag]int } -func (p portNode) totalRefs() int { +// AddRef increases the reference count for a specific flag combination. +func (c *FlagCounter) AddRef(flags BitFlags) { + c.refs[flags]++ +} + +// DropRef decreases the reference count for a specific flag combination. +func (c *FlagCounter) DropRef(flags BitFlags) { + c.refs[flags]-- +} + +// TotalRefs calculates the total number of references for all flag +// combinations. +func (c FlagCounter) TotalRefs() int { var total int - for _, r := range p.refs { + for _, r := range c.refs { total += r } return total } -// flagRefs returns the number of references with all specified flags. -func (p portNode) flagRefs(flags reuseFlag) int { +// FlagRefs returns the number of references with all specified flags. +func (c FlagCounter) FlagRefs(flags BitFlags) int { var total int - for i, r := range p.refs { - if reuseFlag(i)&flags == flags { + for i, r := range c.refs { + if BitFlags(i)&flags == flags { total += r } } return total } -// allRefsHave returns if all references have all specified flags. -func (p portNode) allRefsHave(flags reuseFlag) bool { - for i, r := range p.refs { - if reuseFlag(i)&flags == flags && r > 0 { +// AllRefsHave returns if all references have all specified flags. +func (c FlagCounter) AllRefsHave(flags BitFlags) bool { + for i, r := range c.refs { + if BitFlags(i)&flags != flags && r > 0 { return false } } return true } -// intersectionRefs returns the set of flags shared by all references. -func (p portNode) intersectionRefs() reuseFlag { - intersection := flagMask - for i, r := range p.refs { +// IntersectionRefs returns the set of flags shared by all references. +func (c FlagCounter) IntersectionRefs() BitFlags { + intersection := FlagMask + for i, r := range c.refs { if r > 0 { - intersection &= reuseFlag(i) + intersection &= BitFlags(i) } } return intersection @@ -135,26 +177,26 @@ func (p portNode) intersectionRefs() reuseFlag { // deviceNode is never empty. When it has no elements, it is removed from the // map that references it. -type deviceNode map[tcpip.NICID]portNode +type deviceNode map[tcpip.NICID]FlagCounter // isAvailable checks whether binding is possible by device. If not binding to a -// device, check against all portNodes. If binding to a specific device, check +// device, check against all FlagCounters. 
If binding to a specific device, check // against the unspecified device and the provided device. // // If either of the port reuse flags is enabled on any of the nodes, all nodes // sharing a port must share at least one reuse flag. This matches Linux's // behavior. func (d deviceNode) isAvailable(flags Flags, bindToDevice tcpip.NICID) bool { - flagBits := flags.bits() + flagBits := flags.Bits() if bindToDevice == 0 { // Trying to binding all devices. if flagBits == 0 { // Can't bind because the (addr,port) is already bound. return false } - intersection := flagMask + intersection := FlagMask for _, p := range d { - i := p.intersectionRefs() + i := p.IntersectionRefs() intersection &= i if intersection&flagBits == 0 { // Can't bind because the (addr,port) was @@ -165,17 +207,17 @@ func (d deviceNode) isAvailable(flags Flags, bindToDevice tcpip.NICID) bool { return true } - intersection := flagMask + intersection := FlagMask if p, ok := d[0]; ok { - intersection = p.intersectionRefs() + intersection = p.IntersectionRefs() if intersection&flagBits == 0 { return false } } if p, ok := d[bindToDevice]; ok { - i := p.intersectionRefs() + i := p.IntersectionRefs() intersection &= i if intersection&flagBits == 0 { return false @@ -324,7 +366,7 @@ func (s *PortManager) reserveSpecificPort(networks []tcpip.NetworkProtocolNumber if !s.isPortAvailableLocked(networks, transport, addr, port, flags, bindToDevice) { return false } - flagBits := flags.bits() + flagBits := flags.Bits() // Reserve port on all network protocols. for _, network := range networks { @@ -340,7 +382,7 @@ func (s *PortManager) reserveSpecificPort(networks []tcpip.NetworkProtocolNumber m[addr] = d } n := d[bindToDevice] - n.refs[flagBits]++ + n.AddRef(flagBits) d[bindToDevice] = n } @@ -353,7 +395,7 @@ func (s *PortManager) ReleasePort(networks []tcpip.NetworkProtocolNumber, transp s.mu.Lock() defer s.mu.Unlock() - flagBits := flags.bits() + flagBits := flags.Bits() for _, network := range networks { desc := portDescriptor{network, transport, port} @@ -368,7 +410,7 @@ func (s *PortManager) ReleasePort(networks []tcpip.NetworkProtocolNumber, transp } n.refs[flagBits]-- d[bindToDevice] = n - if n.refs == [nextFlag]int{} { + if n.TotalRefs() == 0 { delete(d, bindToDevice) } if len(d) == 0 { diff --git a/pkg/tcpip/stack/BUILD b/pkg/tcpip/stack/BUILD index afca925ad..24f52b735 100644 --- a/pkg/tcpip/stack/BUILD +++ b/pkg/tcpip/stack/BUILD @@ -89,6 +89,7 @@ go_test( "//pkg/tcpip/link/loopback", "//pkg/tcpip/network/ipv4", "//pkg/tcpip/network/ipv6", + "//pkg/tcpip/ports", "//pkg/tcpip/transport/icmp", "//pkg/tcpip/transport/udp", "//pkg/waiter", diff --git a/pkg/tcpip/stack/stack.go b/pkg/tcpip/stack/stack.go index 648791302..a2190341c 100644 --- a/pkg/tcpip/stack/stack.go +++ b/pkg/tcpip/stack/stack.go @@ -1404,25 +1404,25 @@ func (s *Stack) RemoveWaker(nicID tcpip.NICID, addr tcpip.Address, waker *sleep. // transport dispatcher. Received packets that match the provided id will be // delivered to the given endpoint; specifying a nic is optional, but // nic-specific IDs have precedence over global ones. 
-func (s *Stack) RegisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, reusePort bool, bindToDevice tcpip.NICID) *tcpip.Error { - return s.demux.registerEndpoint(netProtos, protocol, id, ep, reusePort, bindToDevice) +func (s *Stack) RegisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) *tcpip.Error { + return s.demux.registerEndpoint(netProtos, protocol, id, ep, flags, bindToDevice) } // UnregisterTransportEndpoint removes the endpoint with the given id from the // stack transport dispatcher. -func (s *Stack) UnregisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, bindToDevice tcpip.NICID) { - s.demux.unregisterEndpoint(netProtos, protocol, id, ep, bindToDevice) +func (s *Stack) UnregisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) { + s.demux.unregisterEndpoint(netProtos, protocol, id, ep, flags, bindToDevice) } // StartTransportEndpointCleanup removes the endpoint with the given id from // the stack transport dispatcher. It also transitions it to the cleanup stage. -func (s *Stack) StartTransportEndpointCleanup(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, bindToDevice tcpip.NICID) { +func (s *Stack) StartTransportEndpointCleanup(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) { s.mu.Lock() defer s.mu.Unlock() s.cleanupEndpoints[ep] = struct{}{} - s.demux.unregisterEndpoint(netProtos, protocol, id, ep, bindToDevice) + s.demux.unregisterEndpoint(netProtos, protocol, id, ep, flags, bindToDevice) } // CompleteTransportEndpointCleanup removes the endpoint from the cleanup diff --git a/pkg/tcpip/stack/transport_demuxer.go b/pkg/tcpip/stack/transport_demuxer.go index e09866405..118b449d5 100644 --- a/pkg/tcpip/stack/transport_demuxer.go +++ b/pkg/tcpip/stack/transport_demuxer.go @@ -15,7 +15,6 @@ package stack import ( - "container/heap" "fmt" "math/rand" @@ -23,6 +22,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/hash/jenkins" "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/ports" ) type protocolIDs struct { @@ -43,14 +43,14 @@ type transportEndpoints struct { // unregisterEndpoint unregisters the endpoint with the given id such that it // won't receive any more packets. 
-func (eps *transportEndpoints) unregisterEndpoint(id TransportEndpointID, ep TransportEndpoint, bindToDevice tcpip.NICID) { +func (eps *transportEndpoints) unregisterEndpoint(id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) { eps.mu.Lock() defer eps.mu.Unlock() epsByNIC, ok := eps.endpoints[id] if !ok { return } - if !epsByNIC.unregisterEndpoint(bindToDevice, ep) { + if !epsByNIC.unregisterEndpoint(bindToDevice, ep, flags) { return } delete(eps.endpoints, id) @@ -204,7 +204,7 @@ func (epsByNIC *endpointsByNIC) handleControlPacket(n *NIC, id TransportEndpoint // registerEndpoint returns true if it succeeds. It fails and returns // false if ep already has an element with the same key. -func (epsByNIC *endpointsByNIC) registerEndpoint(d *transportDemuxer, netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, t TransportEndpoint, reusePort bool, bindToDevice tcpip.NICID) *tcpip.Error { +func (epsByNIC *endpointsByNIC) registerEndpoint(d *transportDemuxer, netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, t TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) *tcpip.Error { epsByNIC.mu.Lock() defer epsByNIC.mu.Unlock() @@ -214,23 +214,22 @@ func (epsByNIC *endpointsByNIC) registerEndpoint(d *transportDemuxer, netProto t demux: d, netProto: netProto, transProto: transProto, - reuse: reusePort, } epsByNIC.endpoints[bindToDevice] = multiPortEp } - return multiPortEp.singleRegisterEndpoint(t, reusePort) + return multiPortEp.singleRegisterEndpoint(t, flags) } // unregisterEndpoint returns true if endpointsByNIC has to be unregistered. -func (epsByNIC *endpointsByNIC) unregisterEndpoint(bindToDevice tcpip.NICID, t TransportEndpoint) bool { +func (epsByNIC *endpointsByNIC) unregisterEndpoint(bindToDevice tcpip.NICID, t TransportEndpoint, flags ports.Flags) bool { epsByNIC.mu.Lock() defer epsByNIC.mu.Unlock() multiPortEp, ok := epsByNIC.endpoints[bindToDevice] if !ok { return false } - if multiPortEp.unregisterEndpoint(t) { + if multiPortEp.unregisterEndpoint(t, flags) { delete(epsByNIC.endpoints, bindToDevice) } return len(epsByNIC.endpoints) == 0 @@ -279,10 +278,10 @@ func newTransportDemuxer(stack *Stack) *transportDemuxer { // registerEndpoint registers the given endpoint with the dispatcher such that // packets that match the endpoint ID are delivered to it. 
-func (d *transportDemuxer) registerEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, reusePort bool, bindToDevice tcpip.NICID) *tcpip.Error { +func (d *transportDemuxer) registerEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) *tcpip.Error { for i, n := range netProtos { - if err := d.singleRegisterEndpoint(n, protocol, id, ep, reusePort, bindToDevice); err != nil { - d.unregisterEndpoint(netProtos[:i], protocol, id, ep, bindToDevice) + if err := d.singleRegisterEndpoint(n, protocol, id, ep, flags, bindToDevice); err != nil { + d.unregisterEndpoint(netProtos[:i], protocol, id, ep, flags, bindToDevice) return err } } @@ -290,35 +289,6 @@ func (d *transportDemuxer) registerEndpoint(netProtos []tcpip.NetworkProtocolNum return nil } -type transportEndpointHeap []TransportEndpoint - -var _ heap.Interface = (*transportEndpointHeap)(nil) - -func (h *transportEndpointHeap) Len() int { - return len(*h) -} - -func (h *transportEndpointHeap) Less(i, j int) bool { - return (*h)[i].UniqueID() < (*h)[j].UniqueID() -} - -func (h *transportEndpointHeap) Swap(i, j int) { - (*h)[i], (*h)[j] = (*h)[j], (*h)[i] -} - -func (h *transportEndpointHeap) Push(x interface{}) { - *h = append(*h, x.(TransportEndpoint)) -} - -func (h *transportEndpointHeap) Pop() interface{} { - old := *h - n := len(old) - x := old[n-1] - old[n-1] = nil - *h = old[:n-1] - return x -} - // multiPortEndpoint is a container for TransportEndpoints which are bound to // the same pair of address and port. endpointsArr always has at least one // element. @@ -334,9 +304,10 @@ type multiPortEndpoint struct { netProto tcpip.NetworkProtocolNumber transProto tcpip.TransportProtocolNumber - endpoints transportEndpointHeap - // reuse indicates if more than one endpoint is allowed. - reuse bool + // endpoints stores the transport endpoints in the order in which they + // were bound. This is required for UDP SO_REUSEADDR. + endpoints []TransportEndpoint + flags ports.FlagCounter } func (ep *multiPortEndpoint) transportEndpoints() []TransportEndpoint { @@ -362,6 +333,10 @@ func selectEndpoint(id TransportEndpointID, mpep *multiPortEndpoint, seed uint32 return mpep.endpoints[0] } + if mpep.flags.IntersectionRefs().ToFlags().Effective().MostRecent { + return mpep.endpoints[len(mpep.endpoints)-1] + } + payload := []byte{ byte(id.LocalPort), byte(id.LocalPort >> 8), @@ -401,40 +376,47 @@ func (ep *multiPortEndpoint) handlePacketAll(r *Route, id TransportEndpointID, p // singleRegisterEndpoint tries to add an endpoint to the multiPortEndpoint // list. The list might be empty already. -func (ep *multiPortEndpoint) singleRegisterEndpoint(t TransportEndpoint, reusePort bool) *tcpip.Error { +func (ep *multiPortEndpoint) singleRegisterEndpoint(t TransportEndpoint, flags ports.Flags) *tcpip.Error { ep.mu.Lock() defer ep.mu.Unlock() + bits := flags.Bits() + if len(ep.endpoints) != 0 { // If it was previously bound, we need to check if we can bind again. - if !ep.reuse || !reusePort { + if ep.flags.TotalRefs() > 0 && bits&ep.flags.IntersectionRefs() == 0 { return tcpip.ErrPortInUse } } - heap.Push(&ep.endpoints, t) + ep.endpoints = append(ep.endpoints, t) + ep.flags.AddRef(bits) return nil } // unregisterEndpoint returns true if multiPortEndpoint has to be unregistered. 
-func (ep *multiPortEndpoint) unregisterEndpoint(t TransportEndpoint) bool { +func (ep *multiPortEndpoint) unregisterEndpoint(t TransportEndpoint, flags ports.Flags) bool { ep.mu.Lock() defer ep.mu.Unlock() for i, endpoint := range ep.endpoints { if endpoint == t { - heap.Remove(&ep.endpoints, i) + copy(ep.endpoints[i:], ep.endpoints[i+1:]) + ep.endpoints[len(ep.endpoints)-1] = nil + ep.endpoints = ep.endpoints[:len(ep.endpoints)-1] + + ep.flags.DropRef(flags.Bits()) break } } return len(ep.endpoints) == 0 } -func (d *transportDemuxer) singleRegisterEndpoint(netProto tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, reusePort bool, bindToDevice tcpip.NICID) *tcpip.Error { +func (d *transportDemuxer) singleRegisterEndpoint(netProto tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) *tcpip.Error { if id.RemotePort != 0 { - // TODO(eyalsoha): Why? - reusePort = false + // SO_REUSEPORT only applies to bound/listening endpoints. + flags.LoadBalanced = false } eps, ok := d.protocol[protocolIDs{netProto, protocol}] @@ -454,15 +436,20 @@ func (d *transportDemuxer) singleRegisterEndpoint(netProto tcpip.NetworkProtocol eps.endpoints[id] = epsByNIC } - return epsByNIC.registerEndpoint(d, netProto, protocol, ep, reusePort, bindToDevice) + return epsByNIC.registerEndpoint(d, netProto, protocol, ep, flags, bindToDevice) } // unregisterEndpoint unregisters the endpoint with the given id such that it // won't receive any more packets. -func (d *transportDemuxer) unregisterEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, bindToDevice tcpip.NICID) { +func (d *transportDemuxer) unregisterEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) { + if id.RemotePort != 0 { + // SO_REUSEPORT only applies to bound/listening endpoints. + flags.LoadBalanced = false + } + for _, n := range netProtos { if eps, ok := d.protocol[protocolIDs{n, protocol}]; ok { - eps.unregisterEndpoint(id, ep, bindToDevice) + eps.unregisterEndpoint(id, ep, flags, bindToDevice) } } } diff --git a/pkg/tcpip/stack/transport_demuxer_test.go b/pkg/tcpip/stack/transport_demuxer_test.go index 67d778137..73dada928 100644 --- a/pkg/tcpip/stack/transport_demuxer_test.go +++ b/pkg/tcpip/stack/transport_demuxer_test.go @@ -25,6 +25,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/link/channel" "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" "gvisor.dev/gvisor/pkg/tcpip/network/ipv6" + "gvisor.dev/gvisor/pkg/tcpip/ports" "gvisor.dev/gvisor/pkg/tcpip/stack" "gvisor.dev/gvisor/pkg/tcpip/transport/udp" "gvisor.dev/gvisor/pkg/waiter" @@ -195,7 +196,7 @@ func TestTransportDemuxerRegister(t *testing.T) { if !ok { t.Fatalf("%T does not implement stack.TransportEndpoint", ep) } - if got, want := s.RegisterTransportEndpoint(0, []tcpip.NetworkProtocolNumber{test.proto}, udp.ProtocolNumber, stack.TransportEndpointID{}, tEP, false, 0), test.want; got != want { + if got, want := s.RegisterTransportEndpoint(0, []tcpip.NetworkProtocolNumber{test.proto}, udp.ProtocolNumber, stack.TransportEndpointID{}, tEP, ports.Flags{}, 0), test.want; got != want { t.Fatalf("s.RegisterTransportEndpoint(...) 
= %s, want %s", got, want) } }) diff --git a/pkg/tcpip/stack/transport_test.go b/pkg/tcpip/stack/transport_test.go index ad61c09d6..7e8b84867 100644 --- a/pkg/tcpip/stack/transport_test.go +++ b/pkg/tcpip/stack/transport_test.go @@ -21,6 +21,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/buffer" "gvisor.dev/gvisor/pkg/tcpip/link/channel" "gvisor.dev/gvisor/pkg/tcpip/link/loopback" + "gvisor.dev/gvisor/pkg/tcpip/ports" "gvisor.dev/gvisor/pkg/tcpip/stack" "gvisor.dev/gvisor/pkg/waiter" ) @@ -154,7 +155,7 @@ func (f *fakeTransportEndpoint) Connect(addr tcpip.FullAddress) *tcpip.Error { // Try to register so that we can start receiving packets. f.ID.RemoteAddress = addr.Addr - err = f.stack.RegisterTransportEndpoint(0, []tcpip.NetworkProtocolNumber{fakeNetNumber}, fakeTransNumber, f.ID, f, false /* reuse */, 0 /* bindToDevice */) + err = f.stack.RegisterTransportEndpoint(0, []tcpip.NetworkProtocolNumber{fakeNetNumber}, fakeTransNumber, f.ID, f, ports.Flags{}, 0 /* bindToDevice */) if err != nil { return err } @@ -199,8 +200,8 @@ func (f *fakeTransportEndpoint) Bind(a tcpip.FullAddress) *tcpip.Error { fakeTransNumber, stack.TransportEndpointID{LocalAddress: a.Addr}, f, - false, /* reuse */ - 0, /* bindtoDevice */ + ports.Flags{}, + 0, /* bindtoDevice */ ); err != nil { return err } diff --git a/pkg/tcpip/transport/icmp/BUILD b/pkg/tcpip/transport/icmp/BUILD index 9ce625c17..7e5c79776 100644 --- a/pkg/tcpip/transport/icmp/BUILD +++ b/pkg/tcpip/transport/icmp/BUILD @@ -31,6 +31,7 @@ go_library( "//pkg/tcpip", "//pkg/tcpip/buffer", "//pkg/tcpip/header", + "//pkg/tcpip/ports", "//pkg/tcpip/stack", "//pkg/tcpip/transport/raw", "//pkg/tcpip/transport/tcp", diff --git a/pkg/tcpip/transport/icmp/endpoint.go b/pkg/tcpip/transport/icmp/endpoint.go index 57e0a069b..8ce294002 100644 --- a/pkg/tcpip/transport/icmp/endpoint.go +++ b/pkg/tcpip/transport/icmp/endpoint.go @@ -19,6 +19,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/buffer" "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/ports" "gvisor.dev/gvisor/pkg/tcpip/stack" "gvisor.dev/gvisor/pkg/waiter" ) @@ -110,7 +111,7 @@ func (e *endpoint) Close() { e.shutdownFlags = tcpip.ShutdownRead | tcpip.ShutdownWrite switch e.state { case stateBound, stateConnected: - e.stack.UnregisterTransportEndpoint(e.RegisterNICID, []tcpip.NetworkProtocolNumber{e.NetProto}, e.TransProto, e.ID, e, 0 /* bindToDevice */) + e.stack.UnregisterTransportEndpoint(e.RegisterNICID, []tcpip.NetworkProtocolNumber{e.NetProto}, e.TransProto, e.ID, e, ports.Flags{}, 0 /* bindToDevice */) } // Close the receive list and drain it. @@ -607,14 +608,14 @@ func (e *endpoint) registerWithStack(nicID tcpip.NICID, netProtos []tcpip.Networ if id.LocalPort != 0 { // The endpoint already has a local port, just attempt to // register it. - err := e.stack.RegisterTransportEndpoint(nicID, netProtos, e.TransProto, id, e, false /* reuse */, 0 /* bindToDevice */) + err := e.stack.RegisterTransportEndpoint(nicID, netProtos, e.TransProto, id, e, ports.Flags{}, 0 /* bindToDevice */) return id, err } // We need to find a port for the endpoint. 
_, err := e.stack.PickEphemeralPort(func(p uint16) (bool, *tcpip.Error) { id.LocalPort = p - err := e.stack.RegisterTransportEndpoint(nicID, netProtos, e.TransProto, id, e, false /* reuse */, 0 /* bindtodevice */) + err := e.stack.RegisterTransportEndpoint(nicID, netProtos, e.TransProto, id, e, ports.Flags{}, 0 /* bindtodevice */) switch err { case nil: return true, nil diff --git a/pkg/tcpip/transport/tcp/accept.go b/pkg/tcpip/transport/tcp/accept.go index e6a23c978..ad197e8db 100644 --- a/pkg/tcpip/transport/tcp/accept.go +++ b/pkg/tcpip/transport/tcp/accept.go @@ -27,6 +27,7 @@ import ( "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/ports" "gvisor.dev/gvisor/pkg/tcpip/seqnum" "gvisor.dev/gvisor/pkg/tcpip/stack" "gvisor.dev/gvisor/pkg/waiter" @@ -238,13 +239,14 @@ func (l *listenContext) createConnectingEndpoint(s *segment, iss seqnum.Value, i n.mu.Lock() // Register new endpoint so that packets are routed to it. - if err := n.stack.RegisterTransportEndpoint(n.boundNICID, n.effectiveNetProtos, ProtocolNumber, n.ID, n, n.reusePort, n.boundBindToDevice); err != nil { + if err := n.stack.RegisterTransportEndpoint(n.boundNICID, n.effectiveNetProtos, ProtocolNumber, n.ID, n, ports.Flags{LoadBalanced: n.reusePort}, n.boundBindToDevice); err != nil { n.mu.Unlock() n.Close() return nil, err } n.isRegistered = true + n.registeredReusePort = n.reusePort return n, nil } diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go index 19f7bf449..6e4d607da 100644 --- a/pkg/tcpip/transport/tcp/endpoint.go +++ b/pkg/tcpip/transport/tcp/endpoint.go @@ -465,6 +465,10 @@ type endpoint struct { // reusePort is set to true if SO_REUSEPORT is enabled. reusePort bool + // registeredReusePort is set if the current endpoint registration was + // done with SO_REUSEPORT enabled. + registeredReusePort bool + // bindToDevice is set to the NIC on which to bind or disabled if 0. bindToDevice tcpip.NICID @@ -1021,8 +1025,9 @@ func (e *endpoint) closeNoShutdownLocked() { // in Listen() when trying to register. if e.EndpointState() == StateListen && e.isPortReserved { if e.isRegistered { - e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundBindToDevice) + e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, ports.Flags{LoadBalanced: e.registeredReusePort}, e.boundBindToDevice) e.isRegistered = false + e.registeredReusePort = false } e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice) @@ -1086,8 +1091,9 @@ func (e *endpoint) cleanupLocked() { e.workerCleanup = false if e.isRegistered { - e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundBindToDevice) + e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, ports.Flags{LoadBalanced: e.registeredReusePort}, e.boundBindToDevice) e.isRegistered = false + e.registeredReusePort = false } if e.isPortReserved { @@ -2088,10 +2094,11 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc if e.ID.LocalPort != 0 { // The endpoint is bound to a port, attempt to register it. 
- err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, e.ID, e, e.reusePort, e.boundBindToDevice) + err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, e.ID, e, ports.Flags{LoadBalanced: e.reusePort}, e.boundBindToDevice) if err != nil { return err } + e.registeredReusePort = e.reusePort } else { // The endpoint doesn't have a local port yet, so try to get // one. Make sure that it isn't one that will result in the same @@ -2123,12 +2130,13 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc id := e.ID id.LocalPort = p - switch e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, e.reusePort, e.bindToDevice) { + switch e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, ports.Flags{LoadBalanced: e.reusePort}, e.bindToDevice) { case nil: // Port picking successful. Save the details of // the selected port. e.ID = id e.boundBindToDevice = e.bindToDevice + e.registeredReusePort = e.reusePort return true, nil case tcpip.ErrPortInUse: return false, nil @@ -2326,12 +2334,13 @@ func (e *endpoint) listen(backlog int) *tcpip.Error { } // Register the endpoint. - if err := e.stack.RegisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.reusePort, e.boundBindToDevice); err != nil { + if err := e.stack.RegisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, ports.Flags{LoadBalanced: e.reusePort}, e.boundBindToDevice); err != nil { return err } e.isRegistered = true e.setEndpointState(StateListen) + e.registeredReusePort = e.reusePort // The channel may be non-nil when we're restoring the endpoint, and it // may be pre-populated with some previously accepted (but not Accepted) diff --git a/pkg/tcpip/transport/udp/endpoint.go b/pkg/tcpip/transport/udp/endpoint.go index c5e3c73ef..df5efbf6a 100644 --- a/pkg/tcpip/transport/udp/endpoint.go +++ b/pkg/tcpip/transport/udp/endpoint.go @@ -103,7 +103,7 @@ type endpoint struct { multicastAddr tcpip.Address multicastNICID tcpip.NICID multicastLoop bool - reusePort bool + portFlags ports.Flags bindToDevice tcpip.NICID broadcast bool @@ -214,7 +214,7 @@ func (e *endpoint) Close() { switch e.state { case StateBound, StateConnected: - e.stack.UnregisterTransportEndpoint(e.RegisterNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundBindToDevice) + e.stack.UnregisterTransportEndpoint(e.RegisterNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundPortFlags, e.boundBindToDevice) e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice) e.boundBindToDevice = 0 e.boundPortFlags = ports.Flags{} @@ -558,10 +558,13 @@ func (e *endpoint) SetSockOptBool(opt tcpip.SockOptBool, v bool) *tcpip.Error { e.mu.Unlock() case tcpip.ReuseAddressOption: + e.mu.Lock() + e.portFlags.MostRecent = v + e.mu.Unlock() case tcpip.ReusePortOption: e.mu.Lock() - e.reusePort = v + e.portFlags.LoadBalanced = v e.mu.Unlock() case tcpip.V6OnlyOption: @@ -795,11 +798,15 @@ func (e *endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) { return v, nil case tcpip.ReuseAddressOption: - return false, nil + e.mu.RLock() + v := e.portFlags.MostRecent + e.mu.RUnlock() + + return v, nil case tcpip.ReusePortOption: e.mu.RLock() - v := e.reusePort + v := e.portFlags.LoadBalanced e.mu.RUnlock() return v, nil @@ -968,6 +975,11 @@ func (e *endpoint) Disconnect() *tcpip.Error { id stack.TransportEndpointID btd tcpip.NICID ) + + 
// We change this value below and we need the old value to unregister + // the endpoint. + boundPortFlags := e.boundPortFlags + // Exclude ephemerally bound endpoints. if e.BindNICID != 0 || e.ID.LocalAddress == "" { var err *tcpip.Error @@ -980,16 +992,17 @@ func (e *endpoint) Disconnect() *tcpip.Error { return err } e.state = StateBound + boundPortFlags = e.boundPortFlags } else { if e.ID.LocalPort != 0 { // Release the ephemeral port. - e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice) + e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, boundPortFlags, e.boundBindToDevice) e.boundPortFlags = ports.Flags{} } e.state = StateInitial } - e.stack.UnregisterTransportEndpoint(e.RegisterNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundBindToDevice) + e.stack.UnregisterTransportEndpoint(e.RegisterNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, boundPortFlags, e.boundBindToDevice) e.ID = id e.boundBindToDevice = btd e.route.Release() @@ -1061,6 +1074,8 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) *tcpip.Error { } } + oldPortFlags := e.boundPortFlags + id, btd, err := e.registerWithStack(nicID, netProtos, id) if err != nil { return err @@ -1068,7 +1083,7 @@ func (e *endpoint) Connect(addr tcpip.FullAddress) *tcpip.Error { // Remove the old registration. if e.ID.LocalPort != 0 { - e.stack.UnregisterTransportEndpoint(e.RegisterNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundBindToDevice) + e.stack.UnregisterTransportEndpoint(e.RegisterNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, oldPortFlags, e.boundBindToDevice) } e.ID = id @@ -1132,20 +1147,15 @@ func (*endpoint) Accept() (tcpip.Endpoint, *waiter.Queue, *tcpip.Error) { func (e *endpoint) registerWithStack(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, id stack.TransportEndpointID) (stack.TransportEndpointID, tcpip.NICID, *tcpip.Error) { if e.ID.LocalPort == 0 { - flags := ports.Flags{ - LoadBalanced: e.reusePort, - // FIXME(b/129164367): Support SO_REUSEADDR. - MostRecent: false, - } - port, err := e.stack.ReservePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, flags, e.bindToDevice) + port, err := e.stack.ReservePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.portFlags, e.bindToDevice) if err != nil { return id, e.bindToDevice, err } - e.boundPortFlags = flags id.LocalPort = port } + e.boundPortFlags = e.portFlags - err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, e.reusePort, e.bindToDevice) + err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, e.boundPortFlags, e.bindToDevice) if err != nil { e.stack.ReleasePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.boundPortFlags, e.bindToDevice) e.boundPortFlags = ports.Flags{} diff --git a/pkg/tcpip/transport/udp/forwarder.go b/pkg/tcpip/transport/udp/forwarder.go index 7abfa0ed2..c67e0ba95 100644 --- a/pkg/tcpip/transport/udp/forwarder.go +++ b/pkg/tcpip/transport/udp/forwarder.go @@ -73,7 +73,7 @@ func (r *ForwarderRequest) ID() stack.TransportEndpointID { // CreateEndpoint creates a connected UDP endpoint for the session request. 
func (r *ForwarderRequest) CreateEndpoint(queue *waiter.Queue) (tcpip.Endpoint, *tcpip.Error) { ep := newEndpoint(r.stack, r.route.NetProto, queue) - if err := r.stack.RegisterTransportEndpoint(r.route.NICID(), []tcpip.NetworkProtocolNumber{r.route.NetProto}, ProtocolNumber, r.id, ep, ep.reusePort, ep.bindToDevice); err != nil { + if err := r.stack.RegisterTransportEndpoint(r.route.NICID(), []tcpip.NetworkProtocolNumber{r.route.NetProto}, ProtocolNumber, r.id, ep, ep.portFlags, ep.bindToDevice); err != nil { ep.Close() return nil, err } @@ -82,6 +82,7 @@ func (r *ForwarderRequest) CreateEndpoint(queue *waiter.Queue) (tcpip.Endpoint, ep.route = r.route.Clone() ep.dstPort = r.id.RemotePort ep.RegisterNICID = r.route.NICID() + ep.boundPortFlags = ep.portFlags ep.state = StateConnected diff --git a/test/syscalls/linux/socket_bind_to_device_sequence.cc b/test/syscalls/linux/socket_bind_to_device_sequence.cc index 1967329ee..4b2cb0606 100644 --- a/test/syscalls/linux/socket_bind_to_device_sequence.cc +++ b/test/syscalls/linux/socket_bind_to_device_sequence.cc @@ -363,8 +363,9 @@ TEST_P(BindToDeviceSequenceTest, BindTwiceWithReuseOnce) { } TEST_P(BindToDeviceSequenceTest, BindWithReuseAddr) { - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. - SKIP_IF(IsRunningOnGvisor()); + // FIXME(b/158253835): Support SO_REUSEADDR on TCP sockets. + SKIP_IF(IsRunningOnGvisor() && + (GetParam().type & SOCK_STREAM) == SOCK_STREAM); ASSERT_NO_FATAL_FAILURE( BindSocket(/* reusePort */ false, /* reuse_addr */ true)); @@ -405,8 +406,9 @@ TEST_P(BindToDeviceSequenceTest, BindReuseAddrReusePortThenReusePort) { } TEST_P(BindToDeviceSequenceTest, BindReuseAddrReusePortThenReuseAddr) { - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. - SKIP_IF(IsRunningOnGvisor()); + // FIXME(b/158253835): Support SO_REUSEADDR on TCP sockets. + SKIP_IF(IsRunningOnGvisor() && + (GetParam().type & SOCK_STREAM) == SOCK_STREAM); ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true, /* reuse_addr */ true, @@ -434,8 +436,9 @@ TEST_P(BindToDeviceSequenceTest, BindDoubleReuseAddrReusePortThenReusePort) { } TEST_P(BindToDeviceSequenceTest, BindDoubleReuseAddrReusePortThenReuseAddr) { - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. - SKIP_IF(IsRunningOnGvisor()); + // FIXME(b/158253835): Support SO_REUSEADDR on TCP sockets. + SKIP_IF(IsRunningOnGvisor() && + (GetParam().type & SOCK_STREAM) == SOCK_STREAM); ASSERT_NO_FATAL_FAILURE(BindSocket( /* reuse_port */ true, /* reuse_addr */ true, /* bind_to_device */ 0)); @@ -469,10 +472,20 @@ TEST_P(BindToDeviceSequenceTest, BindReuseAddrThenReuseAddr) { /* bind_to_device */ 0, EADDRINUSE)); } -// This behavior seems like a bug? TEST_P(BindToDeviceSequenceTest, BindReuseAddrThenReuseAddrReusePortThenReuseAddr) { - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. + // The behavior described in this test seems like a Linux bug. It doesn't + // make any sense and it is unlikely that any applications rely on it. + // + // Both SO_REUSEADDR and SO_REUSEPORT allow binding multiple UDP sockets to + // the same address and deliver each packet to exactly one of the bound + // sockets. If both are enabled, one of the strategies is selected to route + // packets. The strategy is selected dynamically based on the settings of the + // currently bound sockets. 
Usually, the strategy is selected based on the + // common setting (SO_REUSEADDR or SO_REUSEPORT) amongst the sockets, but for + // some reason, Linux allows binding sets of sockets with no overlapping + // settings in some situations. In this case, it is not obvious which strategy + // would be selected as the configured setting is a contradiction. SKIP_IF(IsRunningOnGvisor()); ASSERT_NO_FATAL_FAILURE(BindSocket( diff --git a/test/syscalls/linux/socket_inet_loopback.cc b/test/syscalls/linux/socket_inet_loopback.cc index fa890ec98..a914d9a12 100644 --- a/test/syscalls/linux/socket_inet_loopback.cc +++ b/test/syscalls/linux/socket_inet_loopback.cc @@ -1900,9 +1900,6 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReserved) { TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReservedReuseAddr) { auto const& param = GetParam(); - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. - SKIP_IF(IsRunningOnGvisor() && param.type == SOCK_DGRAM); - // Bind the v6 loopback on a dual stack socket. TestAddress const& test_addr = V6Loopback(); sockaddr_storage bound_addr = test_addr.addr; @@ -2091,9 +2088,6 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedEphemeralPortReservedResueAddr) { auto const& param = GetParam(); - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. - SKIP_IF(IsRunningOnGvisor() && param.type == SOCK_DGRAM); - // Bind the v4 loopback on a dual stack socket. TestAddress const& test_addr = V4MappedLoopback(); sockaddr_storage bound_addr = test_addr.addr; @@ -2283,9 +2277,6 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReserved) { TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReservedReuseAddr) { auto const& param = GetParam(); - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. - SKIP_IF(IsRunningOnGvisor() && param.type == SOCK_DGRAM); - // Bind the v4 loopback on a v4 socket. TestAddress const& test_addr = V4Loopback(); sockaddr_storage bound_addr = test_addr.addr; diff --git a/test/syscalls/linux/socket_ip_udp_generic.cc b/test/syscalls/linux/socket_ip_udp_generic.cc index 1c533fdf2..edb86aded 100644 --- a/test/syscalls/linux/socket_ip_udp_generic.cc +++ b/test/syscalls/linux/socket_ip_udp_generic.cc @@ -225,9 +225,6 @@ TEST_P(UDPSocketPairTest, ReuseAddrDefault) { TEST_P(UDPSocketPairTest, SetReuseAddr) { auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. - SKIP_IF(IsRunningOnGvisor()); - ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn, sizeof(kSockOptOn)), SyscallSucceeds()); @@ -292,9 +289,6 @@ TEST_P(UDPSocketPairTest, SetReusePort) { TEST_P(UDPSocketPairTest, SetReuseAddrReusePort) { auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair()); - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. 
- SKIP_IF(IsRunningOnGvisor()); - ASSERT_THAT(setsockopt(sockets->first_fd(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn, sizeof(kSockOptOn)), SyscallSucceeds()); diff --git a/test/syscalls/linux/socket_ip_unbound.cc b/test/syscalls/linux/socket_ip_unbound.cc index af8dda19c..5536ab53a 100644 --- a/test/syscalls/linux/socket_ip_unbound.cc +++ b/test/syscalls/linux/socket_ip_unbound.cc @@ -157,7 +157,7 @@ TEST_P(IPUnboundSocketTest, TOSDefault) { int get = -1; socklen_t get_sz = sizeof(get); constexpr int kDefaultTOS = 0; - EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, sizeof(get)); EXPECT_EQ(get, kDefaultTOS); @@ -173,7 +173,7 @@ TEST_P(IPUnboundSocketTest, SetTOS) { int get = -1; socklen_t get_sz = sizeof(get); - EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, sizeof(get)); EXPECT_EQ(get, set); @@ -188,7 +188,7 @@ TEST_P(IPUnboundSocketTest, ZeroTOS) { SyscallSucceedsWithValue(0)); int get = -1; socklen_t get_sz = sizeof(get); - EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, sizeof(get)); EXPECT_EQ(get, set); @@ -210,7 +210,7 @@ TEST_P(IPUnboundSocketTest, InvalidLargeTOS) { } int get = -1; socklen_t get_sz = sizeof(get); - EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, sizeof(get)); EXPECT_EQ(get, kDefaultTOS); @@ -229,7 +229,7 @@ TEST_P(IPUnboundSocketTest, CheckSkipECN) { } int get = -1; socklen_t get_sz = sizeof(get); - EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, sizeof(get)); EXPECT_EQ(get, expect); @@ -249,7 +249,7 @@ TEST_P(IPUnboundSocketTest, ZeroTOSOptionSize) { } int get = -1; socklen_t get_sz = 0; - EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, 0); EXPECT_EQ(get, -1); @@ -276,7 +276,7 @@ TEST_P(IPUnboundSocketTest, SmallTOSOptionSize) { } uint get = -1; socklen_t get_sz = i; - EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, expect_sz); // Account for partial copies by getsockopt, retrieve the lower @@ -297,7 +297,7 @@ TEST_P(IPUnboundSocketTest, LargeTOSOptionSize) { // We expect the system call handler to only copy atmost sizeof(int) bytes // as asserted by the check below. Hence, we do not expect the copy to // overflow in getsockopt. 
- EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, sizeof(int)); EXPECT_EQ(get, set); @@ -325,7 +325,7 @@ TEST_P(IPUnboundSocketTest, NegativeTOS) { } int get = -1; socklen_t get_sz = sizeof(get); - EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, sizeof(get)); EXPECT_EQ(get, expect); @@ -338,20 +338,20 @@ TEST_P(IPUnboundSocketTest, InvalidNegativeTOS) { TOSOption t = GetTOSOption(GetParam().domain); int expect; if (GetParam().domain == AF_INET) { - EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz), + ASSERT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz), SyscallSucceedsWithValue(0)); expect = static_cast(set); if (GetParam().protocol == IPPROTO_TCP) { expect &= ~INET_ECN_MASK; } } else { - EXPECT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz), + ASSERT_THAT(setsockopt(socket->get(), t.level, t.option, &set, set_sz), SyscallFailsWithErrno(EINVAL)); expect = 0; } int get = 0; socklen_t get_sz = sizeof(get); - EXPECT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), + ASSERT_THAT(getsockopt(socket->get(), t.level, t.option, &get, &get_sz), SyscallSucceedsWithValue(0)); EXPECT_EQ(get_sz, sizeof(get)); EXPECT_EQ(get, expect); @@ -422,6 +422,38 @@ TEST_P(IPUnboundSocketTest, InsufficientBufferTOS) { EXPECT_THAT(sendmsg(socket->get(), &msg, 0), SyscallFailsWithErrno(EINVAL)); } +TEST_P(IPUnboundSocketTest, ReuseAddrDefault) { + auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket()); + + // FIXME(b/76031995): gVisor TCP sockets default to SO_REUSEADDR enabled. + SKIP_IF(IsRunningOnGvisor() && + (GetParam().type & SOCK_STREAM) == SOCK_STREAM); + + int get = -1; + socklen_t get_sz = sizeof(get); + ASSERT_THAT( + getsockopt(socket->get(), SOL_SOCKET, SO_REUSEADDR, &get, &get_sz), + SyscallSucceedsWithValue(0)); + EXPECT_EQ(get, kSockOptOff); + EXPECT_EQ(get_sz, sizeof(get)); +} + +TEST_P(IPUnboundSocketTest, SetReuseAddr) { + auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket()); + + ASSERT_THAT(setsockopt(socket->get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn, + sizeof(kSockOptOn)), + SyscallSucceedsWithValue(0)); + + int get = -1; + socklen_t get_sz = sizeof(get); + ASSERT_THAT( + getsockopt(socket->get(), SOL_SOCKET, SO_REUSEADDR, &get, &get_sz), + SyscallSucceedsWithValue(0)); + EXPECT_EQ(get, kSockOptOn); + EXPECT_EQ(get_sz, sizeof(get)); +} + INSTANTIATE_TEST_SUITE_P( IPUnboundSockets, IPUnboundSocketTest, ::testing::ValuesIn(VecCat(VecCat( diff --git a/test/syscalls/linux/socket_ipv4_udp_unbound.cc b/test/syscalls/linux/socket_ipv4_udp_unbound.cc index 1294d9050..4db9553e1 100644 --- a/test/syscalls/linux/socket_ipv4_udp_unbound.cc +++ b/test/syscalls/linux/socket_ipv4_udp_unbound.cc @@ -1672,10 +1672,10 @@ TEST_P(IPv4UDPUnboundSocketTest, TestBindToBcastThenSend) { } // Check that SO_REUSEADDR always delivers to the most recently bound socket. -TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrDistribution) { - // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets. - SKIP_IF(IsRunningOnGvisor()); - +// +// FIXME(gvisor.dev/issue/873): Endpoint order is not restored correctly. Enable +// random and co-op save (below) once that is fixed. 
+TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrDistribution_NoRandomSave) {
   std::vector<std::unique_ptr<FileDescriptor>> sockets;
   sockets.emplace_back(ASSERT_NO_ERRNO_AND_VALUE(NewSocket()));

@@ -1696,6 +1696,9 @@ TEST_P(IPv4UDPUnboundSocketTest, ReuseAddrDistribution) {

   constexpr int kMessageSize = 200;

+  // FIXME(gvisor.dev/issue/873): Endpoint order is not restored correctly.
+  const DisableSave ds;
+
   for (int i = 0; i < 10; i++) {
     // Add a new receiver.
     sockets.emplace_back(ASSERT_NO_ERRNO_AND_VALUE(NewSocket()));
@@ -1836,9 +1839,6 @@ TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConvertibleToReusePort) {
 }

 TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConvertibleToReuseAddr) {
-  // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets.
-  SKIP_IF(IsRunningOnGvisor());
-
   auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
   auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
   auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
@@ -1880,9 +1880,6 @@ TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConvertibleToReuseAddr) {
 }

 TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConversionReversable1) {
-  // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets.
-  SKIP_IF(IsRunningOnGvisor());
-
   auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
   auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
   auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
@@ -1927,9 +1924,6 @@ TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConversionReversable1) {
 }

 TEST_P(IPv4UDPUnboundSocketTest, BindReuseAddrReusePortConversionReversable2) {
-  // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets.
-  SKIP_IF(IsRunningOnGvisor());
-
   auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
   auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
   auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
@@ -2020,9 +2014,6 @@ TEST_P(IPv4UDPUnboundSocketTest, BindDoubleReuseAddrReusePortThenReusePort) {
 }

 TEST_P(IPv4UDPUnboundSocketTest, BindDoubleReuseAddrReusePortThenReuseAddr) {
-  // FIXME(b/129164367): Support SO_REUSEADDR on UDP sockets.
-  SKIP_IF(IsRunningOnGvisor());
-
   auto socket1 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
   auto socket2 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
   auto socket3 = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
--
cgit v1.2.3


From 505e8f4e3d72400194a69c220ce2f4288e957c4c Mon Sep 17 00:00:00 2001
From: Bhasker Hariharan
Date: Wed, 17 Jun 2020 14:07:27 -0700
Subject: Fix TtlDefault test on Linux.

Different flavors of Linux use different default TTLs, so the test now
accepts either 64 or 127 as the default.

PiperOrigin-RevId: 316961150
---
 test/syscalls/linux/socket_ip_unbound.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'test/syscalls/linux/socket_ip_unbound.cc')

diff --git a/test/syscalls/linux/socket_ip_unbound.cc b/test/syscalls/linux/socket_ip_unbound.cc
index 5536ab53a..796598eee 100644
--- a/test/syscalls/linux/socket_ip_unbound.cc
+++ b/test/syscalls/linux/socket_ip_unbound.cc
@@ -40,7 +40,7 @@ TEST_P(IPUnboundSocketTest, TtlDefault) {
   socklen_t get_sz = sizeof(get);
   EXPECT_THAT(getsockopt(socket->get(), IPPROTO_IP, IP_TTL, &get, &get_sz),
               SyscallSucceedsWithValue(0));
-  EXPECT_EQ(get, 64);
+  EXPECT_TRUE(get == 64 || get == 127);
   EXPECT_EQ(get_sz, sizeof(get));
 }

--
cgit v1.2.3


From 2141013dcef04de0591e36462ea997be1b6374d7 Mon Sep 17 00:00:00 2001
From: Ian Gudger
Date: Tue, 23 Jun 2020 19:14:05 -0700
Subject: Add support for SO_REUSEADDR to TCP sockets/endpoints.

For TCP sockets, SO_REUSEADDR relaxes the rules for binding addresses.
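To make the new semantics concrete, here is a minimal sketch against the
netstack API, mirroring the tcp_test.go changes later in this patch. The
surrounding setup (an initialized *stack.Stack `s`, the port number, and
the fatal error handling) is illustrative only and not part of this
change:

    // bindWithReuseAddr creates a TCP endpoint, opts in to SO_REUSEADDR
    // (now disabled by default), and binds it. Assumed imports: log,
    // gvisor.dev/gvisor/pkg/tcpip, pkg/tcpip/network/ipv4,
    // pkg/tcpip/stack, pkg/tcpip/transport/tcp, and pkg/waiter.
    func bindWithReuseAddr(s *stack.Stack) tcpip.Endpoint {
        var wq waiter.Queue
        ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
        if err != nil {
            log.Fatalf("NewEndpoint failed: %v", err)
        }
        // Equivalent of setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, ...).
        if err := ep.SetSockOptBool(tcpip.ReuseAddressOption, true); err != nil {
            log.Fatalf("SetSockOptBool(ReuseAddressOption) failed: %v", err)
        }
        // At bind time the option is recorded in the endpoint's port
        // flags (ports.Flags{TupleOnly: true}) and checked against other
        // reservations for the same port.
        if err := ep.Bind(tcpip.FullAddress{Port: 8080}); err != nil {
            log.Fatalf("Bind failed: %v", err)
        }
        return ep
    }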
gVisor/netstack already supported a behavior similar to SO_REUSEADDR, but did not allow disabling it. This change brings the SO_REUSEADDR behavior closer to the behavior implemented by Linux and adds a new SO_REUSEADDR disabled behavior. Like Linux, SO_REUSEADDR is now disabled by default. PiperOrigin-RevId: 317984380 --- pkg/tcpip/ports/ports.go | 206 ++++++++++++++++---- pkg/tcpip/ports/ports_test.go | 58 +++++- pkg/tcpip/stack/stack.go | 6 + pkg/tcpip/stack/transport_demuxer.go | 65 ++++++- pkg/tcpip/transport/tcp/accept.go | 140 ++++++++++---- pkg/tcpip/transport/tcp/endpoint.go | 100 +++++----- pkg/tcpip/transport/tcp/endpoint_state.go | 31 +-- pkg/tcpip/transport/tcp/tcp_test.go | 18 ++ pkg/tcpip/transport/udp/endpoint.go | 8 +- .../linux/socket_bind_to_device_sequence.cc | 12 -- test/syscalls/linux/socket_inet_loopback.cc | 210 +++++++++++++++++---- test/syscalls/linux/socket_ip_unbound.cc | 4 - 12 files changed, 633 insertions(+), 225 deletions(-) (limited to 'test/syscalls/linux/socket_ip_unbound.cc') diff --git a/pkg/tcpip/ports/ports.go b/pkg/tcpip/ports/ports.go index edc29ad27..f6d592eb5 100644 --- a/pkg/tcpip/ports/ports.go +++ b/pkg/tcpip/ports/ports.go @@ -52,6 +52,9 @@ type Flags struct { // // LoadBalanced takes precidence over MostRecent. LoadBalanced bool + + // TupleOnly represents TCP SO_REUSEADDR. + TupleOnly bool } // Bits converts the Flags to their bitset form. @@ -63,6 +66,9 @@ func (f Flags) Bits() BitFlags { if f.LoadBalanced { rf |= LoadBalancedFlag } + if f.TupleOnly { + rf |= TupleOnlyFlag + } return rf } @@ -98,6 +104,9 @@ const ( // LoadBalancedFlag represents Flags.LoadBalanced. LoadBalancedFlag + // TupleOnlyFlag represents Flags.TupleOnly. + TupleOnlyFlag + // nextFlag is the value that the next added flag will have. // // It is used to calculate FlagMask below. It is also the number of @@ -106,6 +115,10 @@ const ( // FlagMask is a bit mask for BitFlags. FlagMask = nextFlag - 1 + + // MultiBindFlagMask contains the flags that allow binding the same + // tuple multiple times. + MultiBindFlagMask = MostRecentFlag | LoadBalancedFlag ) // ToFlags converts the bitset into a Flags struct. @@ -113,6 +126,7 @@ func (f BitFlags) ToFlags() Flags { return Flags{ MostRecent: f&MostRecentFlag != 0, LoadBalanced: f&LoadBalancedFlag != 0, + TupleOnly: f&TupleOnlyFlag != 0, } } @@ -175,9 +189,54 @@ func (c FlagCounter) IntersectionRefs() BitFlags { return intersection } +type destination struct { + addr tcpip.Address + port uint16 +} + +func makeDestination(a tcpip.FullAddress) destination { + return destination{ + a.Addr, + a.Port, + } +} + +// portNode is never empty. When it has no elements, it is removed from the +// map that references it. +type portNode map[destination]FlagCounter + +// intersectionRefs calculates the intersection of flag bit values which affect +// the specified destination. +// +// If no destinations are present, all flag values are returned as there are no +// entries to limit possible flag values of a new entry. +// +// In addition to the intersection, the number of intersecting refs is +// returned. +func (p portNode) intersectionRefs(dst destination) (BitFlags, int) { + intersection := FlagMask + var count int + + for d, f := range p { + if d == dst { + intersection &= f.IntersectionRefs() + count++ + continue + } + // Wildcard destinations affect all destinations for TupleOnly. + if d.addr == anyIPAddress || dst.addr == anyIPAddress { + // Only bitwise and the TupleOnlyFlag. 
+ intersection &= ((^TupleOnlyFlag) | f.IntersectionRefs()) + count++ + } + } + + return intersection, count +} + // deviceNode is never empty. When it has no elements, it is removed from the // map that references it. -type deviceNode map[tcpip.NICID]FlagCounter +type deviceNode map[tcpip.NICID]portNode // isAvailable checks whether binding is possible by device. If not binding to a // device, check against all FlagCounters. If binding to a specific device, check @@ -186,17 +245,15 @@ type deviceNode map[tcpip.NICID]FlagCounter // If either of the port reuse flags is enabled on any of the nodes, all nodes // sharing a port must share at least one reuse flag. This matches Linux's // behavior. -func (d deviceNode) isAvailable(flags Flags, bindToDevice tcpip.NICID) bool { +func (d deviceNode) isAvailable(flags Flags, bindToDevice tcpip.NICID, dst destination) bool { flagBits := flags.Bits() if bindToDevice == 0 { - // Trying to binding all devices. - if flagBits == 0 { - // Can't bind because the (addr,port) is already bound. - return false - } intersection := FlagMask for _, p := range d { - i := p.IntersectionRefs() + i, c := p.intersectionRefs(dst) + if c == 0 { + continue + } intersection &= i if intersection&flagBits == 0 { // Can't bind because the (addr,port) was @@ -210,16 +267,17 @@ func (d deviceNode) isAvailable(flags Flags, bindToDevice tcpip.NICID) bool { intersection := FlagMask if p, ok := d[0]; ok { - intersection = p.IntersectionRefs() - if intersection&flagBits == 0 { + var c int + intersection, c = p.intersectionRefs(dst) + if c > 0 && intersection&flagBits == 0 { return false } } if p, ok := d[bindToDevice]; ok { - i := p.IntersectionRefs() + i, c := p.intersectionRefs(dst) intersection &= i - if intersection&flagBits == 0 { + if c > 0 && intersection&flagBits == 0 { return false } } @@ -233,12 +291,12 @@ type bindAddresses map[tcpip.Address]deviceNode // isAvailable checks whether an IP address is available to bind to. If the // address is the "any" address, check all other addresses. Otherwise, just // check against the "any" address and the provided address. -func (b bindAddresses) isAvailable(addr tcpip.Address, flags Flags, bindToDevice tcpip.NICID) bool { +func (b bindAddresses) isAvailable(addr tcpip.Address, flags Flags, bindToDevice tcpip.NICID, dst destination) bool { if addr == anyIPAddress { // If binding to the "any" address then check that there are no conflicts // with all addresses. for _, d := range b { - if !d.isAvailable(flags, bindToDevice) { + if !d.isAvailable(flags, bindToDevice, dst) { return false } } @@ -247,14 +305,14 @@ func (b bindAddresses) isAvailable(addr tcpip.Address, flags Flags, bindToDevice // Check that there is no conflict with the "any" address. if d, ok := b[anyIPAddress]; ok { - if !d.isAvailable(flags, bindToDevice) { + if !d.isAvailable(flags, bindToDevice, dst) { return false } } // Check that this is no conflict with the provided address. if d, ok := b[addr]; ok { - if !d.isAvailable(flags, bindToDevice) { + if !d.isAvailable(flags, bindToDevice, dst) { return false } } @@ -320,17 +378,17 @@ func (s *PortManager) pickEphemeralPort(offset, count uint32, testPort func(p ui } // IsPortAvailable tests if the given port is available on all given protocols. 
-func (s *PortManager) IsPortAvailable(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID) bool { +func (s *PortManager) IsPortAvailable(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress) bool { s.mu.Lock() defer s.mu.Unlock() - return s.isPortAvailableLocked(networks, transport, addr, port, flags, bindToDevice) + return s.isPortAvailableLocked(networks, transport, addr, port, flags, bindToDevice, makeDestination(dest)) } -func (s *PortManager) isPortAvailableLocked(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID) bool { +func (s *PortManager) isPortAvailableLocked(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dst destination) bool { for _, network := range networks { desc := portDescriptor{network, transport, port} if addrs, ok := s.allocatedPorts[desc]; ok { - if !addrs.isAvailable(addr, flags, bindToDevice) { + if !addrs.isAvailable(addr, flags, bindToDevice, dst) { return false } } @@ -342,14 +400,16 @@ func (s *PortManager) isPortAvailableLocked(networks []tcpip.NetworkProtocolNumb // reserved by another endpoint. If port is zero, ReservePort will search for // an unreserved ephemeral port and reserve it, returning its value in the // "port" return value. -func (s *PortManager) ReservePort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID) (reservedPort uint16, err *tcpip.Error) { +func (s *PortManager) ReservePort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress) (reservedPort uint16, err *tcpip.Error) { s.mu.Lock() defer s.mu.Unlock() + dst := makeDestination(dest) + // If a port is specified, just try to reserve it for all network // protocols. if port != 0 { - if !s.reserveSpecificPort(networks, transport, addr, port, flags, bindToDevice) { + if !s.reserveSpecificPort(networks, transport, addr, port, flags, bindToDevice, dst) { return 0, tcpip.ErrPortInUse } return port, nil @@ -357,15 +417,16 @@ func (s *PortManager) ReservePort(networks []tcpip.NetworkProtocolNumber, transp // A port wasn't specified, so try to find one. return s.PickEphemeralPort(func(p uint16) (bool, *tcpip.Error) { - return s.reserveSpecificPort(networks, transport, addr, p, flags, bindToDevice), nil + return s.reserveSpecificPort(networks, transport, addr, p, flags, bindToDevice, dst), nil }) } // reserveSpecificPort tries to reserve the given port on all given protocols. 
-func (s *PortManager) reserveSpecificPort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID) bool { - if !s.isPortAvailableLocked(networks, transport, addr, port, flags, bindToDevice) { +func (s *PortManager) reserveSpecificPort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dst destination) bool { + if !s.isPortAvailableLocked(networks, transport, addr, port, flags, bindToDevice, dst) { return false } + flagBits := flags.Bits() // Reserve port on all network protocols. @@ -381,9 +442,65 @@ func (s *PortManager) reserveSpecificPort(networks []tcpip.NetworkProtocolNumber d = make(deviceNode) m[addr] = d } - n := d[bindToDevice] + p := d[bindToDevice] + if p == nil { + p = make(portNode) + } + n := p[dst] n.AddRef(flagBits) - d[bindToDevice] = n + p[dst] = n + d[bindToDevice] = p + } + + return true +} + +// ReserveTuple adds a port reservation for the tuple on all given protocol. +func (s *PortManager) ReserveTuple(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress) bool { + flagBits := flags.Bits() + dst := makeDestination(dest) + + s.mu.Lock() + defer s.mu.Unlock() + + // It is easier to undo the entire reservation, so if we find that the + // tuple can't be fully added, finish and undo the whole thing. + undo := false + + // Reserve port on all network protocols. + for _, network := range networks { + desc := portDescriptor{network, transport, port} + m, ok := s.allocatedPorts[desc] + if !ok { + m = make(bindAddresses) + s.allocatedPorts[desc] = m + } + d, ok := m[addr] + if !ok { + d = make(deviceNode) + m[addr] = d + } + p := d[bindToDevice] + if p == nil { + p = make(portNode) + } + + n := p[dst] + if n.TotalRefs() != 0 && n.IntersectionRefs()&flagBits == 0 { + // Tuple already exists. + undo = true + } + n.AddRef(flagBits) + p[dst] = n + d[bindToDevice] = p + } + + if undo { + // releasePortLocked decrements the counts (rather than setting + // them to zero), so it will undo the incorrect incrementing + // above. + s.releasePortLocked(networks, transport, addr, port, flagBits, bindToDevice, dst) + return false } return true @@ -391,12 +508,14 @@ func (s *PortManager) reserveSpecificPort(networks []tcpip.NetworkProtocolNumber // ReleasePort releases the reservation on a port/IP combination so that it can // be reserved by other endpoints. 
-func (s *PortManager) ReleasePort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID) { +func (s *PortManager) ReleasePort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress) { s.mu.Lock() defer s.mu.Unlock() - flagBits := flags.Bits() + s.releasePortLocked(networks, transport, addr, port, flags.Bits(), bindToDevice, makeDestination(dest)) +} +func (s *PortManager) releasePortLocked(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags BitFlags, bindToDevice tcpip.NICID, dst destination) { for _, network := range networks { desc := portDescriptor{network, transport, port} if m, ok := s.allocatedPorts[desc]; ok { @@ -404,21 +523,32 @@ func (s *PortManager) ReleasePort(networks []tcpip.NetworkProtocolNumber, transp if !ok { continue } - n, ok := d[bindToDevice] + p, ok := d[bindToDevice] if !ok { continue } - n.refs[flagBits]-- - d[bindToDevice] = n - if n.TotalRefs() == 0 { - delete(d, bindToDevice) + n, ok := p[dst] + if !ok { + continue + } + n.DropRef(flags) + if n.TotalRefs() > 0 { + p[dst] = n + continue } - if len(d) == 0 { - delete(m, addr) + delete(p, dst) + if len(p) > 0 { + continue + } + delete(d, bindToDevice) + if len(d) > 0 { + continue } - if len(m) == 0 { - delete(s.allocatedPorts, desc) + delete(m, addr) + if len(m) > 0 { + continue } + delete(s.allocatedPorts, desc) } } } diff --git a/pkg/tcpip/ports/ports_test.go b/pkg/tcpip/ports/ports_test.go index d6969d050..58db5868c 100644 --- a/pkg/tcpip/ports/ports_test.go +++ b/pkg/tcpip/ports/ports_test.go @@ -36,6 +36,7 @@ type portReserveTestAction struct { flags Flags release bool device tcpip.NICID + dest tcpip.FullAddress } func TestPortReservation(t *testing.T) { @@ -272,6 +273,54 @@ func TestPortReservation(t *testing.T) { {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true, LoadBalanced: true}, want: nil}, {port: 24, ip: fakeIPAddress, flags: Flags{MostRecent: true}, want: tcpip.ErrPortInUse}, }, + }, { + tname: "bind tuple with reuseaddr, and then wildcard with reuseaddr", + actions: []portReserveTestAction{ + {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil}, + {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{}, want: nil}, + }, + }, { + tname: "bind tuple with reuseaddr, and then wildcard", + actions: []portReserveTestAction{ + {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil}, + {port: 24, ip: fakeIPAddress, want: tcpip.ErrPortInUse}, + }, + }, { + tname: "bind wildcard with reuseaddr, and then tuple with reuseaddr", + actions: []portReserveTestAction{ + {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{}, want: nil}, + {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil}, + }, + }, { + tname: "bind tuple with reuseaddr, and then wildcard", + actions: []portReserveTestAction{ + {port: 24, ip: fakeIPAddress, want: nil}, + {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: tcpip.ErrPortInUse}, + }, + }, { + tname: "bind two tuples with reuseaddr", + actions: 
[]portReserveTestAction{ + {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil}, + {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 25}, want: nil}, + }, + }, { + tname: "bind two tuples", + actions: []portReserveTestAction{ + {port: 24, ip: fakeIPAddress, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: nil}, + {port: 24, ip: fakeIPAddress, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 25}, want: nil}, + }, + }, { + tname: "bind wildcard, and then tuple with reuseaddr", + actions: []portReserveTestAction{ + {port: 24, ip: fakeIPAddress, dest: tcpip.FullAddress{}, want: nil}, + {port: 24, ip: fakeIPAddress, flags: Flags{TupleOnly: true}, dest: tcpip.FullAddress{Addr: fakeIPAddress, Port: 24}, want: tcpip.ErrPortInUse}, + }, + }, { + tname: "bind wildcard twice with reuseaddr", + actions: []portReserveTestAction{ + {port: 24, ip: anyIPAddress, flags: Flags{TupleOnly: true}, want: nil}, + {port: 24, ip: anyIPAddress, flags: Flags{TupleOnly: true}, want: nil}, + }, }, } { t.Run(test.tname, func(t *testing.T) { @@ -280,19 +329,18 @@ func TestPortReservation(t *testing.T) { for _, test := range test.actions { if test.release { - pm.ReleasePort(net, fakeTransNumber, test.ip, test.port, test.flags, test.device) + pm.ReleasePort(net, fakeTransNumber, test.ip, test.port, test.flags, test.device, test.dest) continue } - gotPort, err := pm.ReservePort(net, fakeTransNumber, test.ip, test.port, test.flags, test.device) + gotPort, err := pm.ReservePort(net, fakeTransNumber, test.ip, test.port, test.flags, test.device, test.dest) if err != test.want { - t.Fatalf("ReservePort(.., .., %s, %d, %+v, %d) = %v, want %v", test.ip, test.port, test.flags, test.device, err, test.want) + t.Fatalf("ReservePort(.., .., %s, %d, %+v, %d, %v) = %v, want %v", test.ip, test.port, test.flags, test.device, test.dest, err, test.want) } if test.port == 0 && (gotPort == 0 || gotPort < FirstEphemeral) { - t.Fatalf("ReservePort(.., .., .., 0) = %d, want port number >= %d to be picked", gotPort, FirstEphemeral) + t.Fatalf("ReservePort(.., .., .., 0, ..) = %d, want port number >= %d to be picked", gotPort, FirstEphemeral) } } }) - } } diff --git a/pkg/tcpip/stack/stack.go b/pkg/tcpip/stack/stack.go index 51abe32a7..e92ec0c24 100644 --- a/pkg/tcpip/stack/stack.go +++ b/pkg/tcpip/stack/stack.go @@ -1408,6 +1408,12 @@ func (s *Stack) RegisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip.N return s.demux.registerEndpoint(netProtos, protocol, id, ep, flags, bindToDevice) } +// CheckRegisterTransportEndpoint checks if an endpoint can be registered with +// the stack transport dispatcher. +func (s *Stack) CheckRegisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, flags ports.Flags, bindToDevice tcpip.NICID) *tcpip.Error { + return s.demux.checkEndpoint(netProtos, protocol, id, flags, bindToDevice) +} + // UnregisterTransportEndpoint removes the endpoint with the given id from the // stack transport dispatcher. 
func (s *Stack) UnregisterTransportEndpoint(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) { diff --git a/pkg/tcpip/stack/transport_demuxer.go b/pkg/tcpip/stack/transport_demuxer.go index 118b449d5..b902c6ca9 100644 --- a/pkg/tcpip/stack/transport_demuxer.go +++ b/pkg/tcpip/stack/transport_demuxer.go @@ -221,6 +221,18 @@ func (epsByNIC *endpointsByNIC) registerEndpoint(d *transportDemuxer, netProto t return multiPortEp.singleRegisterEndpoint(t, flags) } +func (epsByNIC *endpointsByNIC) checkEndpoint(d *transportDemuxer, netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, flags ports.Flags, bindToDevice tcpip.NICID) *tcpip.Error { + epsByNIC.mu.RLock() + defer epsByNIC.mu.RUnlock() + + multiPortEp, ok := epsByNIC.endpoints[bindToDevice] + if !ok { + return nil + } + + return multiPortEp.singleCheckEndpoint(flags) +} + // unregisterEndpoint returns true if endpointsByNIC has to be unregistered. func (epsByNIC *endpointsByNIC) unregisterEndpoint(bindToDevice tcpip.NICID, t TransportEndpoint, flags ports.Flags) bool { epsByNIC.mu.Lock() @@ -289,6 +301,17 @@ func (d *transportDemuxer) registerEndpoint(netProtos []tcpip.NetworkProtocolNum return nil } +// checkEndpoint checks if an endpoint can be registered with the dispatcher. +func (d *transportDemuxer) checkEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, flags ports.Flags, bindToDevice tcpip.NICID) *tcpip.Error { + for _, n := range netProtos { + if err := d.singleCheckEndpoint(n, protocol, id, flags, bindToDevice); err != nil { + return err + } + } + + return nil +} + // multiPortEndpoint is a container for TransportEndpoints which are bound to // the same pair of address and port. endpointsArr always has at least one // element. @@ -380,7 +403,7 @@ func (ep *multiPortEndpoint) singleRegisterEndpoint(t TransportEndpoint, flags p ep.mu.Lock() defer ep.mu.Unlock() - bits := flags.Bits() + bits := flags.Bits() & ports.MultiBindFlagMask if len(ep.endpoints) != 0 { // If it was previously bound, we need to check if we can bind again. @@ -395,6 +418,22 @@ func (ep *multiPortEndpoint) singleRegisterEndpoint(t TransportEndpoint, flags p return nil } +func (ep *multiPortEndpoint) singleCheckEndpoint(flags ports.Flags) *tcpip.Error { + ep.mu.RLock() + defer ep.mu.RUnlock() + + bits := flags.Bits() & ports.MultiBindFlagMask + + if len(ep.endpoints) != 0 { + // If it was previously bound, we need to check if we can bind again. + if ep.flags.TotalRefs() > 0 && bits&ep.flags.IntersectionRefs() == 0 { + return tcpip.ErrPortInUse + } + } + + return nil +} + // unregisterEndpoint returns true if multiPortEndpoint has to be unregistered. 
func (ep *multiPortEndpoint) unregisterEndpoint(t TransportEndpoint, flags ports.Flags) bool { ep.mu.Lock() @@ -406,7 +445,7 @@ func (ep *multiPortEndpoint) unregisterEndpoint(t TransportEndpoint, flags ports ep.endpoints[len(ep.endpoints)-1] = nil ep.endpoints = ep.endpoints[:len(ep.endpoints)-1] - ep.flags.DropRef(flags.Bits()) + ep.flags.DropRef(flags.Bits() & ports.MultiBindFlagMask) break } } @@ -439,6 +478,28 @@ func (d *transportDemuxer) singleRegisterEndpoint(netProto tcpip.NetworkProtocol return epsByNIC.registerEndpoint(d, netProto, protocol, ep, flags, bindToDevice) } +func (d *transportDemuxer) singleCheckEndpoint(netProto tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, flags ports.Flags, bindToDevice tcpip.NICID) *tcpip.Error { + if id.RemotePort != 0 { + // SO_REUSEPORT only applies to bound/listening endpoints. + flags.LoadBalanced = false + } + + eps, ok := d.protocol[protocolIDs{netProto, protocol}] + if !ok { + return tcpip.ErrUnknownProtocol + } + + eps.mu.RLock() + defer eps.mu.RUnlock() + + epsByNIC, ok := eps.endpoints[id] + if !ok { + return nil + } + + return epsByNIC.checkEndpoint(d, netProto, protocol, flags, bindToDevice) +} + // unregisterEndpoint unregisters the endpoint with the given id such that it // won't receive any more packets. func (d *transportDemuxer) unregisterEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) { diff --git a/pkg/tcpip/transport/tcp/accept.go b/pkg/tcpip/transport/tcp/accept.go index 7679fe169..6e00e5526 100644 --- a/pkg/tcpip/transport/tcp/accept.go +++ b/pkg/tcpip/transport/tcp/accept.go @@ -27,7 +27,6 @@ import ( "gvisor.dev/gvisor/pkg/sync" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" - "gvisor.dev/gvisor/pkg/tcpip/ports" "gvisor.dev/gvisor/pkg/tcpip/seqnum" "gvisor.dev/gvisor/pkg/tcpip/stack" "gvisor.dev/gvisor/pkg/waiter" @@ -199,9 +198,8 @@ func (l *listenContext) isCookieValid(id stack.TransportEndpointID, cookie seqnu } // createConnectingEndpoint creates a new endpoint in a connecting state, with -// the connection parameters given by the arguments. The endpoint is returned -// with n.mu held. -func (l *listenContext) createConnectingEndpoint(s *segment, iss seqnum.Value, irs seqnum.Value, rcvdSynOpts *header.TCPSynOptions, queue *waiter.Queue) (*endpoint, *tcpip.Error) { +// the connection parameters given by the arguments. +func (l *listenContext) createConnectingEndpoint(s *segment, iss seqnum.Value, irs seqnum.Value, rcvdSynOpts *header.TCPSynOptions, queue *waiter.Queue) *endpoint { // Create a new endpoint. netProto := l.netProto if netProto == 0 { @@ -227,22 +225,7 @@ func (l *listenContext) createConnectingEndpoint(s *segment, iss seqnum.Value, i // window to grow to a really large value. n.rcvAutoParams.prevCopied = n.initialReceiveWindow() - // Lock the endpoint before registering to ensure that no out of - // band changes are possible due to incoming packets etc till - // the endpoint is done initializing. - n.mu.Lock() - - // Register new endpoint so that packets are routed to it. 
- if err := n.stack.RegisterTransportEndpoint(n.boundNICID, n.effectiveNetProtos, ProtocolNumber, n.ID, n, ports.Flags{LoadBalanced: n.reusePort}, n.boundBindToDevice); err != nil { - n.mu.Unlock() - n.Close() - return nil, err - } - - n.isRegistered = true - n.registeredReusePort = n.reusePort - - return n, nil + return n } // createEndpointAndPerformHandshake creates a new endpoint in connected state @@ -253,10 +236,12 @@ func (l *listenContext) createEndpointAndPerformHandshake(s *segment, opts *head // Create new endpoint. irs := s.sequenceNumber isn := generateSecureISN(s.id, l.stack.Seed()) - ep, err := l.createConnectingEndpoint(s, isn, irs, opts, queue) - if err != nil { - return nil, err - } + ep := l.createConnectingEndpoint(s, isn, irs, opts, queue) + + // Lock the endpoint before registering to ensure that no out of + // band changes are possible due to incoming packets etc till + // the endpoint is done initializing. + ep.mu.Lock() ep.owner = owner // listenEP is nil when listenContext is used by tcp.Forwarder. @@ -264,18 +249,13 @@ func (l *listenContext) createEndpointAndPerformHandshake(s *segment, opts *head if l.listenEP != nil { l.listenEP.mu.Lock() if l.listenEP.EndpointState() != StateListen { + l.listenEP.mu.Unlock() // Ensure we release any registrations done by the newly // created endpoint. ep.mu.Unlock() ep.Close() - // Wake up any waiters. This is strictly not required normally - // as a socket that was never accepted can't really have any - // registered waiters except when stack.Wait() is called which - // waits for all registered endpoints to stop and expects an - // EventHUp. - ep.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.EventIn | waiter.EventOut) return nil, tcpip.ErrConnectionAborted } l.addPendingEndpoint(ep) @@ -284,21 +264,44 @@ func (l *listenContext) createEndpointAndPerformHandshake(s *segment, opts *head // to the newly created endpoint. l.listenEP.propagateInheritableOptionsLocked(ep) + if !ep.reserveTupleLocked() { + ep.mu.Unlock() + ep.Close() + + if l.listenEP != nil { + l.removePendingEndpoint(ep) + l.listenEP.mu.Unlock() + } + + return nil, tcpip.ErrConnectionAborted + } + deferAccept = l.listenEP.deferAccept l.listenEP.mu.Unlock() } + // Register new endpoint so that packets are routed to it. + if err := ep.stack.RegisterTransportEndpoint(ep.boundNICID, ep.effectiveNetProtos, ProtocolNumber, ep.ID, ep, ep.boundPortFlags, ep.boundBindToDevice); err != nil { + ep.mu.Unlock() + ep.Close() + + if l.listenEP != nil { + l.removePendingEndpoint(ep) + } + + ep.drainClosingSegmentQueue() + + return nil, err + } + + ep.isRegistered = true + // Perform the 3-way handshake. h := newPassiveHandshake(ep, seqnum.Size(ep.initialReceiveWindow()), isn, irs, opts, deferAccept) if err := h.execute(); err != nil { ep.mu.Unlock() ep.Close() - // Wake up any waiters. This is strictly not required normally - // as a socket that was never accepted can't really have any - // registered waiters except when stack.Wait() is called which - // waits for all registered endpoints to stop and expects an - // EventHUp. - ep.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.EventIn | waiter.EventOut) + ep.notifyAborted() if l.listenEP != nil { l.removePendingEndpoint(ep) @@ -374,6 +377,43 @@ func (e *endpoint) deliverAccepted(n *endpoint) { // Precondition: e.mu and n.mu must be held. 
func (e *endpoint) propagateInheritableOptionsLocked(n *endpoint) { n.userTimeout = e.userTimeout + n.portFlags = e.portFlags + n.boundBindToDevice = e.boundBindToDevice + n.boundPortFlags = e.boundPortFlags +} + +// reserveTupleLocked reserves an accepted endpoint's tuple. +// +// Preconditions: +// * propagateInheritableOptionsLocked has been called. +// * e.mu is held. +func (e *endpoint) reserveTupleLocked() bool { + dest := tcpip.FullAddress{Addr: e.ID.RemoteAddress, Port: e.ID.RemotePort} + if !e.stack.ReserveTuple( + e.effectiveNetProtos, + ProtocolNumber, + e.ID.LocalAddress, + e.ID.LocalPort, + e.boundPortFlags, + e.boundBindToDevice, + dest, + ) { + return false + } + + e.isPortReserved = true + e.boundDest = dest + return true +} + +// notifyAborted wakes up any waiters on registered, but not accepted +// endpoints. +// +// This is strictly not required normally as a socket that was never accepted +// can't really have any registered waiters except when stack.Wait() is called +// which waits for all registered endpoints to stop and expects an EventHUp. +func (e *endpoint) notifyAborted() { + e.waiterQueue.Notify(waiter.EventHUp | waiter.EventErr | waiter.EventIn | waiter.EventOut) } // handleSynSegment is called in its own goroutine once the listening endpoint @@ -568,16 +608,34 @@ func (e *endpoint) handleListenSegment(ctx *listenContext, s *segment) { rcvdSynOptions.TSEcr = s.parsedOptions.TSEcr } - n, err := ctx.createConnectingEndpoint(s, iss, irs, rcvdSynOptions, &waiter.Queue{}) - if err != nil { + n := ctx.createConnectingEndpoint(s, iss, irs, rcvdSynOptions, &waiter.Queue{}) + + n.mu.Lock() + + // Propagate any inheritable options from the listening endpoint + // to the newly created endpoint. + e.propagateInheritableOptionsLocked(n) + + if !n.reserveTupleLocked() { + n.mu.Unlock() + n.Close() + e.stack.Stats().TCP.FailedConnectionAttempts.Increment() e.stats.FailedConnectionAttempts.Increment() return } - // Propagate any inheritable options from the listening endpoint - // to the newly created endpoint. - e.propagateInheritableOptionsLocked(n) + // Register new endpoint so that packets are routed to it. + if err := n.stack.RegisterTransportEndpoint(n.boundNICID, n.effectiveNetProtos, ProtocolNumber, n.ID, n, n.boundPortFlags, n.boundBindToDevice); err != nil { + n.mu.Unlock() + n.Close() + + e.stack.Stats().TCP.FailedConnectionAttempts.Increment() + e.stats.FailedConnectionAttempts.Increment() + return + } + + n.isRegistered = true // clear the tsOffset for the newly created // endpoint as the Timestamp was already diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go index 10df2bcd5..1e4c2f507 100644 --- a/pkg/tcpip/transport/tcp/endpoint.go +++ b/pkg/tcpip/transport/tcp/endpoint.go @@ -396,7 +396,8 @@ type endpoint struct { mu sync.Mutex `state:"nosave"` ownedByUser uint32 - // state must be read/set using the EndpointState()/setEndpointState() methods. + // state must be read/set using the EndpointState()/setEndpointState() + // methods. 
state EndpointState `state:".(EndpointState)"` // origEndpointState is only used during a restore phase to save the @@ -405,8 +406,8 @@ type endpoint struct { origEndpointState EndpointState `state:"nosave"` isPortReserved bool `state:"manual"` - isRegistered bool - boundNICID tcpip.NICID `state:"manual"` + isRegistered bool `state:"manual"` + boundNICID tcpip.NICID route stack.Route `state:"manual"` ttl uint8 v6only bool @@ -415,10 +416,14 @@ type endpoint struct { // disabling SO_BROADCAST, albeit as a NOOP. broadcast bool + // portFlags stores the current values of port related flags. + portFlags ports.Flags + // Values used to reserve a port or register a transport endpoint // (which ever happens first). boundBindToDevice tcpip.NICID boundPortFlags ports.Flags + boundDest tcpip.FullAddress // effectiveNetProtos contains the network protocols actually in use. In // most cases it will only contain "netProto", but in cases like IPv6 @@ -426,7 +431,7 @@ type endpoint struct { // protocols (e.g., IPv6 and IPv4) or a single different protocol (e.g., // IPv4 when IPv6 endpoint is bound or connected to an IPv4 mapped // address). - effectiveNetProtos []tcpip.NetworkProtocolNumber `state:"manual"` + effectiveNetProtos []tcpip.NetworkProtocolNumber // workerRunning specifies if a worker goroutine is running. workerRunning bool @@ -462,13 +467,6 @@ type endpoint struct { // sack holds TCP SACK related information for this endpoint. sack SACKInfo - // reusePort is set to true if SO_REUSEPORT is enabled. - reusePort bool - - // registeredReusePort is set if the current endpoint registration was - // done with SO_REUSEPORT enabled. - registeredReusePort bool - // bindToDevice is set to the NIC on which to bind or disabled if 0. bindToDevice tcpip.NICID @@ -488,7 +486,6 @@ type endpoint struct { // The options below aren't implemented, but we remember the user // settings because applications expect to be able to set/query these // options. - reuseAddr bool // slowAck holds the negated state of quick ack. It is stubbed out and // does nothing. @@ -838,7 +835,6 @@ func newEndpoint(s *stack.Stack, netProto tcpip.NetworkProtocolNumber, waiterQue rcvBufSize: DefaultReceiveBufferSize, sndBufSize: DefaultSendBufferSize, sndMTU: int(math.MaxInt32), - reuseAddr: true, keepalive: keepalive{ // Linux defaults. idle: 2 * time.Hour, @@ -1025,15 +1021,15 @@ func (e *endpoint) closeNoShutdownLocked() { // in Listen() when trying to register. if e.EndpointState() == StateListen && e.isPortReserved { if e.isRegistered { - e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, ports.Flags{LoadBalanced: e.registeredReusePort}, e.boundBindToDevice) + e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundPortFlags, e.boundBindToDevice) e.isRegistered = false - e.registeredReusePort = false } - e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice) + e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice, e.boundDest) e.isPortReserved = false e.boundBindToDevice = 0 e.boundPortFlags = ports.Flags{} + e.boundDest = tcpip.FullAddress{} } // Mark endpoint as closed. 
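// Illustrative sketch (not part of the patch; laddr, raddr, rport, and
// nic stand in for caller state): after this change, the release path in
// the hunk below must mirror the reservation exactly, including the new
// destination tuple recorded in e.boundDest.
//
//     flags := ports.Flags{TupleOnly: true} // set by SO_REUSEADDR
//     dest := tcpip.FullAddress{Addr: raddr, Port: rport}
//     port, err := e.stack.ReservePort(netProtos, ProtocolNumber, laddr, 0, flags, nic, dest)
//     ...
//     e.stack.ReleasePort(netProtos, ProtocolNumber, laddr, port, flags, nic, dest)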
@@ -1091,17 +1087,17 @@ func (e *endpoint) cleanupLocked() { e.workerCleanup = false if e.isRegistered { - e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, ports.Flags{LoadBalanced: e.registeredReusePort}, e.boundBindToDevice) + e.stack.StartTransportEndpointCleanup(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundPortFlags, e.boundBindToDevice) e.isRegistered = false - e.registeredReusePort = false } if e.isPortReserved { - e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice) + e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice, e.boundDest) e.isPortReserved = false } e.boundBindToDevice = 0 e.boundPortFlags = ports.Flags{} + e.boundDest = tcpip.FullAddress{} e.route.Release() e.stack.CompleteTransportEndpointCleanup(e) @@ -1522,12 +1518,12 @@ func (e *endpoint) SetSockOptBool(opt tcpip.SockOptBool, v bool) *tcpip.Error { case tcpip.ReuseAddressOption: e.LockUser() - e.reuseAddr = v + e.portFlags.TupleOnly = v e.UnlockUser() case tcpip.ReusePortOption: e.LockUser() - e.reusePort = v + e.portFlags.LoadBalanced = v e.UnlockUser() case tcpip.V6OnlyOption: @@ -1831,14 +1827,14 @@ func (e *endpoint) GetSockOptBool(opt tcpip.SockOptBool) (bool, *tcpip.Error) { case tcpip.ReuseAddressOption: e.LockUser() - v := e.reuseAddr + v := e.portFlags.TupleOnly e.UnlockUser() return v, nil case tcpip.ReusePortOption: e.LockUser() - v := e.reusePort + v := e.portFlags.LoadBalanced e.UnlockUser() return v, nil @@ -2091,8 +2087,6 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc } defer r.Release() - origID := e.ID - netProtos := []tcpip.NetworkProtocolNumber{netProto} e.ID.LocalAddress = r.LocalAddress e.ID.RemoteAddress = r.RemoteAddress @@ -2100,11 +2094,10 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc if e.ID.LocalPort != 0 { // The endpoint is bound to a port, attempt to register it. - err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, e.ID, e, ports.Flags{LoadBalanced: e.reusePort}, e.boundBindToDevice) + err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, e.ID, e, e.boundPortFlags, e.boundBindToDevice) if err != nil { return err } - e.registeredReusePort = e.reusePort } else { // The endpoint doesn't have a local port yet, so try to get // one. Make sure that it isn't one that will result in the same @@ -2128,40 +2121,33 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) *tc if sameAddr && p == e.ID.RemotePort { return false, nil } - // reusePort is false below because connect cannot reuse a port even if - // reusePort was set. - if !e.stack.IsPortAvailable(netProtos, ProtocolNumber, e.ID.LocalAddress, p, ports.Flags{LoadBalanced: false}, e.bindToDevice) { + if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, e.bindToDevice, addr); err != nil { return false, nil } id := e.ID id.LocalPort = p - switch e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, ports.Flags{LoadBalanced: e.reusePort}, e.bindToDevice) { - case nil: - // Port picking successful. Save the details of - // the selected port. 
- e.ID = id - e.boundBindToDevice = e.bindToDevice - e.registeredReusePort = e.reusePort - return true, nil - case tcpip.ErrPortInUse: - return false, nil - default: + if err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, e.portFlags, e.bindToDevice); err != nil { + e.stack.ReleasePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, e.bindToDevice, addr) + if err == tcpip.ErrPortInUse { + return false, nil + } return false, err } + + // Port picking successful. Save the details of + // the selected port. + e.ID = id + e.isPortReserved = true + e.boundBindToDevice = e.bindToDevice + e.boundPortFlags = e.portFlags + e.boundDest = addr + return true, nil }); err != nil { return err } } - // Remove the port reservation. This can happen when Bind is called - // before Connect: in such a case we don't want to hold on to - // reservations anymore. - if e.isPortReserved { - e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, origID.LocalAddress, origID.LocalPort, e.boundPortFlags, e.boundBindToDevice) - e.isPortReserved = false - } - e.isRegistered = true e.setEndpointState(StateConnecting) e.route = r.Clone() @@ -2340,13 +2326,12 @@ func (e *endpoint) listen(backlog int) *tcpip.Error { } // Register the endpoint. - if err := e.stack.RegisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, ports.Flags{LoadBalanced: e.reusePort}, e.boundBindToDevice); err != nil { + if err := e.stack.RegisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundPortFlags, e.boundBindToDevice); err != nil { return err } e.isRegistered = true e.setEndpointState(StateListen) - e.registeredReusePort = e.reusePort // The channel may be non-nil when we're restoring the endpoint, and it // may be pre-populated with some previously accepted (but not Accepted) @@ -2433,16 +2418,13 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err *tcpip.Error) { } } - flags := ports.Flags{ - LoadBalanced: e.reusePort, - } - port, err := e.stack.ReservePort(netProtos, ProtocolNumber, addr.Addr, addr.Port, flags, e.bindToDevice) + port, err := e.stack.ReservePort(netProtos, ProtocolNumber, addr.Addr, addr.Port, e.portFlags, e.bindToDevice, tcpip.FullAddress{}) if err != nil { return err } e.boundBindToDevice = e.bindToDevice - e.boundPortFlags = flags + e.boundPortFlags = e.portFlags e.isPortReserved = true e.effectiveNetProtos = netProtos e.ID.LocalPort = port @@ -2450,7 +2432,7 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err *tcpip.Error) { // Any failures beyond this point must remove the port registration. defer func(portFlags ports.Flags, bindToDevice tcpip.NICID) { if err != nil { - e.stack.ReleasePort(netProtos, ProtocolNumber, addr.Addr, port, portFlags, bindToDevice) + e.stack.ReleasePort(netProtos, ProtocolNumber, addr.Addr, port, portFlags, bindToDevice, tcpip.FullAddress{}) e.isPortReserved = false e.effectiveNetProtos = nil e.ID.LocalPort = 0 @@ -2473,6 +2455,10 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err *tcpip.Error) { e.ID.LocalAddress = addr.Addr } + if err := e.stack.CheckRegisterTransportEndpoint(e.boundNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e.boundPortFlags, e.boundBindToDevice); err != nil { + return err + } + // Mark endpoint as bound. 
e.setEndpointState(StateBound) diff --git a/pkg/tcpip/transport/tcp/endpoint_state.go b/pkg/tcpip/transport/tcp/endpoint_state.go index 0bebec2d1..8258c0ecc 100644 --- a/pkg/tcpip/transport/tcp/endpoint_state.go +++ b/pkg/tcpip/transport/tcp/endpoint_state.go @@ -93,10 +93,6 @@ func (e *endpoint) beforeSave() { if e.waiterQueue != nil && !e.waiterQueue.IsEmpty() { panic("endpoint still has waiters upon save") } - - if e.EndpointState() != StateClose && !((e.EndpointState() == StateBound || e.EndpointState() == StateListen) == e.isPortReserved) { - panic("endpoints which are not in the closed state must have a reserved port IFF they are in bound or listen state") - } } // saveAcceptedChan is invoked by stateify. @@ -198,14 +194,17 @@ func (e *endpoint) Resume(s *stack.Stack) { } bind := func() { - if len(e.BindAddr) == 0 { - e.BindAddr = e.ID.LocalAddress + addr, _, err := e.checkV4MappedLocked(tcpip.FullAddress{Addr: e.BindAddr, Port: e.ID.LocalPort}) + if err != nil { + panic("unable to parse BindAddr: " + err.String()) } - addr := e.BindAddr - port := e.ID.LocalPort - if err := e.Bind(tcpip.FullAddress{Addr: addr, Port: port}); err != nil { - panic(fmt.Sprintf("endpoint binding [%v]:%d failed: %v", addr, port, err)) + if ok := e.stack.ReserveTuple(e.effectiveNetProtos, ProtocolNumber, addr.Addr, addr.Port, e.boundPortFlags, e.boundBindToDevice, e.boundDest); !ok { + panic(fmt.Sprintf("unable to re-reserve tuple (%v, %q, %d, %+v, %d, %v)", e.effectiveNetProtos, addr.Addr, addr.Port, e.boundPortFlags, e.boundBindToDevice, e.boundDest)) } + e.isPortReserved = true + + // Mark endpoint as bound. + e.setEndpointState(StateBound) } switch { @@ -277,17 +276,7 @@ func (e *endpoint) Resume(s *stack.Stack) { tcpip.AsyncLoading.Done() }() case epState == StateClose: - if e.isPortReserved { - tcpip.AsyncLoading.Add(1) - go func() { - connectedLoading.Wait() - listenLoading.Wait() - connectingLoading.Wait() - bind() - e.setEndpointState(StateClose) - tcpip.AsyncLoading.Done() - }() - } + e.isPortReserved = false e.state = StateClose e.stack.CompleteTransportEndpointCleanup(e) tcpip.DeleteDanglingEndpoint(e) diff --git a/pkg/tcpip/transport/tcp/tcp_test.go b/pkg/tcpip/transport/tcp/tcp_test.go index aca6a7951..2632a3c67 100644 --- a/pkg/tcpip/transport/tcp/tcp_test.go +++ b/pkg/tcpip/transport/tcp/tcp_test.go @@ -3879,6 +3879,9 @@ func TestReusePort(t *testing.T) { if err != nil { t.Fatalf("NewEndpoint failed; %s", err) } + if err := c.EP.SetSockOptBool(tcpip.ReuseAddressOption, true); err != nil { + t.Fatalf("SetSockOptBool ReuseAddressOption failed: %s", err) + } if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil { t.Fatalf("Bind failed: %s", err) } @@ -3888,6 +3891,9 @@ func TestReusePort(t *testing.T) { if err != nil { t.Fatalf("NewEndpoint failed; %s", err) } + if err := c.EP.SetSockOptBool(tcpip.ReuseAddressOption, true); err != nil { + t.Fatalf("SetSockOptBool ReuseAddressOption failed: %s", err) + } if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil { t.Fatalf("Bind failed: %s", err) } @@ -3898,6 +3904,9 @@ func TestReusePort(t *testing.T) { if err != nil { t.Fatalf("NewEndpoint failed; %s", err) } + if err := c.EP.SetSockOptBool(tcpip.ReuseAddressOption, true); err != nil { + t.Fatalf("SetSockOptBool ReuseAddressOption failed: %s", err) + } if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil { t.Fatalf("Bind failed: %s", err) } @@ -3910,6 +3919,9 @@ func TestReusePort(t *testing.T) { if err != nil { 
t.Fatalf("NewEndpoint failed; %s", err) } + if err := c.EP.SetSockOptBool(tcpip.ReuseAddressOption, true); err != nil { + t.Fatalf("SetSockOptBool ReuseAddressOption failed: %s", err) + } if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil { t.Fatalf("Bind failed: %s", err) } @@ -3920,6 +3932,9 @@ func TestReusePort(t *testing.T) { if err != nil { t.Fatalf("NewEndpoint failed; %s", err) } + if err := c.EP.SetSockOptBool(tcpip.ReuseAddressOption, true); err != nil { + t.Fatalf("SetSockOptBool ReuseAddressOption failed: %s", err) + } if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil { t.Fatalf("Bind failed: %s", err) } @@ -3932,6 +3947,9 @@ func TestReusePort(t *testing.T) { if err != nil { t.Fatalf("NewEndpoint failed; %s", err) } + if err := c.EP.SetSockOptBool(tcpip.ReuseAddressOption, true); err != nil { + t.Fatalf("SetSockOptBool ReuseAddressOption failed: %s", err) + } if err := c.EP.Bind(tcpip.FullAddress{Port: context.StackPort}); err != nil { t.Fatalf("Bind failed: %s", err) } diff --git a/pkg/tcpip/transport/udp/endpoint.go b/pkg/tcpip/transport/udp/endpoint.go index 40d66ef09..6ea212093 100644 --- a/pkg/tcpip/transport/udp/endpoint.go +++ b/pkg/tcpip/transport/udp/endpoint.go @@ -231,7 +231,7 @@ func (e *endpoint) Close() { switch e.state { case StateBound, StateConnected: e.stack.UnregisterTransportEndpoint(e.RegisterNICID, e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundPortFlags, e.boundBindToDevice) - e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice) + e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice, tcpip.FullAddress{}) e.boundBindToDevice = 0 e.boundPortFlags = ports.Flags{} } @@ -1047,7 +1047,7 @@ func (e *endpoint) Disconnect() *tcpip.Error { } else { if e.ID.LocalPort != 0 { // Release the ephemeral port. 
- e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, boundPortFlags, e.boundBindToDevice) + e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, boundPortFlags, e.boundBindToDevice, tcpip.FullAddress{}) e.boundPortFlags = ports.Flags{} } e.state = StateInitial @@ -1198,7 +1198,7 @@ func (*endpoint) Accept() (tcpip.Endpoint, *waiter.Queue, *tcpip.Error) { func (e *endpoint) registerWithStack(nicID tcpip.NICID, netProtos []tcpip.NetworkProtocolNumber, id stack.TransportEndpointID) (stack.TransportEndpointID, tcpip.NICID, *tcpip.Error) { if e.ID.LocalPort == 0 { - port, err := e.stack.ReservePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.portFlags, e.bindToDevice) + port, err := e.stack.ReservePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.portFlags, e.bindToDevice, tcpip.FullAddress{}) if err != nil { return id, e.bindToDevice, err } @@ -1208,7 +1208,7 @@ func (e *endpoint) registerWithStack(nicID tcpip.NICID, netProtos []tcpip.Networ err := e.stack.RegisterTransportEndpoint(nicID, netProtos, ProtocolNumber, id, e, e.boundPortFlags, e.bindToDevice) if err != nil { - e.stack.ReleasePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.boundPortFlags, e.bindToDevice) + e.stack.ReleasePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.boundPortFlags, e.bindToDevice, tcpip.FullAddress{}) e.boundPortFlags = ports.Flags{} } return id, e.bindToDevice, err diff --git a/test/syscalls/linux/socket_bind_to_device_sequence.cc b/test/syscalls/linux/socket_bind_to_device_sequence.cc index 4b2cb0606..6557c8e8e 100644 --- a/test/syscalls/linux/socket_bind_to_device_sequence.cc +++ b/test/syscalls/linux/socket_bind_to_device_sequence.cc @@ -363,10 +363,6 @@ TEST_P(BindToDeviceSequenceTest, BindTwiceWithReuseOnce) { } TEST_P(BindToDeviceSequenceTest, BindWithReuseAddr) { - // FIXME(b/158253835): Support SO_REUSEADDR on TCP sockets. - SKIP_IF(IsRunningOnGvisor() && - (GetParam().type & SOCK_STREAM) == SOCK_STREAM); - ASSERT_NO_FATAL_FAILURE( BindSocket(/* reusePort */ false, /* reuse_addr */ true)); ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ false, @@ -406,10 +402,6 @@ TEST_P(BindToDeviceSequenceTest, BindReuseAddrReusePortThenReusePort) { } TEST_P(BindToDeviceSequenceTest, BindReuseAddrReusePortThenReuseAddr) { - // FIXME(b/158253835): Support SO_REUSEADDR on TCP sockets. - SKIP_IF(IsRunningOnGvisor() && - (GetParam().type & SOCK_STREAM) == SOCK_STREAM); - ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true, /* reuse_addr */ true, /* bind_to_device */ 0)); @@ -436,10 +428,6 @@ TEST_P(BindToDeviceSequenceTest, BindDoubleReuseAddrReusePortThenReusePort) { } TEST_P(BindToDeviceSequenceTest, BindDoubleReuseAddrReusePortThenReuseAddr) { - // FIXME(b/158253835): Support SO_REUSEADDR on TCP sockets. 
-  SKIP_IF(IsRunningOnGvisor() &&
-          (GetParam().type & SOCK_STREAM) == SOCK_STREAM);
-
   ASSERT_NO_FATAL_FAILURE(BindSocket(
       /* reuse_port */ true, /* reuse_addr */ true, /* bind_to_device */ 0));
   ASSERT_NO_FATAL_FAILURE(BindSocket(/* reuse_port */ true,
diff --git a/test/syscalls/linux/socket_inet_loopback.cc b/test/syscalls/linux/socket_inet_loopback.cc
index a914d9a12..18b9e4b70 100644
--- a/test/syscalls/linux/socket_inet_loopback.cc
+++ b/test/syscalls/linux/socket_inet_loopback.cc
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -686,24 +687,9 @@ TEST_P(SocketInetLoopbackTest, TCPFinWait2Test_NoRandomSave) {
   // be restarted causing the final bind/connect to fail.
   DisableSave ds;
 
-  // TODO(gvisor.dev/issue/1030): Portmanager does not track all 5 tuple
-  // reservations which causes the bind() to succeed on gVisor but connect
-  // correctly fails.
-  if (IsRunningOnGvisor()) {
-    ASSERT_THAT(
-        bind(conn_fd2.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),
-             conn_addrlen),
-        SyscallSucceeds());
-    ASSERT_THAT(RetryEINTR(connect)(conn_fd2.get(),
-                                    reinterpret_cast<sockaddr*>(&conn_addr),
-                                    conn_addrlen),
-                SyscallFailsWithErrno(EADDRINUSE));
-  } else {
-    ASSERT_THAT(
-        bind(conn_fd2.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),
-             conn_addrlen),
-        SyscallFailsWithErrno(EADDRINUSE));
-  }
+  ASSERT_THAT(bind(conn_fd2.get(),
+                   reinterpret_cast<sockaddr*>(&conn_bound_addr), conn_addrlen),
+              SyscallFailsWithErrno(EADDRINUSE));
 
   // Sleep for a little over the linger timeout to reduce flakiness in
   // save/restore tests.
@@ -1737,6 +1723,171 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, DualStackV6AnyReservesEverything) {
   ASSERT_THAT(bind(fd_v4.get(), reinterpret_cast<sockaddr*>(&addr_v4),
                    test_addr_v4.addr_len),
               SyscallFailsWithErrno(EADDRINUSE));
+
+  // Verify that binding the v4 any on the same port with a v4 socket
+  // fails.
+  TestAddress const& test_addr_v4_any = V4Any();
+  sockaddr_storage addr_v4_any = test_addr_v4_any.addr;
+  ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_any.family(), &addr_v4_any, port));
+  const FileDescriptor fd_v4_any = ASSERT_NO_ERRNO_AND_VALUE(
+      Socket(test_addr_v4_any.family(), param.type, 0));
+  ASSERT_THAT(bind(fd_v4_any.get(), reinterpret_cast<sockaddr*>(&addr_v4_any),
+                   test_addr_v4_any.addr_len),
+              SyscallFailsWithErrno(EADDRINUSE));
+}
+
+TEST_P(SocketMultiProtocolInetLoopbackTest,
+       DualStackV6AnyReuseAddrDoesNotReserveV4Any) {
+  auto const& param = GetParam();
+
+  // Bind the v6 any on a dual stack socket.
+  TestAddress const& test_addr_dual = V6Any();
+  sockaddr_storage addr_dual = test_addr_dual.addr;
+  const FileDescriptor fd_dual =
+      ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_dual.family(), param.type, 0));
+  ASSERT_THAT(setsockopt(fd_dual.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
+                         sizeof(kSockOptOn)),
+              SyscallSucceeds());
+  ASSERT_THAT(bind(fd_dual.get(), reinterpret_cast<sockaddr*>(&addr_dual),
+                   test_addr_dual.addr_len),
+              SyscallSucceeds());
+
+  // Get the port that we bound.
+  socklen_t addrlen = test_addr_dual.addr_len;
+  ASSERT_THAT(getsockname(fd_dual.get(),
+                          reinterpret_cast<sockaddr*>(&addr_dual), &addrlen),
+              SyscallSucceeds());
+  uint16_t const port =
+      ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
+
+  // Verify that binding the v4 any on the same port with a v4 socket succeeds.
+  TestAddress const& test_addr_v4_any = V4Any();
+  sockaddr_storage addr_v4_any = test_addr_v4_any.addr;
+  ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_any.family(), &addr_v4_any, port));
+  const FileDescriptor fd_v4_any = ASSERT_NO_ERRNO_AND_VALUE(
+      Socket(test_addr_v4_any.family(), param.type, 0));
+  ASSERT_THAT(setsockopt(fd_v4_any.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
+                         sizeof(kSockOptOn)),
+              SyscallSucceeds());
+  ASSERT_THAT(bind(fd_v4_any.get(), reinterpret_cast<sockaddr*>(&addr_v4_any),
+                   test_addr_v4_any.addr_len),
+              SyscallSucceeds());
+}
+
+TEST_P(SocketMultiProtocolInetLoopbackTest,
+       DualStackV6AnyReuseAddrListenReservesV4Any) {
+  auto const& param = GetParam();
+
+  // Only TCP sockets are supported.
+  SKIP_IF((param.type & SOCK_STREAM) == 0);
+
+  // Bind the v6 any on a dual stack socket.
+  TestAddress const& test_addr_dual = V6Any();
+  sockaddr_storage addr_dual = test_addr_dual.addr;
+  const FileDescriptor fd_dual =
+      ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_dual.family(), param.type, 0));
+  ASSERT_THAT(setsockopt(fd_dual.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
+                         sizeof(kSockOptOn)),
+              SyscallSucceeds());
+  ASSERT_THAT(bind(fd_dual.get(), reinterpret_cast<sockaddr*>(&addr_dual),
+                   test_addr_dual.addr_len),
+              SyscallSucceeds());
+
+  ASSERT_THAT(listen(fd_dual.get(), 5), SyscallSucceeds());
+
+  // Get the port that we bound.
+  socklen_t addrlen = test_addr_dual.addr_len;
+  ASSERT_THAT(getsockname(fd_dual.get(),
+                          reinterpret_cast<sockaddr*>(&addr_dual), &addrlen),
+              SyscallSucceeds());
+  uint16_t const port =
+      ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
+
+  // Verify that binding the v4 any on the same port with a v4 socket succeeds.
+  TestAddress const& test_addr_v4_any = V4Any();
+  sockaddr_storage addr_v4_any = test_addr_v4_any.addr;
+  ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_any.family(), &addr_v4_any, port));
+  const FileDescriptor fd_v4_any = ASSERT_NO_ERRNO_AND_VALUE(
+      Socket(test_addr_v4_any.family(), param.type, 0));
+  ASSERT_THAT(setsockopt(fd_v4_any.get(), SOL_SOCKET, SO_REUSEADDR, &kSockOptOn,
+                         sizeof(kSockOptOn)),
+              SyscallSucceeds());
+
+  ASSERT_THAT(bind(fd_v4_any.get(), reinterpret_cast<sockaddr*>(&addr_v4_any),
+                   test_addr_v4_any.addr_len),
+              SyscallFailsWithErrno(EADDRINUSE));
+}
+
+TEST_P(SocketMultiProtocolInetLoopbackTest,
+       DualStackV6AnyWithListenReservesEverything) {
+  auto const& param = GetParam();
+
+  // Only TCP sockets are supported.
+  SKIP_IF((param.type & SOCK_STREAM) == 0);
+
+  // Bind the v6 any on a dual stack socket.
+  TestAddress const& test_addr_dual = V6Any();
+  sockaddr_storage addr_dual = test_addr_dual.addr;
+  const FileDescriptor fd_dual =
+      ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_dual.family(), param.type, 0));
+  ASSERT_THAT(bind(fd_dual.get(), reinterpret_cast<sockaddr*>(&addr_dual),
+                   test_addr_dual.addr_len),
+              SyscallSucceeds());
+
+  ASSERT_THAT(listen(fd_dual.get(), 5), SyscallSucceeds());
+
+  // Get the port that we bound.
+  socklen_t addrlen = test_addr_dual.addr_len;
+  ASSERT_THAT(getsockname(fd_dual.get(),
+                          reinterpret_cast<sockaddr*>(&addr_dual), &addrlen),
+              SyscallSucceeds());
+  uint16_t const port =
+      ASSERT_NO_ERRNO_AND_VALUE(AddrPort(test_addr_dual.family(), addr_dual));
+
+  // Verify that binding the v6 loopback with the same port fails.
+  TestAddress const& test_addr_v6 = V6Loopback();
+  sockaddr_storage addr_v6 = test_addr_v6.addr;
+  ASSERT_NO_ERRNO(SetAddrPort(test_addr_v6.family(), &addr_v6, port));
+  const FileDescriptor fd_v6 =
+      ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v6.family(), param.type, 0));
+  ASSERT_THAT(bind(fd_v6.get(), reinterpret_cast<sockaddr*>(&addr_v6),
+                   test_addr_v6.addr_len),
+              SyscallFailsWithErrno(EADDRINUSE));
+
+  // Verify that binding the v4 loopback on the same port with a v6 socket
+  // fails.
+  TestAddress const& test_addr_v4_mapped = V4MappedLoopback();
+  sockaddr_storage addr_v4_mapped = test_addr_v4_mapped.addr;
+  ASSERT_NO_ERRNO(
+      SetAddrPort(test_addr_v4_mapped.family(), &addr_v4_mapped, port));
+  const FileDescriptor fd_v4_mapped = ASSERT_NO_ERRNO_AND_VALUE(
+      Socket(test_addr_v4_mapped.family(), param.type, 0));
+  ASSERT_THAT(
+      bind(fd_v4_mapped.get(), reinterpret_cast<sockaddr*>(&addr_v4_mapped),
+           test_addr_v4_mapped.addr_len),
+      SyscallFailsWithErrno(EADDRINUSE));
+
+  // Verify that binding the v4 loopback on the same port with a v4 socket
+  // fails.
+  TestAddress const& test_addr_v4 = V4Loopback();
+  sockaddr_storage addr_v4 = test_addr_v4.addr;
+  ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4.family(), &addr_v4, port));
+  const FileDescriptor fd_v4 =
+      ASSERT_NO_ERRNO_AND_VALUE(Socket(test_addr_v4.family(), param.type, 0));
+  ASSERT_THAT(bind(fd_v4.get(), reinterpret_cast<sockaddr*>(&addr_v4),
+                   test_addr_v4.addr_len),
+              SyscallFailsWithErrno(EADDRINUSE));
+
+  // Verify that binding the v4 any on the same port with a v4 socket
+  // fails.
+  TestAddress const& test_addr_v4_any = V4Any();
+  sockaddr_storage addr_v4_any = test_addr_v4_any.addr;
+  ASSERT_NO_ERRNO(SetAddrPort(test_addr_v4_any.family(), &addr_v4_any, port));
+  const FileDescriptor fd_v4_any = ASSERT_NO_ERRNO_AND_VALUE(
+      Socket(test_addr_v4_any.family(), param.type, 0));
+  ASSERT_THAT(bind(fd_v4_any.get(), reinterpret_cast<sockaddr*>(&addr_v4_any),
+                   test_addr_v4_any.addr_len),
+              SyscallFailsWithErrno(EADDRINUSE));
 }
 
 TEST_P(SocketMultiProtocolInetLoopbackTest, V6OnlyV6AnyReservesV6) {
@@ -1798,10 +1949,6 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6OnlyV6AnyReservesV6) {
 
 TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReserved) {
   auto const& param = GetParam();
 
-  // FIXME(b/76031995): Support disabling SO_REUSEADDR for TCP sockets and make
-  // it disabled by default.
-  SKIP_IF(IsRunningOnGvisor() && param.type == SOCK_STREAM);
-
   for (int i = 0; true; i++) {
     // Bind the v6 loopback on a dual stack socket.
     TestAddress const& test_addr = V6Loopback();
@@ -1864,17 +2011,6 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReserved) {
                      test_addr_v6.addr_len),
                 SyscallFailsWithErrno(EADDRINUSE));
 
-    // Verify that binding the v4 any with the same port fails.
-    TestAddress const& test_addr_v4_any = V4Any();
-    sockaddr_storage addr_v4_any = test_addr_v4_any.addr;
-    ASSERT_NO_ERRNO(
-        SetAddrPort(test_addr_v4_any.family(), &addr_v4_any, ephemeral_port));
-    const FileDescriptor fd_v4_any = ASSERT_NO_ERRNO_AND_VALUE(
-        Socket(test_addr_v4_any.family(), param.type, 0));
-    ASSERT_THAT(bind(fd_v4_any.get(), reinterpret_cast<sockaddr*>(&addr_v4_any),
-                     test_addr_v4_any.addr_len),
-                SyscallFailsWithErrno(EADDRINUSE));
-
     // Verify that we can still bind the v4 loopback on the same port.
     TestAddress const& test_addr_v4_mapped = V4MappedLoopback();
     sockaddr_storage addr_v4_mapped = test_addr_v4_mapped.addr;
@@ -1963,10 +2099,6 @@ TEST_P(SocketMultiProtocolInetLoopbackTest, V6EphemeralPortReservedReuseAddr) {
 
 TEST_P(SocketMultiProtocolInetLoopbackTest, V4MappedEphemeralPortReserved) {
   auto const& param = GetParam();
 
-  // FIXME(b/76031995): Support disabling SO_REUSEADDR for TCP sockets and make
-  // it disabled by default.
-  SKIP_IF(IsRunningOnGvisor() && param.type == SOCK_STREAM);
-
   for (int i = 0; true; i++) {
     // Bind the v4 loopback on a dual stack socket.
     TestAddress const& test_addr = V4MappedLoopback();
@@ -2152,10 +2284,6 @@ TEST_P(SocketMultiProtocolInetLoopbackTest,
 
 TEST_P(SocketMultiProtocolInetLoopbackTest, V4EphemeralPortReserved) {
   auto const& param = GetParam();
 
-  // FIXME(b/76031995): Support disabling SO_REUSEADDR for TCP sockets and make
-  // it disabled by default.
-  SKIP_IF(IsRunningOnGvisor() && param.type == SOCK_STREAM);
-
   for (int i = 0; true; i++) {
     // Bind the v4 loopback on a v4 socket.
     TestAddress const& test_addr = V4Loopback();
diff --git a/test/syscalls/linux/socket_ip_unbound.cc b/test/syscalls/linux/socket_ip_unbound.cc
index 796598eee..1c7b0cf90 100644
--- a/test/syscalls/linux/socket_ip_unbound.cc
+++ b/test/syscalls/linux/socket_ip_unbound.cc
@@ -425,10 +425,6 @@ TEST_P(IPUnboundSocketTest, InsufficientBufferTOS) {
 
 TEST_P(IPUnboundSocketTest, ReuseAddrDefault) {
   auto socket = ASSERT_NO_ERRNO_AND_VALUE(NewSocket());
-  // FIXME(b/76031995): gVisor TCP sockets default to SO_REUSEADDR enabled.
-  SKIP_IF(IsRunningOnGvisor() &&
-          (GetParam().type & SOCK_STREAM) == SOCK_STREAM);
-
   int get = -1;
   socklen_t get_sz = sizeof(get);
   ASSERT_THAT(
-- cgit v1.2.3
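
The tcp_test.go hunks above change TestReusePort so that every endpoint opts in to address reuse before binding. For readers unfamiliar with the tcpip API, a minimal, self-contained sketch of that call pattern follows. It is illustrative only and not part of the patch series: the stack construction, the loopback NIC, the address, and the port number are assumptions chosen for the example, using the API shapes of the tree at this point in its history. The SetSockOptBool and Bind calls mirror the test hunks above.

package main

import (
	"log"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/link/loopback"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
	"gvisor.dev/gvisor/pkg/waiter"
)

func main() {
	// Build a minimal userspace stack with one loopback NIC.
	s := stack.New(stack.Options{
		NetworkProtocols:   []stack.NetworkProtocol{ipv4.NewProtocol()},
		TransportProtocols: []stack.TransportProtocol{tcp.NewProtocol()},
	})
	if err := s.CreateNIC(1, loopback.New()); err != nil {
		log.Fatalf("CreateNIC failed: %s", err)
	}
	if err := s.AddAddress(1, ipv4.ProtocolNumber, tcpip.Address("\x7f\x00\x00\x01")); err != nil {
		log.Fatalf("AddAddress failed: %s", err)
	}

	var wq waiter.Queue
	ep, err := s.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
	if err != nil {
		log.Fatalf("NewEndpoint failed: %s", err)
	}
	// As in the test hunks: opt in to address reuse before Bind. A second
	// endpoint that does the same may then share this local port.
	if err := ep.SetSockOptBool(tcpip.ReuseAddressOption, true); err != nil {
		log.Fatalf("SetSockOptBool(ReuseAddressOption) failed: %s", err)
	}
	if err := ep.Bind(tcpip.FullAddress{Port: 9999}); err != nil {
		log.Fatalf("Bind failed: %s", err)
	}
	log.Println("bound with ReuseAddressOption set")
}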
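
The udp/endpoint.go hunks extend ReservePort and ReleasePort with a trailing tcpip.FullAddress argument. UDP passes the zero value everywhere; the likely reading, consistent with the TCPFinWait2Test change above, is that the argument carries the remote destination so the port manager can key reservations by connection 5-tuple rather than by local address and port alone, but that rationale is an inference from the diff, not stated in it. A hedged sketch of the new call shape follows; bindEphemeral is a hypothetical helper invented for illustration.

package portsexample

import (
	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/ports"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
	"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
)

// bindEphemeral reserves an ephemeral UDP port the way the endpoint code
// above does: port 0 asks the port manager to pick one, and the zero
// tcpip.FullAddress means "no particular destination".
func bindEphemeral(s *stack.Stack, netProtos []tcpip.NetworkProtocolNumber,
	addr tcpip.Address, flags ports.Flags, bindToDevice tcpip.NICID) (uint16, *tcpip.Error) {
	port, err := s.ReservePort(netProtos, udp.ProtocolNumber, addr, 0, flags,
		bindToDevice, tcpip.FullAddress{})
	if err != nil {
		return 0, err
	}
	// On any later failure the reservation must be released with the same
	// arguments, destination included, exactly as the Close/Disconnect
	// hunks above do:
	//   s.ReleasePort(netProtos, udp.ProtocolNumber, addr, port, flags,
	//                 bindToDevice, tcpip.FullAddress{})
	return port, nil
}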
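
The three new DualStackV6Any* tests pin down a behavior matrix: with SO_REUSEADDR set and no listener, binding the v6 any address does not reserve the v4 any address on the same port; once the v6-any socket calls listen(), the v4-any bind fails with EADDRINUSE; and without SO_REUSEADDR, a listening v6-any socket blocks every loopback and any-address bind on that port. What follows is a rough Go transcription of the first two cases using golang.org/x/sys/unix; it summarizes the C++ tests rather than reproduces them, and the socket types and loopback environment are assumptions.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Dual-stack v6-any socket with SO_REUSEADDR, bound to an ephemeral port.
	fd6, err := unix.Socket(unix.AF_INET6, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd6)
	if err := unix.SetsockoptInt(fd6, unix.SOL_SOCKET, unix.SO_REUSEADDR, 1); err != nil {
		log.Fatal(err)
	}
	if err := unix.Bind(fd6, &unix.SockaddrInet6{}); err != nil { // port 0: kernel picks
		log.Fatal(err)
	}
	sa, err := unix.Getsockname(fd6)
	if err != nil {
		log.Fatal(err)
	}
	port := sa.(*unix.SockaddrInet6).Port

	// A v4-any socket with SO_REUSEADDR on the same port. This bind is
	// expected to succeed while fd6 is not listening
	// (DualStackV6AnyReuseAddrDoesNotReserveV4Any); after listen(fd6, n)
	// the same bind should fail with EADDRINUSE
	// (DualStackV6AnyReuseAddrListenReservesV4Any).
	fd4, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd4)
	if err := unix.SetsockoptInt(fd4, unix.SOL_SOCKET, unix.SO_REUSEADDR, 1); err != nil {
		log.Fatal(err)
	}
	if err := unix.Bind(fd4, &unix.SockaddrInet4{Port: port}); err != nil {
		log.Fatalf("bind v4-any on port %d: %v", port, err)
	}
	fmt.Println("both binds succeeded on port", port)
}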