author     Arthur Sfez <asfez@google.com>    2021-09-21 16:09:58 -0700
committer  gVisor bot <gvisor-bot@google.com>    2021-09-21 16:13:18 -0700
commit     c633a7f9d1c15c8f1639a95809d875576ac7707f (patch)
tree       cc4fcc8907d982225664616470e4f04905797c37 /test
parent     c485602ded3982f8fd1078e34a4b298879f1807e (diff)
Deliver endpoints to the accept queue synchronously when possible
Before this change, when a new connection was created after receiving an ACK that matched a SYN-cookie, it was always delivered asynchronously to the accept queue. This left a window in which the listening endpoint could process a SYN from another client before the delivery completed, without yet knowing that the pending delivery was about to fill the queue.

Now, when an ACK matching a SYN-cookie is received, the new endpoint is created and moved to the accept queue synchronously, while holding the accept lock.

Fixes #6545

PiperOrigin-RevId: 398107254
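The sketch below (in Go, the language of gVisor's TCP stack) illustrates the general pattern the commit message describes: the endpoint created from a valid SYN-cookie ACK is placed in the accept queue while the accept lock is held, so a concurrent SYN handler always observes an accurate queue length. All type, field, and function names here are illustrative placeholders, not gVisor's actual API.

    // Illustrative sketch only; names do not correspond to gVisor's real types.
    package main

    import (
    	"fmt"
    	"sync"
    )

    type endpoint struct{ id int }

    type listener struct {
    	mu       sync.Mutex // the "accept lock"
    	queue    []*endpoint
    	capacity int
    }

    // deliverSynchronously models the fixed behavior: the new endpoint is
    // appended to the accept queue under the lock, so no other handler can
    // observe a stale queue length while the delivery is in flight.
    func (l *listener) deliverSynchronously(e *endpoint) bool {
    	l.mu.Lock()
    	defer l.mu.Unlock()
    	if len(l.queue) >= l.capacity {
    		return false // queue full: do not complete the handshake
    	}
    	l.queue = append(l.queue, e)
    	return true
    }

    // canSendSynAck models the check performed when a new SYN arrives. With
    // synchronous delivery, this check can no longer race with an endpoint
    // that has been created but not yet queued.
    func (l *listener) canSendSynAck() bool {
    	l.mu.Lock()
    	defer l.mu.Unlock()
    	return len(l.queue) < l.capacity
    }

    func main() {
    	l := &listener{capacity: 1} // a backlog of 0 still admits one connection
    	fmt.Println(l.deliverSynchronously(&endpoint{id: 1})) // true: queue now full
    	fmt.Println(l.canSendSynAck())                        // false: the next SYN is not acknowledged
    }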
Diffstat (limited to 'test')
-rw-r--r--  test/syscalls/linux/tcp_socket.cc  60
1 file changed, 60 insertions, 0 deletions
diff --git a/test/syscalls/linux/tcp_socket.cc b/test/syscalls/linux/tcp_socket.cc
index 3fbbf1423..607182ffd 100644
--- a/test/syscalls/linux/tcp_socket.cc
+++ b/test/syscalls/linux/tcp_socket.cc
@@ -2088,6 +2088,66 @@ TEST_P(SimpleTcpSocketTest, ConnectUnspecifiedAddress) {
}
}
+TEST_P(SimpleTcpSocketTest, OnlyAcknowledgeBacklogConnections) {
+ // At some point, there was a bug in gVisor where a connection could be
+ // SYN-ACK'd by the server even if the accept queue was already full. This
+ // was possible because, once the listener processed an ACK, it moved the
+ // new connection into the accept queue asynchronously. This created a window
+ // in which the listener could process another SYN before completing the
+ // delivery that would have filled the accept queue.
+ //
+ // This test checks that there is no such race.
+
+ std::array<std::optional<ScopedThread>, 100> threads;
+ for (auto& thread : threads) {
+ thread.emplace([]() {
+ FileDescriptor bound_s = ASSERT_NO_ERRNO_AND_VALUE(
+ Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
+
+ sockaddr_storage bound_addr =
+ ASSERT_NO_ERRNO_AND_VALUE(InetLoopbackAddr(GetParam()));
+ socklen_t bound_addrlen = sizeof(bound_addr);
+
+ ASSERT_THAT(bind(bound_s.get(), AsSockAddr(&bound_addr), bound_addrlen),
+ SyscallSucceeds());
+
+ // Start listening. Use a zero backlog to only allow one connection in the
+ // accept queue.
+ ASSERT_THAT(listen(bound_s.get(), 0), SyscallSucceeds());
+
+ // Get the address the socket is bound to because the port is chosen by
+ // the stack.
+ ASSERT_THAT(
+ getsockname(bound_s.get(), AsSockAddr(&bound_addr), &bound_addrlen),
+ SyscallSucceeds());
+
+ // Establish a connection, but do not accept it.
+ FileDescriptor connected_s = ASSERT_NO_ERRNO_AND_VALUE(
+ Socket(GetParam(), SOCK_STREAM, IPPROTO_TCP));
+ ASSERT_THAT(connect(connected_s.get(),
+ reinterpret_cast<const struct sockaddr*>(&bound_addr),
+ bound_addrlen),
+ SyscallSucceeds());
+
+ // Immediately attempt to establish another connection. Use a non-blocking
+ // socket because this connect is expected to time out.
+ FileDescriptor connecting_s = ASSERT_NO_ERRNO_AND_VALUE(
+ Socket(GetParam(), SOCK_STREAM | SOCK_NONBLOCK, IPPROTO_TCP));
+ ASSERT_THAT(connect(connecting_s.get(),
+ reinterpret_cast<const struct sockaddr*>(&bound_addr),
+ bound_addrlen),
+ SyscallFailsWithErrno(EINPROGRESS));
+
+ struct pollfd poll_fd = {
+ .fd = connecting_s.get(),
+ .events = POLLOUT,
+ };
+ EXPECT_THAT(RetryEINTR(poll)(&poll_fd, 1, 10),
+ SyscallSucceedsWithValue(0));
+ });
+ }
+}
+
// Tests that send will return EWOULDBLOCK initially with large buffer and will
// succeed after the send buffer size is increased.
TEST_P(TcpSocketTest, SendUnblocksOnSendBufferIncrease) {