author | Mithun Iyer <iyerm@google.com> | 2021-04-13 00:56:32 -0700
committer | gVisor bot <gvisor-bot@google.com> | 2021-04-13 00:58:56 -0700
commit | 326394b79a62061e3e239ac104c151ca13647439 (patch)
tree | a67047c75a10c8c8bc81d7261767407ddfffc683 /pkg/tcpip/transport/tcp/accept.go
parent | e5f58e89bbd376469073c749592d0fb0e3b4c6cb (diff)
Fix listener close, client connect race
Fix a race where the ACK completing the handshake can be dropped by
a closing listener without a RST to the peer. The listener close
resets the accept queue, which causes the connecting endpoint in
SYNRCVD state to drop the ACK, thinking the queue is filled up.
PiperOrigin-RevId: 368165509
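
To make the race concrete, here is a minimal Go sketch. The types below are simplified, hypothetical stand-ins that only mirror the names in the patch, not the real gVisor structures: closing the listener resets the accepted queue to its zero value, and the pre-fix length-equals-capacity check then compares 0 == 0, so a closed listener reads as "full" and the handshake-completing ACK is silently dropped.

```go
package main

import "fmt"

// Simplified, hypothetical stand-ins for the listener's accept-queue
// state. The names mirror the patch, but these are not the real gVisor
// types.
type endpointList struct{ n int }

func (l endpointList) Len() int { return l.n }

type accepted struct {
	endpoints endpointList
	cap       int
}

// Old check: after a listener close resets the queue to its zero value,
// Len() == cap compares 0 == 0, so the closed listener reads as "full"
// and the handshake-completing ACK is dropped without a RST.
func acceptQueueIsFullOld(a accepted) bool {
	return a.endpoints.Len() == a.cap
}

// Fixed check: a zero-value (reset) queue is never reported as full, so
// the ACK is processed instead of being silently dropped.
func acceptQueueIsFullNew(a accepted) bool {
	return a != (accepted{}) && a.endpoints.Len() == a.cap
}

func main() {
	closedListener := accepted{} // queue reset by listener close

	fmt.Println(acceptQueueIsFullOld(closedListener)) // true: the bug, ACK dropped
	fmt.Println(acceptQueueIsFullNew(closedListener)) // false: ACK handled
}
```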
Diffstat (limited to 'pkg/tcpip/transport/tcp/accept.go')
-rw-r--r-- | pkg/tcpip/transport/tcp/accept.go | 10
1 file changed, 5 insertions, 5 deletions
```diff
diff --git a/pkg/tcpip/transport/tcp/accept.go b/pkg/tcpip/transport/tcp/accept.go
index 7acc7e7b0..63c46b1be 100644
--- a/pkg/tcpip/transport/tcp/accept.go
+++ b/pkg/tcpip/transport/tcp/accept.go
@@ -511,22 +511,22 @@ func (e *endpoint) handleSynSegment(ctx *listenContext, s *segment, opts *header
 
 func (e *endpoint) synRcvdBacklogFull() bool {
 	e.acceptMu.Lock()
-	backlog := e.accepted.cap
+	acceptedCap := e.accepted.cap
 	e.acceptMu.Unlock()
-	// The allocated accepted channel size would always be one greater than the
+	// The capacity of the accepted queue would always be one greater than the
 	// listen backlog. But, the SYNRCVD connections count is always checked
 	// against the listen backlog value for Linux parity reason.
 	// https://github.com/torvalds/linux/blob/7acac4b3196/include/net/inet_connection_sock.h#L280
 	//
 	// We maintain an equality check here as the synRcvdCount is incremented
 	// and compared only from a single listener context and the capacity of
-	// the accepted channel can only increase by a new listen call.
-	return int(atomic.LoadInt32(&e.synRcvdCount)) == backlog-1
+	// the accepted queue can only increase by a new listen call.
+	return int(atomic.LoadInt32(&e.synRcvdCount)) == acceptedCap-1
 }
 
 func (e *endpoint) acceptQueueIsFull() bool {
 	e.acceptMu.Lock()
-	full := e.accepted.endpoints.Len() == e.accepted.cap
+	full := e.accepted != (accepted{}) && e.accepted.endpoints.Len() == e.accepted.cap
 	e.acceptMu.Unlock()
 	return full
 }
```
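
A note on the other hunk: per the comment in the patch, the accepted queue is allocated with a capacity one greater than the listen backlog, so comparing synRcvdCount against acceptedCap-1 keeps the SYNRCVD limit aligned with Linux, which checks the count against the listen backlog itself. The strict equality (rather than a >= comparison) is safe because synRcvdCount is incremented and compared only from a single listener context, and the queue capacity can only grow via a new listen call.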