author    Mithun Iyer <iyerm@google.com>  2021-04-13 00:56:32 -0700
committer gVisor bot <gvisor-bot@google.com>  2021-04-13 00:58:56 -0700
commit    326394b79a62061e3e239ac104c151ca13647439 (patch)
tree      a67047c75a10c8c8bc81d7261767407ddfffc683 /pkg
parent    e5f58e89bbd376469073c749592d0fb0e3b4c6cb (diff)
Fix listener close, client connect race
Fix a race where the ACK completing the handshake can be dropped by a closing listener without a RST being sent to the peer. Closing the listener resets the accepted queue, which causes the connecting endpoint in SYN-RCVD state to drop the ACK, thinking the queue is filled up.

PiperOrigin-RevId: 368165509
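The crux of the fix is in acceptQueueIsFull below: an accepted queue that has been reset to its zero value by a closing listener must not be mistaken for a full queue. A minimal sketch of that distinction, using simplified stand-in types rather than the real gVisor accepted/endpointList definitions:

package main

import "fmt"

// Simplified stand-in for the accepted queue; the real gVisor struct holds
// an intrusive endpoint list plus a capacity set by listen().
type acceptQueue struct {
	length int // number of queued, not-yet-accepted endpoints
	cap    int // capacity derived from the listen backlog
}

// Before the fix: a queue zeroed by a closing listener (length == 0, cap == 0)
// compares as "full" because 0 == 0, so the endpoint in SYN-RCVD drops the
// handshake-completing ACK without a RST going to the peer.
func isFullBuggy(q acceptQueue) bool {
	return q.length == q.cap
}

// After the fix: the zero-value queue (listener closed) is explicitly treated
// as not full, so the ACK is processed normally.
func isFullFixed(q acceptQueue) bool {
	return q != (acceptQueue{}) && q.length == q.cap
}

func main() {
	closed := acceptQueue{} // listener closed: queue reset to its zero value
	fmt.Println("buggy:", isFullBuggy(closed)) // true  -> ACK dropped
	fmt.Println("fixed:", isFullFixed(closed)) // false -> ACK handled
}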
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/tcpip/transport/tcp/accept.go         | 10
-rw-r--r--  pkg/tcpip/transport/tcp/endpoint.go       |  2
-rw-r--r--  pkg/tcpip/transport/tcp/endpoint_state.go |  2
3 files changed, 7 insertions, 7 deletions
diff --git a/pkg/tcpip/transport/tcp/accept.go b/pkg/tcpip/transport/tcp/accept.go
index 7acc7e7b0..63c46b1be 100644
--- a/pkg/tcpip/transport/tcp/accept.go
+++ b/pkg/tcpip/transport/tcp/accept.go
@@ -511,22 +511,22 @@ func (e *endpoint) handleSynSegment(ctx *listenContext, s *segment, opts *header
func (e *endpoint) synRcvdBacklogFull() bool {
e.acceptMu.Lock()
- backlog := e.accepted.cap
+ acceptedCap := e.accepted.cap
e.acceptMu.Unlock()
- // The allocated accepted channel size would always be one greater than the
+ // The capacity of the accepted queue would always be one greater than the
// listen backlog. But, the SYNRCVD connections count is always checked
// against the listen backlog value for Linux parity reason.
// https://github.com/torvalds/linux/blob/7acac4b3196/include/net/inet_connection_sock.h#L280
//
// We maintain an equality check here as the synRcvdCount is incremented
// and compared only from a single listener context and the capacity of
- // the accepted channel can only increase by a new listen call.
- return int(atomic.LoadInt32(&e.synRcvdCount)) == backlog-1
+ // the accepted queue can only increase by a new listen call.
+ return int(atomic.LoadInt32(&e.synRcvdCount)) == acceptedCap-1
}
func (e *endpoint) acceptQueueIsFull() bool {
e.acceptMu.Lock()
- full := e.accepted.endpoints.Len() == e.accepted.cap
+ full := e.accepted != (accepted{}) && e.accepted.endpoints.Len() == e.accepted.cap
e.acceptMu.Unlock()
return full
}
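As the comment in synRcvdBacklogFull above notes, the accepted queue is allocated one entry larger than the listen backlog, while SYN-RCVD connections are still counted against the backlog itself for Linux parity. An illustrative sketch of that arithmetic (the names below are placeholders, not the gVisor API):

package main

import "fmt"

// Illustrative only: models the invariant from the comment above, where the
// accepted queue capacity is the listen backlog plus one, so the SYN-RCVD
// count is compared against acceptedCap-1.
func main() {
	backlog := 3
	acceptedCap := backlog + 1 // queue allocated one larger than the backlog

	for synRcvdCount := 0; synRcvdCount <= backlog; synRcvdCount++ {
		full := synRcvdCount == acceptedCap-1 // equivalent to synRcvdCount == backlog
		fmt.Printf("synRcvdCount=%d synRcvdBacklogFull=%v\n", synRcvdCount, full)
	}
}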
diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go
index 1060a0a90..9afd2bb7f 100644
--- a/pkg/tcpip/transport/tcp/endpoint.go
+++ b/pkg/tcpip/transport/tcp/endpoint.go
@@ -962,7 +962,7 @@ func (e *endpoint) Readiness(mask waiter.EventMask) waiter.EventMask {
result = mask
case StateListen:
- // Check if there's anything in the accepted channel.
+ // Check if there's anything in the accepted queue.
if (mask & waiter.ReadableEvents) != 0 {
e.acceptMu.Lock()
if e.accepted.endpoints.Len() != 0 {
diff --git a/pkg/tcpip/transport/tcp/endpoint_state.go b/pkg/tcpip/transport/tcp/endpoint_state.go
index f51b3ad90..590775434 100644
--- a/pkg/tcpip/transport/tcp/endpoint_state.go
+++ b/pkg/tcpip/transport/tcp/endpoint_state.go
@@ -67,7 +67,7 @@ func (e *endpoint) beforeSave() {
e.mu.Lock()
}
if !e.workerRunning {
- // The endpoint must be in acceptedChan or has been just
+ // The endpoint must be in the accepted queue or has been just
// disconnected and closed.
break
}