From e855e9cebc45f5fd7a9583f476c8965fc395a15e Mon Sep 17 00:00:00 2001
From: Zhaozhong Ni
Date: Thu, 23 Aug 2018 16:13:22 -0700
Subject: netstack: make listening tcp socket close state setting and cleanup atomic.

Otherwise the socket saving logic might find workers still running for
closed sockets unexpectedly.

PiperOrigin-RevId: 210018905
Change-Id: I443a04d355613f5f9983252cc6863bff6e0eda3a
---
 pkg/tcpip/transport/tcp/accept.go | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

(limited to 'pkg/tcpip/transport')

diff --git a/pkg/tcpip/transport/tcp/accept.go b/pkg/tcpip/transport/tcp/accept.go
index 63058e420..c22ed5ea7 100644
--- a/pkg/tcpip/transport/tcp/accept.go
+++ b/pkg/tcpip/transport/tcp/accept.go
@@ -363,11 +363,6 @@ func (e *endpoint) protocolListenLoop(rcvWnd seqnum.Size) *tcpip.Error {
 		e.mu.Lock()
 		e.state = stateClosed
 
-		// Notify waiters that the endpoint is shutdown.
-		e.mu.Unlock()
-		e.waiterQueue.Notify(waiter.EventIn | waiter.EventOut)
-		e.mu.Lock()
-
 		// Do cleanup if needed.
 		e.completeWorkerLocked()
 
@@ -375,6 +370,9 @@ func (e *endpoint) protocolListenLoop(rcvWnd seqnum.Size) *tcpip.Error {
 			close(e.drainDone)
 		}
 		e.mu.Unlock()
+
+		// Notify waiters that the endpoint is shutdown.
+		e.waiterQueue.Notify(waiter.EventIn | waiter.EventOut)
 	}()
 
 	e.mu.Lock()
--
cgit v1.2.3
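
Below is a minimal, self-contained sketch of the ordering this patch enforces, not gVisor's actual code: the names endpoint, shutdown, completeWorkerLocked, and saver are simplified stand-ins. The point it illustrates is that the closed state and the worker cleanup are committed inside one critical section, and waiters are only notified after the lock is released, so a concurrent observer (such as the socket saving logic) can never see a closed endpoint whose worker is still registered.

package main

import (
	"fmt"
	"sync"
)

// endpoint is a simplified stand-in for the TCP endpoint in the patch.
type endpoint struct {
	mu            sync.Mutex
	closed        bool
	workerRunning bool
}

// completeWorkerLocked mimics the cleanup step; it must run with e.mu held.
func (e *endpoint) completeWorkerLocked() {
	e.workerRunning = false
}

// shutdown marks the endpoint closed and finishes worker cleanup while still
// holding the lock, mirroring the patched defer in protocolListenLoop.
// Waiters are notified only after the lock is dropped.
func (e *endpoint) shutdown(notify func()) {
	e.mu.Lock()
	e.closed = true
	e.completeWorkerLocked() // cleanup happens atomically with the state change
	e.mu.Unlock()

	notify() // outside the lock, as in the patched code
}

// saver stands in for the socket saving logic: under this ordering it can
// never observe a closed endpoint whose worker is still running.
func saver(e *endpoint) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.closed && e.workerRunning {
		panic("saw a closed endpoint with a running worker")
	}
}

func main() {
	e := &endpoint{workerRunning: true}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		e.shutdown(func() { fmt.Println("waiters notified") })
	}()
	go func() {
		defer wg.Done()
		saver(e)
	}()
	wg.Wait()
}

With the pre-patch ordering (unlock, notify, relock, then clean up), the saver could acquire the lock in the window where closed was already set but the worker had not yet been cleaned up, which is exactly the inconsistency the commit message describes.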