author    Tamir Duberstein <tamird@google.com>  2021-09-30 20:43:47 -0700
committer gVisor bot <gvisor-bot@google.com>  2021-09-30 20:46:27 -0700
commit    84f1578e9eae732dcff77e05dab46fdb98e508c0 (patch)
tree      2887958f3df99d7b32f63e133b0e87a19f99dc03 /test
parent    9fff9469a29dbab57abde3cc17c01f0e987dc150 (diff)
Test SYN and accept queue behavior on shutdown

Enhance the backlog test to exercise the SYN queue.

Updates #6671.

PiperOrigin-RevId: 400094530
Diffstat (limited to 'test')
 test/packetimpact/tests/tcp_listen_backlog_test.go | 311 ++++++++++++++++++---
 1 file changed, 262 insertions(+), 49 deletions(-)
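The knob at the center of this change is the listen() backlog: the accept queue holds completed handshakes the application has not yet accept()ed, the SYN queue holds handshakes still awaiting their final ACK, and SYN cookies take over once the SYN queue is exhausted. The following standalone sketch (not part of the commit) shows the kind of socket setup the test drives on the DUT; the loopback address and port are illustrative, and the exact overflow behavior depends on kernel settings such as net.ipv4.tcp_syncookies.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Same value the test passes to listen(): pending connections beyond
	// this are answered with SYN cookies or dropped, depending on
	// net.ipv4.tcp_syncookies.
	const backlog = 10

	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// 127.0.0.1:8080 is an arbitrary illustrative address.
	sa := &unix.SockaddrInet4{Port: 8080, Addr: [4]byte{127, 0, 0, 1}}
	if err := unix.Bind(fd, sa); err != nil {
		panic(err)
	}
	if err := unix.Listen(fd, backlog); err != nil {
		panic(err)
	}
	fmt.Printf("listening with backlog %d\n", backlog)

	// With no accept() loop, completed handshakes pile up in the accept
	// queue (roughly backlog+1 of them) while the SYN queue holds
	// connections still awaiting their final ACK.
}

With no accept() loop, dialing such a listener more than backlog+1 times leaves later handshakes stranded in the two queues, which is exactly the state the test constructs in the diff below before shutting the listener down.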
diff --git a/test/packetimpact/tests/tcp_listen_backlog_test.go b/test/packetimpact/tests/tcp_listen_backlog_test.go
index fea7d5b6f..e4e12a9b5 100644
--- a/test/packetimpact/tests/tcp_listen_backlog_test.go
+++ b/test/packetimpact/tests/tcp_listen_backlog_test.go
@@ -15,7 +15,9 @@
package tcp_listen_backlog_test
import (
+ "bytes"
"flag"
+ "sync"
"testing"
"time"
@@ -35,60 +37,271 @@ func init() {
func TestTCPListenBacklog(t *testing.T) {
dut := testbench.NewDUT(t)
- // Listening endpoint accepts one more connection than the listen backlog.
- listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM, unix.IPPROTO_TCP, 0 /*backlog*/)
+ // This is the number of pending connections before SYN cookies are used.
+ const backlog = 10
- var establishedConn testbench.TCPIPv4
- var incompleteConn testbench.TCPIPv4
+ listenFd, remotePort := dut.CreateListener(t, unix.SOCK_STREAM|unix.SOCK_NONBLOCK, unix.IPPROTO_TCP, backlog)
+ defer dut.Close(t, listenFd)
- // Test if the DUT listener replies to more SYNs than listen backlog+1
- for i, conn := range []*testbench.TCPIPv4{&establishedConn, &incompleteConn} {
- *conn = dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- // Expect dut connection to have transitioned to SYN-RCVD state.
- conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected SYN-ACK for %d connection, %s", i, err)
+ // Fill the SYN queue with connections in SYN-RCVD. We will use these to test
+ // that ACKs received while the accept queue is full are ignored.
+ var synQueueConns [backlog]testbench.TCPIPv4
+ defer func() {
+ for i := range synQueueConns {
+ synQueueConns[i].Close(t)
+ }
+ }()
+ {
+ var wg sync.WaitGroup
+ for i := range synQueueConns {
+ conn := &synQueueConns[i]
+ *conn = dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{})
+
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
+ if got, err := conn.Expect(t, testbench.TCP{}, time.Second); err != nil {
+ t.Errorf("%d: expected TCP frame: %s", i, err)
+ } else if got, want := *got.Flags, header.TCPFlagSyn|header.TCPFlagAck; got != want {
+ t.Errorf("%d: got %s, want %s", i, got, want)
+ }
+ }(i)
+ }
+ wg.Wait()
+ if t.Failed() {
+ t.FailNow()
}
}
- defer establishedConn.Close(t)
- defer incompleteConn.Close(t)
-
- // Send the ACK to complete handshake.
- establishedConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
-
- // Poll for the established connection ready for accept.
- dut.PollOne(t, listenFd, unix.POLLIN, time.Second)
-
- // Send the ACK to complete handshake, expect this to be dropped by the
- // listener as the accept queue would be full because of the previous
- // handshake.
- incompleteConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- // Let the test wait for sometime so that the ACK is indeed dropped by
- // the listener. Without such a wait, the DUT accept can race with
- // ACK handling (dropping) causing the test to be flaky.
- time.Sleep(100 * time.Millisecond)
-
- // Drain the accept queue to enable poll for subsequent connections on the
- // listener.
- fd, _ := dut.Accept(t, listenFd)
- dut.Close(t, fd)
-
- // The ACK for the incomplete connection should be ignored by the
- // listening endpoint and the poll on listener should now time out.
- if pfds := dut.Poll(t, []unix.PollFd{{Fd: listenFd, Events: unix.POLLIN}}, time.Second); len(pfds) != 0 {
- t.Fatalf("got dut.Poll(...) = %#v", pfds)
+
+ const payloadLen = 1
+ payload := testbench.Payload{Bytes: testbench.GenerateRandomPayload(t, payloadLen)}
+
+ // Fill the accept queue with connections established using SYN cookies.
+ var synCookieConns [backlog + 1]testbench.TCPIPv4
+ defer func() {
+ for i := range synCookieConns {
+ synCookieConns[i].Close(t)
+ }
+ }()
+ {
+ var wg sync.WaitGroup
+ for i := range synCookieConns {
+ conn := &synCookieConns[i]
+ *conn = dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{})
+
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
+ if got, err := conn.Expect(t, testbench.TCP{}, time.Second); err != nil {
+ t.Errorf("%d: expected TCP frame: %s", i, err)
+ } else if got, want := *got.Flags, header.TCPFlagSyn|header.TCPFlagAck; got != want {
+ t.Errorf("%d: got %s, want %s", i, got, want)
+ }
+ // Send a payload so we can observe the DUT's ACK.
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, &payload)
+ if got, err := conn.Expect(t, testbench.TCP{}, time.Second); err != nil {
+ t.Errorf("%d: expected TCP frame: %s", i, err)
+ } else if got, want := *got.Flags, header.TCPFlagAck; got != want {
+ t.Errorf("%d: got %s, want %s", i, got, want)
+ }
+ }(i)
+ }
+ wg.Wait()
+ if t.Failed() {
+ t.FailNow()
+ }
}
- // Re-send the ACK to complete handshake and re-fill the accept-queue.
- incompleteConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
- dut.PollOne(t, listenFd, unix.POLLIN, time.Second)
-
- // Now initiate a new connection when the accept queue is full.
- connectingConn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{SrcPort: &remotePort})
- defer connectingConn.Close(t)
- // Expect dut connection to drop the SYN and let the client stay in SYN_SENT state.
- connectingConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
- if got, err := connectingConn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err == nil {
- t.Fatalf("expected no SYN-ACK, but got %s", got)
+ // Send ACKs to complete the handshakes. These are expected to be dropped
+ // because the accept queue is full.
+ {
+ var wg sync.WaitGroup
+ for i := range synQueueConns {
+ conn := &synQueueConns[i]
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
+ // Wait for the SYN-ACK to be retransmitted to confirm the ACK was
+ // dropped.
+ seqNum := uint32(*conn.RemoteSeqNum(t) - 1)
+ if got, err := conn.Expect(t, testbench.TCP{SeqNum: &seqNum}, time.Second); err != nil {
+ t.Errorf("%d: expected TCP frame: %s", i, err)
+ } else if got, want := *got.Flags, header.TCPFlagSyn|header.TCPFlagAck; got != want {
+ t.Errorf("%d: got %s, want %s", i, got, want)
+ }
+ }(i)
+ }
+
+ wg.Wait()
+ if t.Failed() {
+ t.FailNow()
+ }
+ }
+
+ func() {
+ // Now initiate a new connection when the accept queue is full.
+ connectingConn := dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{})
+ defer connectingConn.Close(t)
+ // Expect the DUT to drop the SYN.
+ connectingConn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
+ if got, err := connectingConn.Expect(t, testbench.TCP{}, time.Second); err == nil {
+ t.Fatalf("expected no TCP frame, got %s", got)
+ }
+ }()
+
+ // Drain the accept queue.
+ {
+ var wg sync.WaitGroup
+ for i := range synCookieConns {
+ conn := &synCookieConns[i]
+
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ fd, _ := dut.Accept(t, listenFd)
+ b := dut.Recv(t, fd, payloadLen+1, 0)
+ dut.Close(t, fd)
+ if !bytes.Equal(b, payload.Bytes) {
+ t.Errorf("connection %d: got dut.Recv = %x, want = %x", i, b, payload.Bytes)
+ }
+
+ if got, err := conn.Expect(t, testbench.TCP{}, time.Second); err != nil {
+ t.Errorf("%d: expected TCP frame: %s", i, err)
+ } else if got, want := *got.Flags, header.TCPFlagFin|header.TCPFlagAck; got != want {
+ t.Errorf("%d: got %s, want %s", i, got, want)
+ }
+
+ // Prevent retransmission.
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
+ }(i)
+ }
+ wg.Wait()
+ if t.Failed() {
+ t.FailNow()
+ }
}
+
+ // Complete the partial connections to move them from the SYN queue to the
+ // accept queue. We will use these to test that connections in the accept
+ // queue are closed on listener shutdown.
+ {
+ var wg sync.WaitGroup
+ for i := range synQueueConns {
+ conn := &synQueueConns[i]
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ tcp := testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}
+
+ // Exercise connections with and without pending data.
+ if i%2 == 0 {
+ // Send ACK with no payload; wait for absence of SYN-ACK retransmit.
+ conn.Send(t, tcp)
+ if got, err := conn.Expect(t, testbench.TCP{}, time.Second); err == nil {
+ t.Errorf("%d: expected no TCP frame, got %s", i, got)
+ }
+ } else {
+ // Send ACK with payload; wait for ACK.
+ conn.Send(t, tcp, &payload)
+ if got, err := conn.Expect(t, testbench.TCP{}, time.Second); err != nil {
+ t.Errorf("%d: expected TCP frame: %s", i, err)
+ } else if got, want := *got.Flags, header.TCPFlagAck; got != want {
+ t.Errorf("%d: got %s, want %s", i, got, want)
+ }
+ }
+ }(i)
+ }
+
+ wg.Wait()
+ if t.Failed() {
+ t.FailNow()
+ }
+ }
+
+ // The accept queue now has N-1 connections in it. The next incoming SYN will
+ // enter the SYN queue, and the one following will use SYN cookies. We test
+ // both.
+ var connectingConns [2]testbench.TCPIPv4
+ defer func() {
+ for i := range connectingConns {
+ connectingConns[i].Close(t)
+ }
+ }()
+ {
+ var wg sync.WaitGroup
+ for i := range connectingConns {
+ conn := &connectingConns[i]
+ *conn = dut.Net.NewTCPIPv4(t, testbench.TCP{DstPort: &remotePort}, testbench.TCP{})
+
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
+ if got, err := conn.Expect(t, testbench.TCP{}, time.Second); err != nil {
+ t.Errorf("%d: expected TCP frame: %s", i, err)
+ } else if got, want := *got.Flags, header.TCPFlagSyn|header.TCPFlagAck; got != want {
+ t.Errorf("%d: got %s, want %s", i, got, want)
+ }
+ }(i)
+ }
+ wg.Wait()
+ if t.Failed() {
+ t.FailNow()
+ }
+ }
+
+ dut.Shutdown(t, listenFd, unix.SHUT_RD)
+
+ var wg sync.WaitGroup
+
+ // Shutdown causes connections in the accept queue to be closed.
+ for i := range synQueueConns {
+ conn := &synQueueConns[i]
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ if got, err := conn.Expect(t, testbench.TCP{}, time.Second); err != nil {
+ t.Errorf("%d: expected TCP frame: %s", i, err)
+ } else if got, want := *got.Flags, header.TCPFlagRst|header.TCPFlagAck; got != want {
+ t.Errorf("%d: got %s, want %s", i, got, want)
+ }
+ }(i)
+ }
+
+ for i := range connectingConns {
+ conn := &connectingConns[i]
+
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ // The pending connection in the SYN queue is now a zombie on gVisor.
+ //
+ // TODO(https://gvisor.dev/issues/6671): Stop retransmitting the SYN-ACK.
+ if i == 0 && !dut.Uname.IsLinux() {
+ seqNum := uint32(*conn.RemoteSeqNum(t) - 1)
+ if got, err := conn.Expect(t, testbench.TCP{SeqNum: &seqNum}, time.Second); err != nil {
+ t.Errorf("%d: expected TCP frame: %s", i, err)
+ } else if got, want := *got.Flags, header.TCPFlagSyn|header.TCPFlagAck; got != want {
+ t.Errorf("%d: got %s, want %s", i, got, want)
+ }
+ } else {
+ if got, err := conn.Expect(t, testbench.TCP{}, time.Second); err == nil {
+ t.Errorf("%d: expected no TCP frame, got %s", i, got)
+ }
+ }
+ }(i)
+ }
+
+ wg.Wait()
}
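For a client's-eye view of the final step, the same shutdown semantics can be reproduced with plain sockets. This is a minimal sketch under the assumption, matching the expectations coded above for both Linux and gVisor, that shutdown(SHUT_RD) on a listening socket resets connections parked in its accept queue; the loopback address and error handling are illustrative.

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	// Complete a handshake but never Accept it, parking the connection
	// in the listener's accept queue.
	c, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Shut down the listening socket's read side, as the test does with
	// dut.Shutdown(t, listenFd, unix.SHUT_RD).
	raw, err := ln.(*net.TCPListener).SyscallConn()
	if err != nil {
		panic(err)
	}
	if cerr := raw.Control(func(fd uintptr) {
		err = unix.Shutdown(int(fd), unix.SHUT_RD)
	}); cerr != nil {
		panic(cerr)
	}
	if err != nil {
		panic(err)
	}

	// If the queued connection was reset (the RST|ACK the test expects),
	// this read fails with "connection reset by peer".
	if _, err := c.Read(make([]byte, 1)); err != nil {
		fmt.Println("read:", err)
	}
}

Here net.Dial completes the three-way handshake, the connection is never accepted, and the final read observes from the client side the reset that the test asserts on the testbench side.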